diff --git "a/unet.xml" "b/unet.xml"
--- "a/unet.xml"
+++ "b/unet.xml"
@@ -1,10 +1,10 @@
-<?xml version="1.0" ?>
+<?xml version="1.0"?>
 <net name="torch_jit" version="11">
 	<layers>
 		<layer id="2" name="latent_model_input" type="Parameter" version="opset1">
-			<data shape="2,4,64,64" element_type="f32"/>
+			<data shape="2,4,64,64" element_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="latent_model_input"/>
+				<attribute name="old_api_map_element_type" version="0" value="f16" />
 			</rt_info>
 			<output>
 				<port id="0" precision="FP32" names="latent_model_input">
@@ -16,18 +16,18 @@
 			</output>
 		</layer>
 		<layer id="1" name="t" type="Parameter" version="opset1">
-			<data shape="" element_type="f64"/>
+			<data shape="" element_type="f64" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="t"/>
+				<attribute name="old_api_map_element_type" version="0" value="f16" />
 			</rt_info>
 			<output>
-				<port id="0" precision="FP64" names="t"/>
+				<port id="0" precision="FP64" names="t" />
 			</output>
 		</layer>
 		<layer id="0" name="encoder_hidden_states" type="Parameter" version="opset1">
-			<data shape="2,77,768" element_type="f32"/>
+			<data shape="2,77,768" element_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="encoder_hidden_states"/>
+				<attribute name="old_api_map_element_type" version="0" value="f16" />
 			</rt_info>
 			<output>
 				<port id="0" precision="FP32" names="encoder_hidden_states">
@@ -37,1916 +37,1971 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3" name="Constant_150618" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="0" size="1280"/>
+		<layer id="3" name="Constant_87208_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="0" size="640" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4" name="Constant_150617" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 2560" offset="1280" size="10240"/>
-			<output>
-				<port id="0" precision="FP32">
+		<layer id="4" name="Constant_87208" type="Convert" version="opset1">
+			<data destination_type="f32" />
+			<rt_info>
+				<attribute name="decompression" version="0" />
+			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>2560</dim>
+					<dim>320</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="5" name="Constant_150614" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="11520" size="1280"/>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="6" name="Constant_150610" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="12800" size="1280"/>
+		<layer id="5" name="Constant_87207_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 2560" offset="640" size="5120" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>320</dim>
+					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="7" name="Constant_150599" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="14080" size="1280"/>
-			<output>
-				<port id="0" precision="FP32">
+		<layer id="6" name="Constant_87207" type="Convert" version="opset1">
+			<data destination_type="f32" />
+			<rt_info>
+				<attribute name="decompression" version="0" />
+			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>320</dim>
+					<dim>2560</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="8" name="Constant_150598" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 2560" offset="15360" size="10240"/>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="9" name="Constant_150595" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="25600" size="1280"/>
+		<layer id="7" name="Constant_87204_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="5760" size="640" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="10" name="Constant_150591" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="26880" size="1280"/>
-			<output>
-				<port id="0" precision="FP32">
+		<layer id="8" name="Constant_87204" type="Convert" version="opset1">
+			<data destination_type="f32" />
+			<rt_info>
+				<attribute name="decompression" version="0" />
+			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>320</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="11" name="Constant_150580" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="28160" size="1280"/>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="12" name="Constant_150579" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 2560" offset="29440" size="10240"/>
+		<layer id="9" name="Constant_87200_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="6400" size="640" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>2560</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="13" name="Constant_150576" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="39680" size="1280"/>
-			<output>
-				<port id="0" precision="FP32">
+		<layer id="10" name="Constant_87200" type="Convert" version="opset1">
+			<data destination_type="f32" />
+			<rt_info>
+				<attribute name="decompression" version="0" />
+			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>320</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="14" name="Constant_150572" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="40960" size="1280"/>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="15" name="Constant_150561" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="42240" size="2560"/>
+		<layer id="11" name="Constant_87189_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="7040" size="640" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>640</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="16" name="Constant_150560" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 5120" offset="44800" size="20480"/>
-			<output>
-				<port id="0" precision="FP32">
+		<layer id="12" name="Constant_87189" type="Convert" version="opset1">
+			<data destination_type="f32" />
+			<rt_info>
+				<attribute name="decompression" version="0" />
+			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>5120</dim>
+					<dim>320</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="17" name="Constant_150557" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="65280" size="2560"/>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>640</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="18" name="Constant_150553" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="67840" size="2560"/>
+		<layer id="13" name="Constant_87188_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 2560" offset="7680" size="5120" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>640</dim>
+					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="19" name="Constant_150542" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="70400" size="2560"/>
-			<output>
-				<port id="0" precision="FP32">
+		<layer id="14" name="Constant_87188" type="Convert" version="opset1">
+			<data destination_type="f32" />
+			<rt_info>
+				<attribute name="decompression" version="0" />
+			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>640</dim>
+					<dim>2560</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="20" name="Constant_150541" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 5120" offset="72960" size="20480"/>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>5120</dim>
+					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="21" name="Constant_150538" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="93440" size="2560"/>
+		<layer id="15" name="Constant_87185_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="12800" size="640" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>640</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="22" name="Constant_150534" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="96000" size="2560"/>
-			<output>
-				<port id="0" precision="FP32">
+		<layer id="16" name="Constant_87185" type="Convert" version="opset1">
+			<data destination_type="f32" />
+			<rt_info>
+				<attribute name="decompression" version="0" />
+			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>640</dim>
+					<dim>320</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="23" name="Constant_150523" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="98560" size="2560"/>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>640</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="24" name="Constant_150522" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 5120" offset="101120" size="20480"/>
+		<layer id="17" name="Constant_87181_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="13440" size="640" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>5120</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="25" name="Constant_150519" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="121600" size="2560"/>
-			<output>
-				<port id="0" precision="FP32">
+		<layer id="18" name="Constant_87181" type="Convert" version="opset1">
+			<data destination_type="f32" />
+			<rt_info>
+				<attribute name="decompression" version="0" />
+			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>640</dim>
+					<dim>320</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="26" name="Constant_150515" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="124160" size="2560"/>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>640</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="27" name="Constant_150504" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="126720" size="5120"/>
+		<layer id="19" name="Constant_87170_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="14080" size="640" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>1280</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="28" name="Constant_150503" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 10240" offset="131840" size="40960"/>
-			<output>
-				<port id="0" precision="FP32">
+		<layer id="20" name="Constant_87170" type="Convert" version="opset1">
+			<data destination_type="f32" />
+			<rt_info>
+				<attribute name="decompression" version="0" />
+			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>10240</dim>
+					<dim>320</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="29" name="Constant_150500" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="172800" size="5120"/>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>1280</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="30" name="Constant_150496" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="177920" size="5120"/>
+		<layer id="21" name="Constant_87169_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 2560" offset="14720" size="5120" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>1280</dim>
+					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="31" name="Constant_150485" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="183040" size="5120"/>
-			<output>
-				<port id="0" precision="FP32">
+		<layer id="22" name="Constant_87169" type="Convert" version="opset1">
+			<data destination_type="f32" />
+			<rt_info>
+				<attribute name="decompression" version="0" />
+			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>1280</dim>
+					<dim>2560</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="32" name="Constant_150484" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 10240" offset="188160" size="40960"/>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>10240</dim>
+					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="33" name="Constant_150481" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="229120" size="5120"/>
+		<layer id="23" name="Constant_87166_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="19840" size="640" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>1280</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="34" name="Constant_150477" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="234240" size="5120"/>
-			<output>
-				<port id="0" precision="FP32">
+		<layer id="24" name="Constant_87166" type="Convert" version="opset1">
+			<data destination_type="f32" />
+			<rt_info>
+				<attribute name="decompression" version="0" />
+			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>1280</dim>
+					<dim>320</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="35" name="Constant_150466" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="239360" size="5120"/>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>1280</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="36" name="Constant_150465" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 10240" offset="244480" size="40960"/>
+		<layer id="25" name="Constant_87162_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="20480" size="640" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>10240</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="37" name="Constant_150462" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="285440" size="5120"/>
-			<output>
-				<port id="0" precision="FP32">
+		<layer id="26" name="Constant_87162" type="Convert" version="opset1">
+			<data destination_type="f32" />
+			<rt_info>
+				<attribute name="decompression" version="0" />
+			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>1280</dim>
+					<dim>320</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="38" name="Constant_150458" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="290560" size="5120"/>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>1280</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="39" name="Constant_150427" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="295680" size="5120"/>
+		<layer id="27" name="Constant_87151_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="21120" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>1280</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="40" name="Constant_150426" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 10240" offset="300800" size="40960"/>
-			<output>
-				<port id="0" precision="FP32">
+		<layer id="28" name="Constant_87151" type="Convert" version="opset1">
+			<data destination_type="f32" />
+			<rt_info>
+				<attribute name="decompression" version="0" />
+			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>10240</dim>
+					<dim>640</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="41" name="Constant_150423" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="341760" size="5120"/>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>1280</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="42" name="Constant_150419" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="346880" size="5120"/>
+		<layer id="29" name="Constant_87150_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 5120" offset="22400" size="10240" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>1280</dim>
+					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="43" name="Constant_150398" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="352000" size="5120"/>
-			<output>
-				<port id="0" precision="FP32">
+		<layer id="30" name="Constant_87150" type="Convert" version="opset1">
+			<data destination_type="f32" />
+			<rt_info>
+				<attribute name="decompression" version="0" />
+			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>1280</dim>
+					<dim>5120</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="44" name="Constant_150397" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 10240" offset="357120" size="40960"/>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>10240</dim>
+					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="45" name="Constant_150394" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="398080" size="5120"/>
+		<layer id="31" name="Constant_87147_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="32640" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>1280</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="46" name="Constant_150390" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="403200" size="5120"/>
-			<output>
-				<port id="0" precision="FP32">
+		<layer id="32" name="Constant_87147" type="Convert" version="opset1">
+			<data destination_type="f32" />
+			<rt_info>
+				<attribute name="decompression" version="0" />
+			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>1280</dim>
+					<dim>640</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="47" name="Constant_150379" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="408320" size="5120"/>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>1280</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="48" name="Constant_150378" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 10240" offset="413440" size="40960"/>
+		<layer id="33" name="Constant_87143_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="33920" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>10240</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="49" name="Constant_150375" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="454400" size="5120"/>
+		<layer id="34" name="Constant_87143" type="Convert" version="opset1">
+			<data destination_type="f32" />
+			<rt_info>
+				<attribute name="decompression" version="0" />
+			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>1280</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="50" name="Constant_150371" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="459520" size="5120"/>
+		<layer id="35" name="Constant_87132_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="35200" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>1280</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="51" name="Constant_150360" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="464640" size="2560"/>
+		<layer id="36" name="Constant_87132" type="Convert" version="opset1">
+			<data destination_type="f32" />
+			<rt_info>
+				<attribute name="decompression" version="0" />
+			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="52" name="Constant_150359" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 5120" offset="467200" size="20480"/>
+		<layer id="37" name="Constant_87131_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 5120" offset="36480" size="10240" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="53" name="Constant_150356" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="487680" size="2560"/>
+		<layer id="38" name="Constant_87131" type="Convert" version="opset1">
+			<data destination_type="f32" />
+			<rt_info>
+				<attribute name="decompression" version="0" />
+			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>5120</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>640</dim>
+					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="54" name="Constant_150352" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="490240" size="2560"/>
+		<layer id="39" name="Constant_87128_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="46720" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="55" name="Constant_150341" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="492800" size="2560"/>
-			<output>
-				<port id="0" precision="FP32">
+		<layer id="40" name="Constant_87128" type="Convert" version="opset1">
+			<data destination_type="f32" />
+			<rt_info>
+				<attribute name="decompression" version="0" />
+			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>640</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="56" name="Constant_150340" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 5120" offset="495360" size="20480"/>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>5120</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="57" name="Constant_150337" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="515840" size="2560"/>
+		<layer id="41" name="Constant_87124_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="48000" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="58" name="Constant_150333" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="518400" size="2560"/>
-			<output>
-				<port id="0" precision="FP32">
+		<layer id="42" name="Constant_87124" type="Convert" version="opset1">
+			<data destination_type="f32" />
+			<rt_info>
+				<attribute name="decompression" version="0" />
+			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>640</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="59" name="Constant_150322" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="520960" size="1280"/>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>320</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="60" name="Constant_150321" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 2560" offset="522240" size="10240"/>
+		<layer id="43" name="Constant_87113_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="49280" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>2560</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="61" name="Constant_150318" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="532480" size="1280"/>
-			<output>
-				<port id="0" precision="FP32">
+		<layer id="44" name="Constant_87113" type="Convert" version="opset1">
+			<data destination_type="f32" />
+			<rt_info>
+				<attribute name="decompression" version="0" />
+			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>320</dim>
+					<dim>640</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="62" name="Constant_150314" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="533760" size="1280"/>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>320</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="63" name="Constant_150303" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="535040" size="1280"/>
+		<layer id="45" name="Constant_87112_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 5120" offset="50560" size="10240" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>320</dim>
+					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="64" name="Constant_150302" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 2560" offset="536320" size="10240"/>
+		<layer id="46" name="Constant_87112" type="Convert" version="opset1">
+			<data destination_type="f32" />
+			<rt_info>
+				<attribute name="decompression" version="0" />
+			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>5120</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>2560</dim>
+					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="65" name="Constant_150299" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="546560" size="1280"/>
+		<layer id="47" name="Constant_87109_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="60800" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>320</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="66" name="Constant_150295" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="547840" size="1280"/>
+		<layer id="48" name="Constant_87109" type="Convert" version="opset1">
+			<data destination_type="f32" />
+			<rt_info>
+				<attribute name="decompression" version="0" />
+			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>320</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="67" name="m.conv_in.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 4, 3, 3" offset="549120" size="46080"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.conv_in.weight"/>
-			</rt_info>
+		<layer id="49" name="Constant_87105_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="62080" size="1280" />
 			<output>
-				<port id="0" precision="FP32" names="m.conv_in.weight">
-					<dim>320</dim>
-					<dim>4</dim>
-					<dim>3</dim>
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="68" name="Convolution_800" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
+		<layer id="50" name="Constant_87105" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_800"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>320</dim>
-					<dim>4</dim>
-					<dim>3</dim>
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="69" name="Reshape_820" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="595200" size="1280"/>
+		<layer id="51" name="Constant_87094_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="63360" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="70" name="onnx::Cast_728" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="52" name="Constant_87094" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_819, Reshape_820, input.8, onnx::Cast_728"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.8,onnx::Cast_728">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="71" name="onnx::Reshape_730" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_730"/>
-			</rt_info>
+		<layer id="53" name="Constant_87093_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 10240" offset="65920" size="20480" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_730">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="72" name="onnx::InstanceNormalization_731" type="Reshape" version="opset1">
-			<data special_zero="true"/>
+		<layer id="54" name="Constant_87093" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_731"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>10240</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_731">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>40960</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="73" name="Constant_858" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_858"/>
-			</rt_info>
+		<layer id="55" name="Constant_87090_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="86400" size="2560" />
 			<output>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="74" name="MVN_859" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
+		<layer id="56" name="Constant_87090" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_878, Concat_923, MVN_859, Multiply_906, Reshape_879, Reshape_924, onnx::Reshape_734"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>40960</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_734">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>40960</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="75" name="onnx::Reshape_735" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_735"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</input>
+		<layer id="57" name="Constant_87086_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="88960" size="2560" />
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_735">
-					<dim>4</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="76" name="onnx::Mul_736" type="Reshape" version="opset1">
-			<data special_zero="true"/>
+		<layer id="58" name="Constant_87086" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_736"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>40960</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_736">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="77" name="Constant_150283" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="596512" size="1280"/>
+		<layer id="59" name="Constant_87075_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="91520" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="78" name="onnx::Add_739" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="60" name="Constant_87075" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_739"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_739">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="79" name="Constant_150284" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="597792" size="1280"/>
+		<layer id="61" name="Constant_87074_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 10240" offset="94080" size="20480" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="80" name="onnx::Cast_742" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="62" name="Constant_87074" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.12, onnx::Cast_742"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>10240</dim>
 				</port>
+			</input>
+			<output>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>10240</dim>
 				</port>
-			</input>
+			</output>
+		</layer>
+		<layer id="63" name="Constant_87071_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="114560" size="2560" />
 			<output>
-				<port id="2" precision="FP32" names="input.12,onnx::Cast_742">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="81" name="input.16" type="Swish" version="opset4">
+		<layer id="64" name="Constant_87071" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.16, onnx::Mul_744"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="input.16">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="82" name="m.down_blocks.0.resnets.0.conv1.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320, 3, 3" offset="599072" size="3686400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.0.resnets.0.conv1.weight"/>
-			</rt_info>
+		<layer id="65" name="Constant_87067_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="117120" size="2560" />
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.0.resnets.0.conv1.weight">
-					<dim>320</dim>
-					<dim>320</dim>
-					<dim>3</dim>
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="83" name="Convolution_964" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
+		<layer id="66" name="Constant_87067" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_964"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>320</dim>
-					<dim>320</dim>
-					<dim>3</dim>
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="84" name="Reshape_984" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="4285472" size="1280"/>
+		<layer id="67" name="Constant_87056_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="119680" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="85" name="onnx::Add_746" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="68" name="Constant_87056" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_983, Reshape_984, onnx::Add_746"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_746">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="86" name="onnx::Unsqueeze_689" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_689"/>
-			</rt_info>
+		<layer id="69" name="Constant_87055_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 10240" offset="122240" size="20480" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_689">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="87" name="onnx::Cast_690" type="Unsqueeze" version="opset1">
+		<layer id="70" name="Constant_87055" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_690, onnx::Expand_691"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>1</dim>
+					<dim>10240</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP64" names="onnx::Cast_690,onnx::Expand_691">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="88" name="onnx::Expand_703" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
+		<layer id="71" name="Constant_87052_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="142720" size="2560" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Expand_703">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="89" name="timesteps" type="Broadcast" version="opset3">
-			<data mode="bidirectional"/>
+		<layer id="72" name="Constant_87052" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Expand_703, timesteps"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP64" names="timesteps">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="90" name="onnx::Unsqueeze_705" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_705"/>
-			</rt_info>
+		<layer id="73" name="Constant_87048_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="145280" size="2560" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_705">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="91" name="onnx::Cast_706" type="Unsqueeze" version="opset1">
+		<layer id="74" name="Constant_87048" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_706"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP64">
-					<dim>2</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP64" names="onnx::Cast_706">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="75" name="Constant_87017_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="147840" size="2560" />
+			<output>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="92" name="onnx::Mul_707" type="Convert" version="opset1">
-			<data destination_type="f32"/>
+		<layer id="76" name="Constant_87017" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_707"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP64">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="onnx::Mul_707">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="93" name="onnx::Mul_708" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 160" offset="4286768" size="640"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_708"/>
-			</rt_info>
+		<layer id="77" name="Constant_87016_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 10240" offset="150400" size="20480" />
 			<output>
-				<port id="0" precision="FP32" names="onnx::Mul_708">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-					<dim>160</dim>
+					<dim>1</dim>
+					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="94" name="onnx::Sin_709" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="78" name="Constant_87016" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Sin_709"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-				</port>
-				<port id="1" precision="FP32">
 					<dim>1</dim>
-					<dim>160</dim>
+					<dim>10240</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Sin_709">
-					<dim>2</dim>
-					<dim>160</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="95" name="onnx::Concat_710" type="Sin" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_710"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>160</dim>
-				</port>
-			</input>
+		<layer id="79" name="Constant_87013_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="170880" size="2560" />
 			<output>
-				<port id="1" precision="FP32" names="onnx::Concat_710">
-					<dim>2</dim>
-					<dim>160</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="96" name="onnx::Concat_711" type="Cos" version="opset1">
+		<layer id="80" name="Constant_87013" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_711"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>160</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="onnx::Concat_711">
-					<dim>2</dim>
-					<dim>160</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="97" name="onnx::Slice_712" type="Concat" version="opset1">
-			<data axis="-1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Slice_712"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>160</dim>
-				</port>
 				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>160</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Slice_712">
-					<dim>2</dim>
-					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="98" name="Constant_125643" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287408" size="16"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_717"/>
-			</rt_info>
+		<layer id="81" name="Constant_87009_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="173440" size="2560" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="99" name="Constant_125646" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287424" size="16"/>
+		<layer id="82" name="Constant_87009" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_717"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="100" name="Constant_125649" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287440" size="16"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_717"/>
-			</rt_info>
+		<layer id="83" name="Constant_86988_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="176000" size="2560" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="101" name="onnx::Concat_717" type="StridedSlice" version="opset1">
-			<data begin_mask="1, 0" end_mask="1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask=""/>
+		<layer id="84" name="Constant_86988" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_717"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>2</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>2</dim>
-				</port>
-				<port id="3" precision="I64">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="FP32" names="onnx::Concat_717">
-					<dim>2</dim>
-					<dim>160</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="102" name="Constant_125655" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_722"/>
-			</rt_info>
+		<layer id="85" name="Constant_86987_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 10240" offset="178560" size="20480" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="103" name="Constant_125658" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287408" size="16"/>
+		<layer id="86" name="Constant_86987" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_722"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>10240</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="104" name="Constant_125661" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287440" size="16"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_722"/>
-			</rt_info>
+			</input>
 			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="105" name="onnx::Concat_722" type="StridedSlice" version="opset1">
-			<data begin_mask="1, 0" end_mask="1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask=""/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_722"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>2</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>2</dim>
-				</port>
-				<port id="3" precision="I64">
-					<dim>2</dim>
-				</port>
-			</input>
+		<layer id="87" name="Constant_86984_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="199040" size="2560" />
 			<output>
-				<port id="4" precision="FP32" names="onnx::Concat_722">
-					<dim>2</dim>
-					<dim>160</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="106" name="onnx::Gemm_723" type="Concat" version="opset1">
-			<data axis="-1"/>
+		<layer id="88" name="Constant_86984" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gemm_723"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>160</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>160</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Gemm_723">
-					<dim>2</dim>
-					<dim>320</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="107" name="m.time_embedding.linear_1.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 320" offset="4287472" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.time_embedding.linear_1.weight"/>
-			</rt_info>
+		<layer id="89" name="Constant_86980_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="201600" size="2560" />
 			<output>
-				<port id="0" precision="FP32" names="m.time_embedding.linear_1.weight">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1280</dim>
-					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="108" name="MatMul_790" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="90" name="Constant_86980" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="MatMul_790"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="FP32">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1280</dim>
-					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="109" name="Constant_150285" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280" offset="5925872" size="5120"/>
+		<layer id="91" name="Constant_86969_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="204160" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="110" name="input" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="92" name="Constant_86969" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Multiply_791, input"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
+			</input>
+			<output>
 				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
-			</input>
+			</output>
+		</layer>
+		<layer id="93" name="Constant_86968_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 10240" offset="206720" size="20480" />
 			<output>
-				<port id="2" precision="FP32" names="input">
-					<dim>2</dim>
-					<dim>1280</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="111" name="onnx::Gemm_726" type="Swish" version="opset4">
+		<layer id="94" name="Constant_86968" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gemm_726, onnx::Mul_725"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>10240</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="onnx::Gemm_726">
-					<dim>2</dim>
-					<dim>1280</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="112" name="m.time_embedding.linear_2.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="5930992" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.time_embedding.linear_2.weight"/>
-			</rt_info>
+		<layer id="95" name="Constant_86965_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="227200" size="2560" />
 			<output>
-				<port id="0" precision="FP32" names="m.time_embedding.linear_2.weight">
-					<dim>1280</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="113" name="MatMul_797" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="96" name="Constant_86965" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="MatMul_797"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1280</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="114" name="Constant_150286" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280" offset="12484592" size="5120"/>
+		<layer id="97" name="Constant_86961_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="229760" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="115" name="input.4" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="98" name="Constant_86961" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Multiply_798, input.4"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
+			</input>
+			<output>
 				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
-			</input>
+			</output>
+		</layer>
+		<layer id="99" name="Constant_86950_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="232320" size="1280" />
 			<output>
-				<port id="2" precision="FP32" names="input.4">
-					<dim>2</dim>
-					<dim>1280</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="116" name="onnx::Gemm_748" type="Swish" version="opset4">
+		<layer id="100" name="Constant_86950" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gemm_748, onnx::Mul_747"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="onnx::Gemm_748">
-					<dim>2</dim>
-					<dim>1280</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="117" name="m.down_blocks.0.resnets.0.time_emb_proj.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 1280" offset="12489712" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.0.resnets.0.time_emb_proj.weight"/>
-			</rt_info>
+		<layer id="101" name="Constant_86949_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 5120" offset="233600" size="10240" />
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.0.resnets.0.time_emb_proj.weight">
-					<dim>320</dim>
-					<dim>1280</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="118" name="MatMul_1016" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="102" name="Constant_86949" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="MatMul_1016"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>320</dim>
-					<dim>1280</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>5120</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="119" name="Constant_150287" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320" offset="14128112" size="1280"/>
+		<layer id="103" name="Constant_86946_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="243840" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-					<dim>320</dim>
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="120" name="onnx::Unsqueeze_749" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="104" name="Constant_86946" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Multiply_1017, onnx::Unsqueeze_749"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-					<dim>320</dim>
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_749">
-					<dim>2</dim>
-					<dim>320</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="121" name="onnx::Unsqueeze_750" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_750"/>
-			</rt_info>
+		<layer id="105" name="Constant_86942_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="245120" size="1280" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_750">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="122" name="onnx::Unsqueeze_751" type="Unsqueeze" version="opset1">
+		<layer id="106" name="Constant_86942" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_751"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_751">
-					<dim>2</dim>
-					<dim>320</dim>
+				<port id="1" precision="FP32">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="123" name="onnx::Unsqueeze_752" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="14129392" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_752"/>
-			</rt_info>
+		<layer id="107" name="Constant_86931_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="246400" size="1280" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_752">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="124" name="onnx::Add_753" type="Unsqueeze" version="opset1">
+		<layer id="108" name="Constant_86931" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_753"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
 					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_753">
-					<dim>2</dim>
-					<dim>320</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="109" name="Constant_86930_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 5120" offset="247680" size="10240" />
+			<output>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="125" name="onnx::Cast_754" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="110" name="Constant_86930" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.20, onnx::Cast_754"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>5120</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.20,onnx::Cast_754">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="126" name="onnx::Reshape_756" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_756"/>
-			</rt_info>
+		<layer id="111" name="Constant_86927_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="257920" size="1280" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_756">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="127" name="onnx::InstanceNormalization_757" type="Reshape" version="opset1">
-			<data special_zero="true"/>
+		<layer id="112" name="Constant_86927" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_757"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_757">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>40960</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="128" name="Constant_1034" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_1034"/>
-			</rt_info>
+		<layer id="113" name="Constant_86923_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="259200" size="1280" />
 			<output>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="129" name="MVN_1035" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
+		<layer id="114" name="Constant_86923" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_1054, Concat_1099, MVN_1035, Multiply_1082, Reshape_1055, Reshape_1100, onnx::Reshape_760"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>40960</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_760">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>40960</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="115" name="Constant_86912_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="260480" size="640" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="130" name="onnx::Reshape_761" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
+		<layer id="116" name="Constant_86912" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_761"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_761">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="117" name="Constant_86911_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 2560" offset="261120" size="5120" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="131" name="onnx::Mul_762" type="Reshape" version="opset1">
-			<data special_zero="true"/>
+		<layer id="118" name="Constant_86911" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_762"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>40960</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>2560</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_762">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="132" name="Constant_150288" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="14129400" size="1280"/>
+		<layer id="119" name="Constant_86908_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="266240" size="640" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="133" name="onnx::Add_765" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="120" name="Constant_86908" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_765"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
+			</input>
+			<output>
 				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="121" name="Constant_86904_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="266880" size="640" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>1</dim>
 					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="122" name="Constant_86904" type="Convert" version="opset1">
+			<data destination_type="f32" />
+			<rt_info>
+				<attribute name="decompression" version="0" />
+			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_765">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="134" name="Constant_150289" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="14130680" size="1280"/>
+		<layer id="123" name="Constant_86893_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="267520" size="640" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="135" name="onnx::Cast_768" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="124" name="Constant_86893" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.24, onnx::Cast_768"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
+			</input>
+			<output>
 				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>1</dim>
 					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="125" name="Constant_86892_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 2560" offset="268160" size="5120" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>2560</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="126" name="Constant_86892" type="Convert" version="opset1">
+			<data destination_type="f32" />
+			<rt_info>
+				<attribute name="decompression" version="0" />
+			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>2560</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.24,onnx::Cast_768">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>2560</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="127" name="Constant_86889_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="273280" size="640" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="136" name="input.28" type="Swish" version="opset4">
+		<layer id="128" name="Constant_86889" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.28, onnx::Mul_770"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="input.28">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="129" name="Constant_86885_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="273920" size="640" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="137" name="m.down_blocks.0.resnets.0.conv2.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320, 3, 3" offset="14131960" size="3686400"/>
+		<layer id="130" name="Constant_86885" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.0.resnets.0.conv2.weight"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.0.resnets.0.conv2.weight">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="131" name="conv_in.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 4, 3, 3" offset="274560" size="23040" />
+			<output>
+				<port id="0" precision="FP16">
 					<dim>320</dim>
+					<dim>4</dim>
 					<dim>3</dim>
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="138" name="Convolution_1140" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
+		<layer id="132" name="conv_in.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_1140"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>4</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="conv_in.weight">
+					<dim>320</dim>
+					<dim>4</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="133" name="/conv_in/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>320</dim>
+					<dim>4</dim>
 					<dim>64</dim>
 					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>320</dim>
-					<dim>320</dim>
+					<dim>4</dim>
 					<dim>3</dim>
 					<dim>3</dim>
 				</port>
@@ -1960,10 +2015,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="139" name="Reshape_1160" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="17818360" size="1280"/>
+		<layer id="134" name="Reshape_13518_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="297600" size="640" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>320</dim>
 					<dim>1</dim>
@@ -1971,19 +2026,13 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="140" name="onnx::Add_772" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="135" name="Reshape_13518" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_1159, Reshape_1160, onnx::Add_772"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>320</dim>
 					<dim>1</dim>
@@ -1991,19 +2040,16 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_772">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="141" name="onnx::Div_773" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.32, onnx::Div_773"/>
-			</rt_info>
+		<layer id="136" name="/conv_in/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -2012,14 +2058,14 @@
 					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>2</dim>
+					<dim>1</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.32,onnx::Div_773">
+				<port id="2" precision="FP32" names="/conv_in/Conv_output_0">
 					<dim>2</dim>
 					<dim>320</dim>
 					<dim>64</dim>
@@ -2027,22 +2073,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="142" name="onnx::Reshape_780" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_780"/>
-			</rt_info>
+		<layer id="137" name="/down_blocks.0/resnets.0/norm1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_780">
+				<port id="0" precision="I64" names="/down_blocks.0/resnets.0/norm1/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="143" name="onnx::InstanceNormalization_781" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_781"/>
-			</rt_info>
+		<layer id="138" name="/down_blocks.0/resnets.0/norm1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -2055,29 +2095,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_781">
+				<port id="2" precision="FP32" names="/down_blocks.0/resnets.0/norm1/Reshape_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>40960</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="144" name="Constant_1200" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_1200"/>
-			</rt_info>
+		<layer id="139" name="Constant_13555" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="145" name="MVN_1201" type="MVN" version="opset6">
-			<data eps="9.9999999747524271e-07" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_1220, Concat_1265, MVN_1201, Multiply_1248, Reshape_1221, Reshape_1266, onnx::Reshape_784"/>
-			</rt_info>
+		<layer id="140" name="MVN_13556" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -2089,18 +2123,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_784">
+				<port id="2" precision="FP32" names="/down_blocks.0/resnets.0/norm1/InstanceNormalization_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>40960</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="146" name="onnx::Reshape_785" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_785"/>
-			</rt_info>
+		<layer id="141" name="/down_blocks.0/resnets.0/norm1/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -2110,16 +2141,13 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_785">
+				<port id="1" precision="I64" names="/down_blocks.0/resnets.0/norm1/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="147" name="onnx::Mul_786" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_786"/>
-			</rt_info>
+		<layer id="142" name="/down_blocks.0/resnets.0/norm1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -2131,7 +2159,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_786">
+				<port id="2" precision="FP32" names="/down_blocks.0/resnets.0/norm1/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>320</dim>
 					<dim>64</dim>
@@ -2139,10 +2167,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="148" name="Constant_150290" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="17819640" size="1280"/>
+		<layer id="143" name="Constant_86873_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="298272" size="640" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>320</dim>
 					<dim>1</dim>
@@ -2150,19 +2178,13 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="149" name="onnx::Add_789" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="144" name="Constant_86873" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_789"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>320</dim>
 					<dim>1</dim>
@@ -2170,18 +2192,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_789">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="150" name="Constant_150291" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="17820920" size="1280"/>
-			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>320</dim>
 					<dim>1</dim>
@@ -2189,11 +2200,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="151" name="input.36" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.36"/>
-			</rt_info>
+		<layer id="145" name="/down_blocks.0/resnets.0/norm1/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -2209,7 +2217,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.36">
+				<port id="2" precision="FP32" names="/down_blocks.0/resnets.0/norm1/Mul_output_0">
 					<dim>2</dim>
 					<dim>320</dim>
 					<dim>64</dim>
@@ -2217,52 +2225,32 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="152" name="m.down_blocks.0.attentions.0.proj_in.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320, 1, 1" offset="17822200" size="409600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.0.attentions.0.proj_in.weight"/>
-			</rt_info>
+		<layer id="146" name="Constant_86874_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="298912" size="640" />
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.0.attentions.0.proj_in.weight">
-					<dim>320</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>320</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="153" name="Convolution_1303" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
+		<layer id="147" name="Constant_86874" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_1303"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>320</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>320</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="154" name="Reshape_1323" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="18231800" size="1280"/>
-			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>320</dim>
 					<dim>1</dim>
@@ -2270,11 +2258,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="155" name="onnx::Transpose_793" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_1322, Reshape_1323, onnx::Transpose_793"/>
-			</rt_info>
+		<layer id="148" name="/down_blocks.0/resnets.0/norm1/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -2290,7 +2275,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_793">
+				<port id="2" precision="FP32" names="/down_blocks.0/resnets.0/norm1/Add_output_0">
 					<dim>2</dim>
 					<dim>320</dim>
 					<dim>64</dim>
@@ -2298,21 +2283,7 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="156" name="Constant_1351" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18233080" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_1351"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="157" name="onnx::Reshape_794" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_794"/>
-			</rt_info>
+		<layer id="149" name="/down_blocks.0/resnets.0/nonlinearity/Mul" type="Swish" version="opset4">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -2320,1990 +2291,1754 @@
 					<dim>64</dim>
 					<dim>64</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_794">
+				<port id="1" precision="FP32" names="/down_blocks.0/resnets.0/nonlinearity/Mul_output_0">
 					<dim>2</dim>
+					<dim>320</dim>
 					<dim>64</dim>
 					<dim>64</dim>
-					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="158" name="onnx::Reshape_8380" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="18233112" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_8380"/>
-			</rt_info>
+		<layer id="150" name="down_blocks.0.resnets.0.conv1.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320, 3, 3" offset="299552" size="1843200" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_8307,onnx::Reshape_8380">
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>3</dim>
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="159" name="input.40" type="Reshape" version="opset1">
-			<data special_zero="true"/>
+		<layer id="151" name="down_blocks.0.resnets.0.conv1.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.40"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+				<port id="0" precision="FP16">
 					<dim>320</dim>
-				</port>
-				<port id="1" precision="I64">
+					<dim>320</dim>
+					<dim>3</dim>
 					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.40">
-					<dim>2</dim>
-					<dim>4096</dim>
+				<port id="1" precision="FP32" names="down_blocks.0.resnets.0.conv1.weight">
 					<dim>320</dim>
+					<dim>320</dim>
+					<dim>3</dim>
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="160" name="Constant_1359" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_1359"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="161" name="onnx::Mul_812" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_808, onnx::Div_811, onnx::Mul_812, onnx::Pow_805, onnx::ReduceMean_807, onnx::Sqrt_810, onnx::Sub_804"/>
-			</rt_info>
+		<layer id="152" name="/down_blocks.0/resnets.0/conv1/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>3</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_812">
+				<port id="2" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="162" name="Constant_150292" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="18233144" size="1280"/>
+		<layer id="153" name="Reshape_13680_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="2142752" size="640" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="163" name="onnx::Add_813" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="154" name="Reshape_13680" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_813"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>320</dim>
-				</port>
-				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_813">
-					<dim>2</dim>
-					<dim>4096</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="164" name="Constant_150293" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="18234424" size="1280"/>
-			<output>
-				<port id="0" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="165" name="onnx::MatMul_814" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_814"/>
-			</rt_info>
+		<layer id="155" name="/down_blocks.0/resnets.0/conv1/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
 					<dim>1</dim>
 					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_814">
+				<port id="2" precision="FP32" names="/down_blocks.0/resnets.0/conv1/Conv_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="166" name="Constant_147414" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320" offset="18235704" size="409600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8308, q"/>
-			</rt_info>
+		<layer id="156" name="/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="2143392" size="8" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>320</dim>
-					<dim>320</dim>
+				<port id="0" precision="I64" names="/Constant_output_0">
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="167" name="q" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8308, q"/>
-			</rt_info>
+		<layer id="157" name="/Unsqueeze" type="Unsqueeze" version="opset1">
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>320</dim>
-					<dim>320</dim>
+				<port id="0" precision="FP64" />
+				<port id="1" precision="I64">
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
+				<port id="2" precision="FP64" names="/Cast_output_0,/Unsqueeze_output_0">
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="168" name="onnx::Reshape_8324" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645304" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_8324"/>
-			</rt_info>
+		<layer id="158" name="/Where" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_8315,onnx::Reshape_8324,onnx::Reshape_8333,onnx::Reshape_8388,onnx::Reshape_8397,onnx::Reshape_8406">
-					<dim>4</dim>
+				<port id="0" precision="I64" names="/Where_output_0">
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="169" name="onnx::Transpose_834" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_834"/>
-			</rt_info>
+		<layer id="159" name="/Expand" type="Broadcast" version="opset3">
+			<data mode="bidirectional" />
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
+				<port id="0" precision="FP64">
+					<dim>1</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_834">
+				<port id="2" precision="FP64" names="/Expand_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="170" name="Constant_1381" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_1381"/>
-			</rt_info>
+		<layer id="160" name="/time_proj/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="2143400" size="8" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="I64" names="/time_proj/Constant_output_0">
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="171" name="onnx::Reshape_835" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_835"/>
-			</rt_info>
+		<layer id="161" name="/time_proj/Unsqueeze" type="Unsqueeze" version="opset1">
 			<input>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP64">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_835">
+				<port id="2" precision="FP64" names="/time_proj/Unsqueeze_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="172" name="onnx::Reshape_8328" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="18645368" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_8328"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_8319,onnx::Reshape_8328,onnx::Reshape_8337,onnx::Reshape_8392,onnx::Reshape_8401,onnx::Reshape_8410">
-					<dim>3</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="173" name="q.3" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="q.3"/>
-			</rt_info>
+		<layer id="162" name="/time_proj/Cast" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<input>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP64">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.3">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+				<port id="1" precision="FP32" names="/time_proj/Cast_output_0">
+					<dim>2</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="174" name="Constant_147421" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320" offset="18645392" size="409600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k, onnx::MatMul_8309"/>
-			</rt_info>
+		<layer id="163" name="/time_proj/Constant_1_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 160" offset="2143408" size="320" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>320</dim>
-					<dim>320</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="175" name="k" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="164" name="/time_proj/Constant_1" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="k, onnx::MatMul_8309"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>320</dim>
-					<dim>320</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>160</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
+				<port id="1" precision="FP32" names="/time_proj/Constant_1_output_0">
+					<dim>1</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="176" name="onnx::Transpose_859" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_859"/>
-			</rt_info>
+		<layer id="165" name="/time_proj/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
+					<dim>1</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>160</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_859">
+				<port id="2" precision="FP32" names="/time_proj/Mul_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="177" name="Constant_1392" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_1392"/>
-			</rt_info>
+		<layer id="166" name="/time_proj/Sin" type="Sin" version="opset1">
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>160</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32" names="/time_proj/Sin_output_0">
+					<dim>2</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="178" name="onnx::Reshape_860" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_860"/>
-			</rt_info>
+		<layer id="167" name="/time_proj/Cos" type="Cos" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>160</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_860">
+				<port id="1" precision="FP32" names="/time_proj/Cos_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="179" name="k.3" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.3"/>
-			</rt_info>
+		<layer id="168" name="/time_proj/Concat" type="Concat" version="opset1">
+			<data axis="-1" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="k.3">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="180" name="onnx::Mul_896" type="Einsum" version="opset7">
-			<data equation="bid,bjd-&gt;bij"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_896"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>160</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>2</dim>
+					<dim>160</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_896">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>4096</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="181" name="Constant_150294" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1" offset="19054992" size="4"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/time_proj/Concat_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="182" name="onnx::Softmax_898" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Softmax_898"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>4096</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
+		<layer id="169" name="Constant_76184" type="Const" version="opset1">
+			<data element_type="i64" shape="2" offset="2143728" size="16" />
 			<output>
-				<port id="2" precision="FP32" names="onnx::Softmax_898">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>4096</dim>
+				<port id="0" precision="I64">
+					<dim>2</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="183" name="attn" type="SoftMax" version="opset8">
-			<data axis="-1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="attn"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>4096</dim>
-				</port>
-			</input>
+		<layer id="170" name="Constant_76187" type="Const" version="opset1">
+			<data element_type="i64" shape="2" offset="2143744" size="16" />
 			<output>
-				<port id="1" precision="FP32" names="attn">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>4096</dim>
+				<port id="0" precision="I64">
+					<dim>2</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="184" name="Constant_147428" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320" offset="19054996" size="409600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8310, v"/>
-			</rt_info>
+		<layer id="171" name="Constant_76190" type="Const" version="opset1">
+			<data element_type="i64" shape="2" offset="2143760" size="16" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>320</dim>
-					<dim>320</dim>
+				<port id="0" precision="I64">
+					<dim>2</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="185" name="v" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8310, v"/>
-			</rt_info>
+		<layer id="172" name="/time_proj/Slice" type="StridedSlice" version="opset1">
+			<data begin_mask="1, 0" end_mask="1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask="" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>320</dim>
 					<dim>320</dim>
 				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="v">
+				<port id="1" precision="I64">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="186" name="onnx::Transpose_884" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_884"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
+				<port id="2" precision="I64">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="3" precision="I64">
+					<dim>2</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_884">
+				<port id="4" precision="FP32" names="/time_proj/Slice_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="187" name="Constant_1400" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_1400"/>
-			</rt_info>
+		<layer id="173" name="Constant_76196" type="Const" version="opset1">
+			<data element_type="i64" shape="2" offset="2143776" size="16" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>4</dim>
+					<dim>2</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="188" name="onnx::Reshape_885" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_885"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
+		<layer id="174" name="Constant_76199" type="Const" version="opset1">
+			<data element_type="i64" shape="2" offset="2143728" size="16" />
+			<output>
+				<port id="0" precision="I64">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
 				</port>
-			</input>
+			</output>
+		</layer>
+		<layer id="175" name="Constant_76202" type="Const" version="opset1">
+			<data element_type="i64" shape="2" offset="2143760" size="16" />
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_885">
+				<port id="0" precision="I64">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="189" name="v.3" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="v.3"/>
-			</rt_info>
+		<layer id="176" name="/time_proj/Slice_1" type="StridedSlice" version="opset1">
+			<data begin_mask="1, 0" end_mask="1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask="" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>320</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>3</dim>
+					<dim>2</dim>
+				</port>
+				<port id="2" precision="I64">
+					<dim>2</dim>
+				</port>
+				<port id="3" precision="I64">
+					<dim>2</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.3">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+				<port id="4" precision="FP32" names="/time_proj/Slice_1_output_0">
+					<dim>2</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="190" name="out" type="Einsum" version="opset7">
-			<data equation="bij,bjd-&gt;bid"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="out"/>
-			</rt_info>
+		<layer id="177" name="/time_proj/Concat_1" type="Concat" version="opset1">
+			<data axis="-1" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>4096</dim>
+					<dim>2</dim>
+					<dim>160</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>2</dim>
+					<dim>160</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="out">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+				<port id="2" precision="FP32" names="/Cast_1_output_0,/time_proj/Concat_1_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="191" name="onnx::Gather_901" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_901, onnx::Gather_904, onnx::Gather_907"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-			</input>
+		<layer id="178" name="time_embedding.linear_1.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 320" offset="2143792" size="819200" />
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_901,onnx::Gather_904,onnx::Gather_907">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="192" name="onnx::Gather_902" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_902"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_902"/>
-			</output>
-		</layer>
-		<layer id="193" name="Constant_1412" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_1412"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="194" name="onnx::Div_903" type="Gather" version="opset8">
-			<data batch_dims="0"/>
+		<layer id="179" name="time_embedding.linear_1.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_1412, onnx::Div_903, onnx::Gather_902"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>320</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_903"/>
-			</output>
-		</layer>
-		<layer id="195" name="onnx::Div_910" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_910"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_910"/>
+				<port id="1" precision="FP32" names="time_embedding.linear_1.weight">
+					<dim>1280</dim>
+					<dim>320</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="196" name="onnx::Cast_911" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_911, onnx::Cast_912, onnx::Div_910, onnx::Unsqueeze_913"/>
-			</rt_info>
+		<layer id="180" name="/time_embedding/linear_1/Gemm/WithoutBiases" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>320</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_911,onnx::Cast_912,onnx::Unsqueeze_913"/>
+				<port id="2" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="197" name="onnx::Unsqueeze_915" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_915"/>
-			</rt_info>
+		<layer id="181" name="Constant_86875_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280" offset="2962992" size="2560" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_915">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="198" name="onnx::Concat_916" type="Unsqueeze" version="opset1">
+		<layer id="182" name="Constant_86875" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_916, onnx::Unsqueeze_915"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_916">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="199" name="onnx::Concat_8343" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Concat_8338,onnx::Concat_8343,onnx::Concat_8362,onnx::Concat_8411,onnx::Concat_8416,onnx::Concat_8435,onnx::Concat_8484,onnx::Concat_8489,onnx::Concat_8508,onnx::Concat_8557,onnx::Concat_8562,onnx::Concat_8581,onnx::Concat_8630,onnx::Concat_8635,onnx::Concat_8654,onnx::Concat_8703,onnx::Concat_8708,onnx::Concat_8727,onnx::Concat_8784,onnx::Concat_8789,onnx::Concat_8808,onnx::Concat_8843,onnx::Concat_8844,onnx::Concat_8845,onnx::Concat_8846,onnx::Concat_8851,onnx::Concat_8870,onnx::Concat_8883,onnx::Concat_8884,onnx::Concat_8885,onnx::Concat_8886,onnx::Concat_8891,onnx::Concat_8910,onnx::Concat_8923,onnx::Concat_8924,onnx::Concat_8925,onnx::Concat_8926,onnx::Concat_8931,onnx::Concat_8950,onnx::Concat_8964,onnx::Concat_8965,onnx::Concat_8966,onnx::Concat_8967,onnx::Concat_8972,onnx::Concat_8991,onnx::Concat_9004,onnx::Concat_9005,onnx::Concat_9006,onnx::Concat_9007,onnx::Concat_9012,onnx::Concat_9031,onnx::Concat_9044,onnx::Concat_9045,onnx::Concat_9046,onnx::Concat_9047,onnx::Concat_9052,onnx::Concat_9071,onnx::Concat_9085,onnx::Concat_9086,onnx::Concat_9087,onnx::Concat_9088,onnx::Concat_9093,onnx::Concat_9112,onnx::Concat_9125,onnx::Concat_9126,onnx::Concat_9127,onnx::Concat_9128,onnx::Concat_9133,onnx::Concat_9152,onnx::Concat_9165,onnx::Concat_9166,onnx::Concat_9167,onnx::Concat_9168,onnx::Concat_9173,onnx::Concat_9192">
+		<layer id="183" name="/time_embedding/linear_1/Gemm" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
+				</port>
+				<port id="1" precision="FP32">
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="200" name="Constant_90582" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="19464604" size="16"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_923"/>
-			</rt_info>
+			</input>
 			<output>
-				<port id="0" precision="I64">
+				<port id="2" precision="FP32" names="/time_embedding/linear_1/Gemm_output_0">
 					<dim>2</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="201" name="Constant_90583" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="202" name="Gather_90584" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_923"/>
-			</rt_info>
+		<layer id="184" name="/time_embedding/act/Mul" type="Swish" version="opset4">
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64">
+				<port id="1" precision="FP32" names="/time_embedding/act/Mul_output_0">
 					<dim>2</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="203" name="onnx::Reshape_923" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_923"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>2</dim>
-				</port>
-			</input>
+		<layer id="185" name="time_embedding.linear_2.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="2965552" size="3276800" />
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_923">
-					<dim>4</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="204" name="onnx::Transpose_924" type="Reshape" version="opset1">
-			<data special_zero="true"/>
+		<layer id="186" name="time_embedding.linear_2.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_924"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_924">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="205" name="Constant_1533" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_1533"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32" names="time_embedding.linear_2.weight">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="206" name="onnx::Reshape_925" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_925"/>
-			</rt_info>
+		<layer id="187" name="/time_embedding/linear_2/Gemm/WithoutBiases" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_925">
+				<port id="2" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="207" name="onnx::Div_926" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_926"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_926"/>
-			</output>
-		</layer>
-		<layer id="208" name="onnx::Cast_927" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_927, onnx::Cast_928, onnx::Div_926, onnx::Unsqueeze_929"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_927,onnx::Cast_928,onnx::Unsqueeze_929"/>
-			</output>
-		</layer>
-		<layer id="209" name="onnx::Unsqueeze_932" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_932"/>
-			</rt_info>
+		<layer id="188" name="Constant_86876_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280" offset="6242352" size="2560" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_932">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="210" name="onnx::Concat_933" type="Unsqueeze" version="opset1">
+		<layer id="189" name="Constant_86876" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_933, onnx::Unsqueeze_932"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_933">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="211" name="Constant_88095" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_1416, onnx::Concat_935, onnx::Gather_905, onnx::Unsqueeze_906, onnx::Unsqueeze_934"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="212" name="Constant_1416" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_1416"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="213" name="onnx::Unsqueeze_906" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_1416, onnx::Concat_935, onnx::Gather_905, onnx::Unsqueeze_906, onnx::Unsqueeze_934"/>
-			</rt_info>
+		<layer id="190" name="/time_embedding/linear_2/Gemm" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_935">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/time_embedding/linear_2/Gemm_output_0">
+					<dim>2</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="214" name="onnx::Gather_908" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_908"/>
-			</rt_info>
+		<layer id="191" name="/down_blocks.0/resnets.0/nonlinearity_1/Mul" type="Swish" version="opset4">
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64" names="onnx::Gather_908"/>
+				<port id="1" precision="FP32" names="/down_blocks.0/resnets.0/nonlinearity_1/Mul_output_0">
+					<dim>2</dim>
+					<dim>1280</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="215" name="Constant_1420" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_1420"/>
-			</rt_info>
+		<layer id="192" name="down_blocks.0.resnets.0.time_emb_proj.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 1280" offset="6244912" size="819200" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>1280</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="216" name="onnx::Unsqueeze_909" type="Gather" version="opset8">
-			<data batch_dims="0"/>
+		<layer id="193" name="down_blocks.0.resnets.0.time_emb_proj.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_1420, onnx::Gather_908, onnx::Unsqueeze_909"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_909"/>
-			</output>
-		</layer>
-		<layer id="217" name="onnx::Mul_930" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_930"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_930"/>
+				<port id="1" precision="FP32" names="down_blocks.0.resnets.0.time_emb_proj.weight">
+					<dim>320</dim>
+					<dim>1280</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="218" name="onnx::Unsqueeze_931" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_930, onnx::Unsqueeze_931"/>
-			</rt_info>
+		<layer id="194" name="/down_blocks.0/resnets.0/time_emb_proj/Gemm/WithoutBiases" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>1280</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_931"/>
+				<port id="2" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="219" name="onnx::Unsqueeze_936" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_936"/>
-			</rt_info>
+		<layer id="195" name="Constant_86877_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320" offset="7064112" size="640" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_936">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="220" name="onnx::Concat_937" type="Unsqueeze" version="opset1">
+		<layer id="196" name="Constant_86877" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_937, onnx::Unsqueeze_936"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_937">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="221" name="onnx::Reshape_938" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_938"/>
-			</rt_info>
+		<layer id="197" name="/down_blocks.0/resnets.0/time_emb_proj/Gemm" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
 				</port>
-				<port id="2" precision="I64">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_938">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/down_blocks.0/resnets.0/time_emb_proj/Gemm_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="222" name="onnx::MatMul_939" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_939"/>
-			</rt_info>
+		<layer id="198" name="/down_blocks.0/resnets.0/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.0/resnets.0/Constant_output_0">
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="199" name="/down_blocks.0/resnets.0/Unsqueeze" type="Unsqueeze" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
+					<dim>320</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>3</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_939">
+				<port id="2" precision="FP32" names="/down_blocks.0/resnets.0/Unsqueeze_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="223" name="Constant_147435" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320" offset="19464620" size="409600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_941, onnx::MatMul_8339"/>
-			</rt_info>
+		<layer id="200" name="/down_blocks.0/resnets.0/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="7064752" size="8" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>320</dim>
-					<dim>320</dim>
+				<port id="0" precision="I64" names="/down_blocks.0/resnets.0/Constant_1_output_0">
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="224" name="onnx::Add_941" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_941, onnx::MatMul_8339"/>
-			</rt_info>
+		<layer id="201" name="/down_blocks.0/resnets.0/Unsqueeze_1" type="Unsqueeze" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>1</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>320</dim>
-					<dim>320</dim>
+				<port id="1" precision="I64">
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_941">
+				<port id="2" precision="FP32" names="/down_blocks.0/resnets.0/Unsqueeze_1_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="225" name="input.44" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.44"/>
-			</rt_info>
+		<layer id="202" name="/down_blocks.0/resnets.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>2</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.44">
+				<port id="2" precision="FP32" names="/down_blocks.0/resnets.0/Add_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="226" name="input.48" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.48"/>
-			</rt_info>
+		<layer id="203" name="/down_blocks.0/resnets.0/norm2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.0/resnets.0/norm2/Constant_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="204" name="/down_blocks.0/resnets.0/norm2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
+				<port id="1" precision="I64">
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.48">
+				<port id="2" precision="FP32" names="/down_blocks.0/resnets.0/norm2/Reshape_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
+					<dim>32</dim>
+					<dim>40960</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="227" name="Constant_1654" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_1654"/>
-			</rt_info>
+		<layer id="205" name="Constant_13730" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="228" name="onnx::Mul_952" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_948, onnx::Div_951, onnx::Mul_952, onnx::Pow_945, onnx::ReduceMean_947, onnx::Sqrt_950, onnx::Sub_944"/>
-			</rt_info>
+		<layer id="206" name="MVN_13731" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
+					<dim>32</dim>
+					<dim>40960</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_952">
+				<port id="2" precision="FP32" names="/down_blocks.0/resnets.0/norm2/InstanceNormalization_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
+					<dim>32</dim>
+					<dim>40960</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="229" name="Constant_150296" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="19874220" size="1280"/>
-			<output>
+		<layer id="207" name="/down_blocks.0/resnets.0/norm2/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
+			<input>
 				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>2</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="I64" names="/down_blocks.0/resnets.0/norm2/Shape_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="230" name="onnx::Add_953" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_953"/>
-			</rt_info>
+		<layer id="208" name="/down_blocks.0/resnets.0/norm2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
+					<dim>32</dim>
+					<dim>40960</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>320</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_953">
+				<port id="2" precision="FP32" names="/down_blocks.0/resnets.0/norm2/Reshape_1_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="231" name="Constant_150297" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="19875500" size="1280"/>
+		<layer id="209" name="Constant_86878_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="7064760" size="640" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="232" name="onnx::MatMul_954" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="210" name="Constant_86878" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_954"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="211" name="/down_blocks.0/resnets.0/norm2/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
 					<dim>1</dim>
 					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_954">
+				<port id="2" precision="FP32" names="/down_blocks.0/resnets.0/norm2/Mul_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="233" name="Constant_147443" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320" offset="19876780" size="409600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8340, q.7"/>
-			</rt_info>
+		<layer id="212" name="Constant_86879_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="7065400" size="640" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>320</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="234" name="q.7" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="213" name="Constant_86879" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8340, q.7"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="214" name="/down_blocks.0/resnets.0/norm2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>320</dim>
-					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.7">
+				<port id="2" precision="FP32" names="/down_blocks.0/resnets.0/norm2/Add_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="235" name="Constant_107061" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
-			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="236" name="onnx::Gather_961" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_961, onnx::Gather_964, onnx::Gather_967"/>
-			</rt_info>
+		<layer id="215" name="/down_blocks.0/resnets.0/nonlinearity_2/Mul" type="Swish" version="opset4">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_961,onnx::Gather_964,onnx::Gather_967">
-					<dim>3</dim>
+				<port id="1" precision="FP32" names="/down_blocks.0/resnets.0/nonlinearity_2/Mul_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="237" name="onnx::Gather_968" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_968"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_968"/>
-			</output>
-		</layer>
-		<layer id="238" name="Constant_1680" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_1680"/>
-			</rt_info>
+		<layer id="216" name="down_blocks.0.resnets.0.conv2.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320, 3, 3" offset="7066040" size="1843200" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="239" name="onnx::Div_969" type="Gather" version="opset8">
-			<data batch_dims="0"/>
+		<layer id="217" name="down_blocks.0.resnets.0.conv2.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_1680, onnx::Div_969, onnx::Gather_968"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>3</dim>
 					<dim>3</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_969"/>
-			</output>
-		</layer>
-		<layer id="240" name="onnx::Div_970" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_970"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_970"/>
+				<port id="1" precision="FP32" names="down_blocks.0.resnets.0.conv2.weight">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="241" name="onnx::Cast_971" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_971, onnx::Cast_972, onnx::Div_970, onnx::Unsqueeze_973"/>
-			</rt_info>
+		<layer id="218" name="/down_blocks.0/resnets.0/conv2/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_971,onnx::Cast_972,onnx::Unsqueeze_973"/>
+				<port id="2" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="242" name="onnx::Unsqueeze_981" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_981"/>
-			</rt_info>
+		<layer id="219" name="Reshape_13855_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="8909240" size="640" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_981">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="243" name="onnx::Concat_982" type="Unsqueeze" version="opset1">
+		<layer id="220" name="Reshape_13855" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_982, onnx::Unsqueeze_981"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_982">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="244" name="onnx::Reshape_983" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_983"/>
-			</rt_info>
+		<layer id="221" name="/down_blocks.0/resnets.0/conv2/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64">
-					<dim>4</dim>
+				<port id="2" precision="FP32" names="/down_blocks.0/resnets.0/conv2/Conv_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="245" name="onnx::Transpose_984" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_984"/>
-			</rt_info>
+		<layer id="222" name="/down_blocks.0/resnets.0/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_984">
+				<port id="2" precision="FP32" names="/down_blocks.0/resnets.0/Add_1_output_0,/down_blocks.0/resnets.0/Div_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="246" name="Constant_1793" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_1793"/>
-			</rt_info>
+		<layer id="223" name="/down_blocks.0/attentions.0/norm/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.0/norm/Constant_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="247" name="onnx::Reshape_985" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_985"/>
-			</rt_info>
+		<layer id="224" name="/down_blocks.0/attentions.0/norm/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_985">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/norm/Reshape_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>32</dim>
+					<dim>40960</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="248" name="onnx::Gather_962" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_962"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_962"/>
-			</output>
-		</layer>
-		<layer id="249" name="Constant_1672" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_1672"/>
-			</rt_info>
+		<layer id="225" name="Constant_13895" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64">
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="250" name="onnx::Unsqueeze_963" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_1672, onnx::Gather_962, onnx::Unsqueeze_963"/>
-			</rt_info>
+		<layer id="226" name="MVN_13896" type="MVN" version="opset6">
+			<data eps="9.9999999747524271e-07" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>40960</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>1</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_963"/>
-			</output>
-		</layer>
-		<layer id="251" name="onnx::Mul_986" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_986"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_986"/>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/norm/InstanceNormalization_output_0">
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>40960</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="252" name="onnx::Unsqueeze_987" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_986, onnx::Unsqueeze_987"/>
-			</rt_info>
+		<layer id="227" name="/down_blocks.0/attentions.0/norm/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_987"/>
-			</output>
-		</layer>
-		<layer id="253" name="onnx::Unsqueeze_992" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_992"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_992">
-					<dim>1</dim>
+				<port id="1" precision="I64" names="/down_blocks.0/attentions.0/norm/Shape_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="254" name="onnx::Concat_993" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_993, onnx::Unsqueeze_992"/>
-			</rt_info>
+		<layer id="228" name="/down_blocks.0/attentions.0/norm/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>40960</dim>
+				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_993">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/norm/Reshape_1_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="255" name="Constant_88122" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_1676, onnx::Concat_995, onnx::Gather_965, onnx::Unsqueeze_966, onnx::Unsqueeze_994"/>
-			</rt_info>
+		<layer id="229" name="Constant_86880_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="8909880" size="640" />
 			<output>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="256" name="Constant_1676" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_1676"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="257" name="onnx::Unsqueeze_966" type="Gather" version="opset8">
-			<data batch_dims="0"/>
+		<layer id="230" name="Constant_86880" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_1676, onnx::Concat_995, onnx::Gather_965, onnx::Unsqueeze_966, onnx::Unsqueeze_994"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_995">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="258" name="onnx::Div_988" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_988"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_988"/>
-			</output>
-		</layer>
-		<layer id="259" name="onnx::Cast_989" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_989, onnx::Cast_990, onnx::Div_988, onnx::Unsqueeze_991"/>
-			</rt_info>
+		<layer id="231" name="/down_blocks.0/attentions.0/norm/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_989,onnx::Cast_990,onnx::Unsqueeze_991"/>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/norm/Mul_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="260" name="onnx::Unsqueeze_996" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_996"/>
-			</rt_info>
+		<layer id="232" name="Constant_86881_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="8910520" size="640" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_996">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="261" name="onnx::Concat_997" type="Unsqueeze" version="opset1">
+		<layer id="233" name="Constant_86881" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_997, onnx::Unsqueeze_996"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_997">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="262" name="onnx::Reshape_998" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_998"/>
-			</rt_info>
+		<layer id="234" name="/down_blocks.0/attentions.0/norm/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_998">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/norm/Add_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="263" name="q.11" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="q.11"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
+		<layer id="235" name="down_blocks.0.attentions.0.proj_in.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320, 1, 1" offset="8911160" size="204800" />
 			<output>
-				<port id="2" precision="FP32" names="q.11">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="264" name="Constant_147450" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 768" offset="20286380" size="983040"/>
+		<layer id="236" name="down_blocks.0.attentions.0.proj_in.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="k.7, onnx::MatMul_8341"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32" names="down_blocks.0.attentions.0.proj_in.weight">
 					<dim>320</dim>
-					<dim>768</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="265" name="k.7" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.7, onnx::MatMul_8341"/>
-			</rt_info>
+		<layer id="237" name="/down_blocks.0/attentions.0/proj_in/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>77</dim>
-					<dim>768</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>320</dim>
-					<dim>768</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.7">
+				<port id="2" precision="FP32">
 					<dim>2</dim>
-					<dim>77</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="266" name="onnx::Reshape_8357" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="21269420" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_8357"/>
-			</rt_info>
+		<layer id="238" name="Reshape_14018_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="9115960" size="640" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_8348,onnx::Reshape_8357,onnx::Reshape_8421,onnx::Reshape_8430,onnx::Reshape_9098,onnx::Reshape_9107,onnx::Reshape_9138,onnx::Reshape_9147,onnx::Reshape_9178,onnx::Reshape_9187">
-					<dim>4</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="267" name="onnx::Transpose_1013" type="Reshape" version="opset1">
-			<data special_zero="true"/>
+		<layer id="239" name="Reshape_14018" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_1013"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="240" name="/down_blocks.0/attentions.0/proj_in/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>77</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_1013">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/proj_in/Conv_output_0">
 					<dim>2</dim>
-					<dim>77</dim>
-					<dim>8</dim>
-					<dim>40</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="268" name="Constant_1917" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_1917"/>
-			</rt_info>
+		<layer id="241" name="Constant_14046" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9116600" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="269" name="onnx::Reshape_1014" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1014"/>
-			</rt_info>
+		<layer id="242" name="/down_blocks.0/attentions.0/Transpose" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>77</dim>
-					<dim>8</dim>
-					<dim>40</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_1014">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/Transpose_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>77</dim>
-					<dim>40</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="270" name="onnx::Reshape_8361" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="21269452" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_8361"/>
-			</rt_info>
+		<layer id="243" name="/down_blocks.0/attentions.0/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="9116632" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_8352,onnx::Reshape_8361,onnx::Reshape_8425,onnx::Reshape_8434,onnx::Reshape_9102,onnx::Reshape_9111,onnx::Reshape_9142,onnx::Reshape_9151,onnx::Reshape_9182,onnx::Reshape_9191">
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.0/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="271" name="k.11" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.11"/>
-			</rt_info>
+		<layer id="244" name="/down_blocks.0/attentions.0/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>77</dim>
-					<dim>40</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+					<dim>320</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.11">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>40</dim>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/Reshape_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="272" name="onnx::Mul_1050" type="Einsum" version="opset7">
-			<data equation="bid,bjd-&gt;bij"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_1050"/>
-			</rt_info>
+		<layer id="245" name="Constant_14055" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
+			<output>
+				<port id="0" precision="I64">
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="246" name="/down_blocks.0/attentions.0/transformer_blocks.0/norm1/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
+					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>320</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>40</dim>
+				<port id="1" precision="I64">
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_1050">
-					<dim>16</dim>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/norm1/Div_output_0">
+					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>77</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="273" name="Constant_150298" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1" offset="19054992" size="4"/>
+		<layer id="247" name="Constant_86882_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="9116664" size="640" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="274" name="onnx::Softmax_1052" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="248" name="Constant_86882" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Softmax_1052"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>77</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Softmax_1052">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>77</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="275" name="attn.3" type="SoftMax" version="opset8">
-			<data axis="-1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="attn.3"/>
-			</rt_info>
+		<layer id="249" name="/down_blocks.0/attentions.0/transformer_blocks.0/norm1/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
+					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>77</dim>
+					<dim>320</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="attn.3">
-					<dim>16</dim>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/norm1/Mul_output_0">
+					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>77</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="250" name="Constant_86883_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="9117304" size="640" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="276" name="Constant_147457" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 768" offset="21269476" size="983040"/>
+		<layer id="251" name="Constant_86883" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8342, v.7"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
+			</input>
 			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="252" name="/down_blocks.0/attentions.0/transformer_blocks.0/norm1/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
 				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
+			</input>
+			<output>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/norm1/Add_1_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="253" name="Constant_84954_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320" offset="9117944" size="204800" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
 					<dim>320</dim>
-					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="277" name="v.7" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="254" name="Constant_84954" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8342, v.7"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="255" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/to_q/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>77</dim>
-					<dim>768</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>320</dim>
-					<dim>768</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.7">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/to_q/MatMul_output_0">
 					<dim>2</dim>
-					<dim>77</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="278" name="onnx::Transpose_1038" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_1038"/>
-			</rt_info>
+		<layer id="256" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322744" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Constant_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="257" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>77</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 				<port id="1" precision="I64">
@@ -4311,33 +4046,27 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_1038">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Reshape_output_0">
 					<dim>2</dim>
-					<dim>77</dim>
+					<dim>4096</dim>
 					<dim>8</dim>
 					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="279" name="Constant_1928" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_1928"/>
-			</rt_info>
+		<layer id="258" name="Constant_14078" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="280" name="onnx::Reshape_1039" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1039"/>
-			</rt_info>
+		<layer id="259" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Transpose" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>77</dim>
+					<dim>4096</dim>
 					<dim>8</dim>
 					<dim>40</dim>
 				</port>
@@ -4346,24 +4075,29 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_1039">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Transpose_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
-					<dim>77</dim>
+					<dim>4096</dim>
 					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="281" name="v.11" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="v.11"/>
-			</rt_info>
+		<layer id="260" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="9322808" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Constant_1_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="261" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>8</dim>
-					<dim>77</dim>
+					<dim>4096</dim>
 					<dim>40</dim>
 				</port>
 				<port id="1" precision="I64">
@@ -4371,250 +4105,309 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.11">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Reshape_1_output_0">
 					<dim>16</dim>
-					<dim>77</dim>
+					<dim>4096</dim>
 					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="282" name="out.3" type="Einsum" version="opset7">
-			<data equation="bij,bjd-&gt;bid"/>
+		<layer id="262" name="Constant_84961_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320" offset="9322832" size="204800" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="263" name="Constant_84961" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="out.3"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="264" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/to_k/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
+					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>77</dim>
+					<dim>320</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>40</dim>
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="out.3">
-					<dim>16</dim>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/to_k/MatMul_output_0">
+					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="283" name="onnx::Gather_1055" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_1055, onnx::Gather_1058, onnx::Gather_1061"/>
-			</rt_info>
+		<layer id="265" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322744" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Constant_2_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="266" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Reshape_2" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
+					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>320</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_1055,onnx::Gather_1058,onnx::Gather_1061">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Reshape_2_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="284" name="onnx::Gather_1056" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_1056"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_1056"/>
-			</output>
-		</layer>
-		<layer id="285" name="Constant_1940" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_1940"/>
-			</rt_info>
+		<layer id="267" name="Constant_14094" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64">
+					<dim>4</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="286" name="onnx::Div_1057" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_1940, onnx::Div_1057, onnx::Gather_1056"/>
-			</rt_info>
+		<layer id="268" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Transpose_1" type="Transpose" version="opset1">
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>8</dim>
+					<dim>40</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_1057"/>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Transpose_1_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="287" name="onnx::Div_1064" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_1064"/>
-			</rt_info>
+		<layer id="269" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="9322808" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_1064"/>
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Constant_3_output_0">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="288" name="onnx::Cast_1065" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_1065, onnx::Cast_1066, onnx::Div_1064, onnx::Unsqueeze_1067"/>
-			</rt_info>
+		<layer id="270" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Reshape_3" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>3</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_1065,onnx::Cast_1066,onnx::Unsqueeze_1067"/>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Reshape_3_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="289" name="onnx::Unsqueeze_1069" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_1069"/>
-			</rt_info>
+		<layer id="271" name="Constant_86884_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1" offset="9527632" size="2" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_1069">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="290" name="onnx::Concat_1070" type="Unsqueeze" version="opset1">
+		<layer id="272" name="Constant_86884" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_1070, onnx::Unsqueeze_1069"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_1070">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="291" name="Constant_90592" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="19464604" size="16"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_1077"/>
-			</rt_info>
+		<layer id="273" name="Multiply_86175" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
+				<port id="2" precision="FP32">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="292" name="Constant_90593" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
+		<layer id="274" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Mul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Add_output_0,/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Mul_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>4096</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="293" name="Gather_90594" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_1077"/>
-			</rt_info>
+		<layer id="275" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Softmax" type="SoftMax" version="opset8">
+			<data axis="-1" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>2</dim>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>4096</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64">
-					<dim>2</dim>
+				<port id="1" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Cast_output_0,/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Softmax_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>4096</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="276" name="Constant_84968_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320" offset="9527634" size="204800" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="294" name="onnx::Reshape_1077" type="Concat" version="opset1">
-			<data axis="0"/>
+		<layer id="277" name="Constant_84968" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_1077"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_1077">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="295" name="onnx::Transpose_1078" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_1078"/>
-			</rt_info>
+		<layer id="278" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/to_v/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
+					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>320</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_1078">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/to_v/MatMul_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="296" name="Constant_2061" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_2061"/>
-			</rt_info>
+		<layer id="279" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Constant_4" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322744" size="32" />
 			<output>
-				<port id="0" precision="I64">
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Constant_4_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="297" name="onnx::Reshape_1079" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1079"/>
-			</rt_info>
+		<layer id="280" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Reshape_4" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>320</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_1079">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Reshape_4_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>8</dim>
@@ -4622,205 +4415,154 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="298" name="onnx::Div_1080" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_1080"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_1080"/>
-			</output>
-		</layer>
-		<layer id="299" name="onnx::Cast_1081" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_1081, onnx::Cast_1082, onnx::Div_1080, onnx::Unsqueeze_1083"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_1081,onnx::Cast_1082,onnx::Unsqueeze_1083"/>
-			</output>
-		</layer>
-		<layer id="300" name="onnx::Unsqueeze_1086" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_1086"/>
-			</rt_info>
+		<layer id="281" name="Constant_14110" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_1086">
-					<dim>1</dim>
+				<port id="0" precision="I64">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="301" name="onnx::Concat_1087" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_1087, onnx::Unsqueeze_1086"/>
-			</rt_info>
+		<layer id="282" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Transpose_2" type="Transpose" version="opset1">
 			<input>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>8</dim>
+					<dim>40</dim>
+				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_1087">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Transpose_2_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="302" name="Constant_88149" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_1944, onnx::Concat_1089, onnx::Gather_1059, onnx::Unsqueeze_1060, onnx::Unsqueeze_1088"/>
-			</rt_info>
+		<layer id="283" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="9322808" size="24" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Constant_5_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="303" name="Constant_1944" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_1944"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="304" name="onnx::Unsqueeze_1060" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_1944, onnx::Concat_1089, onnx::Gather_1059, onnx::Unsqueeze_1060, onnx::Unsqueeze_1088"/>
-			</rt_info>
+		<layer id="284" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Reshape_5" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>3</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_1089">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Reshape_5_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="305" name="onnx::Gather_1062" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_1062"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_1062"/>
-			</output>
-		</layer>
-		<layer id="306" name="Constant_1948" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_1948"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="307" name="onnx::Unsqueeze_1063" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_1948, onnx::Gather_1062, onnx::Unsqueeze_1063"/>
-			</rt_info>
+		<layer id="285" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/MatMul_1" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="false" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>4096</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_1063"/>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/MatMul_1_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="308" name="onnx::Mul_1084" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_1084"/>
-			</rt_info>
+		<layer id="286" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Constant_8" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9732434" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Mul_1084"/>
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Constant_8_output_0">
+					<dim>4</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="309" name="onnx::Unsqueeze_1085" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_1084, onnx::Unsqueeze_1085"/>
-			</rt_info>
+		<layer id="287" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Reshape_6" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_1085"/>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Reshape_6_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="310" name="onnx::Unsqueeze_1090" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_1090"/>
-			</rt_info>
+		<layer id="288" name="Constant_14136" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_1090">
-					<dim>1</dim>
+				<port id="0" precision="I64">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="311" name="onnx::Concat_1091" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_1091, onnx::Unsqueeze_1090"/>
-			</rt_info>
+		<layer id="289" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Transpose_4" type="Transpose" version="opset1">
 			<input>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_1091">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Transpose_4_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="312" name="onnx::Reshape_1092" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1092"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
+		<layer id="290" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Constant_9" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="9116632" size="24" />
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_1092">
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Constant_9_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="313" name="onnx::MatMul_1093" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_1093"/>
-			</rt_info>
+		<layer id="291" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Reshape_7" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -4833,30 +4575,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_1093">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/Reshape_7_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="314" name="Constant_147464" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320" offset="22252516" size="409600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1095, onnx::MatMul_8363"/>
-			</rt_info>
+		<layer id="292" name="Constant_84975_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320" offset="9732466" size="204800" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>320</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="315" name="onnx::Add_1095" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="293" name="Constant_84975" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1095, onnx::MatMul_8363"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="294" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/to_out.0/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -4869,18 +4623,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_1095">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/to_out.0/MatMul_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="316" name="input.52" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.52"/>
-			</rt_info>
+		<layer id="295" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/to_out.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>1</dim>
@@ -4894,18 +4645,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.52">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn1/to_out.0/Add_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="317" name="input.56" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.56"/>
-			</rt_info>
+		<layer id="296" name="/down_blocks.0/attentions.0/transformer_blocks.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -4919,29 +4667,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.56">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/Add_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="318" name="Constant_2182" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_2182"/>
-			</rt_info>
+		<layer id="297" name="Constant_14148" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="319" name="onnx::Mul_1106" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1102, onnx::Div_1105, onnx::Mul_1106, onnx::Pow_1099, onnx::ReduceMean_1101, onnx::Sqrt_1104, onnx::Sub_1098"/>
-			</rt_info>
+		<layer id="298" name="/down_blocks.0/attentions.0/transformer_blocks.0/norm2/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -4953,28 +4695,45 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_1106">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/norm2/Div_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="320" name="Constant_150300" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="22662116" size="1280"/>
+		<layer id="299" name="Constant_86886_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="9937266" size="640" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="321" name="onnx::Add_1107" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="300" name="Constant_86886" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1107"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="301" name="/down_blocks.0/attentions.0/transformer_blocks.0/norm2/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -4988,28 +4747,45 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_1107">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/norm2/Mul_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="322" name="Constant_150301" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="22663396" size="1280"/>
+		<layer id="302" name="Constant_86887_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="9937906" size="640" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="323" name="onnx::MatMul_1108" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="303" name="Constant_86887" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_1108"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="304" name="/down_blocks.0/attentions.0/transformer_blocks.0/norm2/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -5023,30 +4799,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_1108">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/norm2/Add_1_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="324" name="Constant_147472" type="Const" version="opset1">
-			<data element_type="f32" shape="2560, 320" offset="22664676" size="3276800"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1110, onnx::MatMul_8364"/>
-			</rt_info>
+		<layer id="305" name="Constant_84983_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320" offset="9938546" size="204800" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>2560</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="325" name="onnx::Add_1110" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="306" name="Constant_84983" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1110, onnx::MatMul_8364"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="307" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/to_q/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -5054,517 +4842,625 @@
 					<dim>320</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>2560</dim>
+					<dim>320</dim>
 					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_1110">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/to_q/MatMul_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>2560</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="326" name="onnx::Shape_1111" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Shape_1111"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>2560</dim>
+		<layer id="308" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322744" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Constant_output_0">
+					<dim>4</dim>
 				</port>
-				<port id="1" precision="FP32">
+			</output>
+		</layer>
+		<layer id="309" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
+			<input>
+				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>2560</dim>
+					<dim>320</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Shape_1111">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Reshape_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>2560</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="327" name="Constant_125667" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_2210, onnx::Gather_1113, onnx::Mul_1122"/>
-			</rt_info>
+		<layer id="310" name="Constant_14171" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>3</dim>
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="328" name="Constant_125668" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_2210, onnx::Gather_1113, onnx::Mul_1122"/>
-			</rt_info>
+		<layer id="311" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Transpose" type="Transpose" version="opset1">
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>8</dim>
+					<dim>40</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Transpose_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="329" name="Constant_125664" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_2210, onnx::Gather_1113, onnx::Mul_1122"/>
-			</rt_info>
+		<layer id="312" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="9322808" size="24" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Constant_1_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="330" name="onnx::Gather_1112" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_1112"/>
-			</rt_info>
+		<layer id="313" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>8</dim>
 					<dim>4096</dim>
-					<dim>2560</dim>
+					<dim>40</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_1112">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Reshape_1_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="331" name="onnx::Gather_1113" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_1113"/>
-			</rt_info>
+		<layer id="314" name="Constant_84990_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 768" offset="10143346" size="491520" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Gather_1113">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="332" name="Constant_2199" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
+		<layer id="315" name="Constant_84990" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_2199"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>768</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>768</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="333" name="onnx::Add_1114" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_2199, onnx::Add_1114, onnx::Gather_1113"/>
-			</rt_info>
+		<layer id="316" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/to_k/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>768</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>768</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Add_1114">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/to_k/MatMul_output_0">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="334" name="onnx::Add_1116" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1116"/>
-			</rt_info>
+		<layer id="317" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="10634866" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Add_1116">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Constant_2_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="335" name="onnx::Div_1117" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1116, onnx::Div_1117"/>
-			</rt_info>
+		<layer id="318" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Reshape_2" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>320</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
+			</input>
+			<output>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Reshape_2_output_0">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>8</dim>
+					<dim>40</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="319" name="Constant_14187" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
+			<output>
 				<port id="0" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="320" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Transpose_1" type="Transpose" version="opset1">
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Div_1117">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Transpose_1_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>77</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="336" name="onnx::Div_1118" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_1118"/>
-			</rt_info>
+		<layer id="321" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="10634898" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_1118">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Constant_3_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="337" name="onnx::Mul_1119" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_1118, onnx::Mul_1119, onnx::Mul_1120, onnx::Slice_1121"/>
-			</rt_info>
+		<layer id="322" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Reshape_3" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>77</dim>
+					<dim>40</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Mul_1119,onnx::Slice_1121">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Reshape_3_output_0">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="338" name="Constant_125663" type="Const" version="opset1">
-			<data element_type="i32" shape="1" offset="25941500" size="4"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_2210, onnx::Gather_1113, onnx::Mul_1122"/>
-			</rt_info>
+		<layer id="323" name="Constant_86888_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1" offset="9527632" size="2" />
 			<output>
-				<port id="0" precision="I32">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="339" name="ScatterUpdate_125669" type="ScatterUpdate" version="opset3">
+		<layer id="324" name="Constant_86888" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_2210, onnx::Gather_1113, onnx::Mul_1122"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
-				</port>
-				<port id="3" precision="I32">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="I64">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="340" name="Constant_125672" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941504" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_2210, onnx::Gather_1113, onnx::Mul_1122"/>
-			</rt_info>
+		<layer id="325" name="Multiply_86177" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>40</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="2" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="341" name="onnx::Mul_1122" type="StridedSlice" version="opset1">
-			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask=""/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_2210, onnx::Gather_1113, onnx::Mul_1122"/>
-			</rt_info>
+		<layer id="326" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Mul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>2</dim>
+					<dim>16</dim>
 					<dim>4096</dim>
-					<dim>2560</dim>
+					<dim>40</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>40</dim>
 				</port>
-				<port id="2" precision="I64">
-					<dim>3</dim>
+			</input>
+			<output>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Add_output_0,/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Mul_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>77</dim>
 				</port>
-				<port id="3" precision="I64">
-					<dim>3</dim>
+			</output>
+		</layer>
+		<layer id="327" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Softmax" type="SoftMax" version="opset8">
+			<data axis="-1" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>77</dim>
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="FP32" names="onnx::Mul_1122">
-					<dim>2</dim>
+				<port id="1" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Cast_output_0,/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Softmax_output_0">
+					<dim>16</dim>
 					<dim>4096</dim>
-					<dim>1280</dim>
+					<dim>77</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="342" name="Constant_125736" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_2279, onnx::Div_1125, onnx::Gather_1113"/>
-			</rt_info>
+		<layer id="328" name="Constant_84997_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 768" offset="10634922" size="491520" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="343" name="Constant_125735" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
+		<layer id="329" name="Constant_84997" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_2279, onnx::Div_1125, onnx::Gather_1113"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>768</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="344" name="Constant_125734" type="Const" version="opset1">
-			<data element_type="i32" shape="1" offset="25941500" size="4"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_2279, onnx::Div_1125, onnx::Gather_1113"/>
-			</rt_info>
+		<layer id="330" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/to_v/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>768</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>768</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I32">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/to_v/MatMul_output_0">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="345" name="ScatterUpdate_125737" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_2279, onnx::Div_1125, onnx::Gather_1113"/>
-			</rt_info>
+		<layer id="331" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Constant_4" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="10634866" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Constant_4_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="332" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Reshape_4" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>320</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="3" precision="I32">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="I64">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Reshape_4_output_0">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="346" name="Constant_125738" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_2279, onnx::Div_1125, onnx::Gather_1113"/>
-			</rt_info>
+		<layer id="333" name="Constant_14203" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>3</dim>
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="347" name="onnx::Mul_1123" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_1123"/>
-			</rt_info>
+		<layer id="334" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Transpose_2" type="Transpose" version="opset1">
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>8</dim>
+					<dim>40</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64" names="onnx::Mul_1123">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Transpose_2_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>77</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="348" name="onnx::Slice_1124" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_1123, onnx::Slice_1124"/>
-			</rt_info>
+		<layer id="335" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="10634898" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Constant_5_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="336" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Reshape_5" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>77</dim>
+					<dim>40</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Slice_1124">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Reshape_5_output_0">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="349" name="ScatterUpdate_125739" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_2279, onnx::Div_1125, onnx::Gather_1113"/>
-			</rt_info>
+		<layer id="337" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/MatMul_1" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="false" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>77</dim>
 				</port>
-				<port id="3" precision="I32">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>40</dim>
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="I64">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/MatMul_1_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="350" name="Constant_125742" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941504" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_2279, onnx::Div_1125, onnx::Gather_1113"/>
-			</rt_info>
+		<layer id="338" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Constant_8" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9732434" size="32" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Constant_8_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="351" name="onnx::Div_1125" type="StridedSlice" version="opset1">
-			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask=""/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_2279, onnx::Div_1125, onnx::Gather_1113"/>
-			</rt_info>
+		<layer id="339" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Reshape_6" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>2</dim>
+					<dim>16</dim>
 					<dim>4096</dim>
-					<dim>2560</dim>
+					<dim>40</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="3" precision="I64">
-					<dim>3</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="FP32" names="onnx::Div_1125">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Reshape_6_output_0">
 					<dim>2</dim>
+					<dim>8</dim>
 					<dim>4096</dim>
-					<dim>1280</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="352" name="onnx::Mul_1133" type="Gelu" version="opset7">
-			<data approximation_mode="ERF"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1128, onnx::Erf_1127, onnx::Mul_1130, onnx::Mul_1131, onnx::Mul_1133"/>
-			</rt_info>
+		<layer id="340" name="Constant_14229" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
+			<output>
+				<port id="0" precision="I64">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="341" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Transpose_4" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>8</dim>
 					<dim>4096</dim>
-					<dim>1280</dim>
+					<dim>40</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="onnx::Mul_1133">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Transpose_4_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>1280</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="353" name="input.60" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.60"/>
-			</rt_info>
+		<layer id="342" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Constant_9" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="9116632" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Constant_9_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="343" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Reshape_7" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>1280</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>1280</dim>
+				<port id="1" precision="I64">
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.60">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/Reshape_7_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>1280</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="354" name="Constant_147480" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 1280" offset="25941528" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1136, onnx::MatMul_8365"/>
-			</rt_info>
+		<layer id="344" name="Constant_85004_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320" offset="11126442" size="204800" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
+					<dim>320</dim>
 					<dim>320</dim>
-					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="355" name="onnx::Add_1136" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="345" name="Constant_85004" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1136, onnx::MatMul_8365"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="346" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/to_out.0/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>1280</dim>
+					<dim>320</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>320</dim>
-					<dim>1280</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_1136">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/to_out.0/MatMul_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="356" name="onnx::Add_1137" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1137"/>
-			</rt_info>
+		<layer id="347" name="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/to_out.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>1</dim>
@@ -5578,18 +5474,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_1137">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/attn2/to_out.0/Add_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="357" name="onnx::Reshape_1138" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1138"/>
-			</rt_info>
+		<layer id="348" name="/down_blocks.0/attentions.0/transformer_blocks.0/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -5603,29 +5496,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_1138">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/Add_1_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="358" name="onnx::Reshape_8443" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="27579928" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_8443"/>
-			</rt_info>
+		<layer id="349" name="Constant_14241" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_8370,onnx::Reshape_8443">
-					<dim>4</dim>
+				<port id="0" precision="I64">
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="359" name="onnx::Transpose_1148" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_1148"/>
-			</rt_info>
+		<layer id="350" name="/down_blocks.0/attentions.0/transformer_blocks.0/norm3/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -5633,491 +5520,576 @@
 					<dim>320</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_1148">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/norm3/Div_output_0">
 					<dim>2</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="360" name="Constant_2363" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="27579960" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_2363"/>
-			</rt_info>
+		<layer id="351" name="Constant_86890_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="11331242" size="640" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="361" name="input.64" type="Transpose" version="opset1">
+		<layer id="352" name="Constant_86890" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.64"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>320</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.64">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="362" name="m.down_blocks.0.attentions.0.proj_out.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320, 1, 1" offset="27579992" size="409600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.0.attentions.0.proj_out.weight"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.0.attentions.0.proj_out.weight">
-					<dim>320</dim>
-					<dim>320</dim>
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="363" name="Convolution_2365" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_2365"/>
-			</rt_info>
+		<layer id="353" name="/down_blocks.0/attentions.0/transformer_blocks.0/norm3/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>320</dim>
-					<dim>320</dim>
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/norm3/Mul_output_0">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="364" name="Reshape_2385" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="27989592" size="1280"/>
+		<layer id="354" name="Constant_86891_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="11331882" size="640" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="365" name="onnx::Add_1150" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="355" name="Constant_86891" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_2384, Reshape_2385, onnx::Add_1150"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_1150">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="366" name="onnx::Cast_1151" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.68, onnx::Cast_1151"/>
-			</rt_info>
+		<layer id="356" name="/down_blocks.0/attentions.0/transformer_blocks.0/norm3/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>2</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.68,onnx::Cast_1151">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/norm3/Add_1_output_0">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="367" name="onnx::Reshape_1153" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1153"/>
-			</rt_info>
+		<layer id="357" name="Constant_85012_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="2560, 320" offset="11332522" size="1638400" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_1153">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>2560</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="368" name="onnx::InstanceNormalization_1154" type="Reshape" version="opset1">
-			<data special_zero="true"/>
+		<layer id="358" name="Constant_85012" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_1154"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>2560</dim>
+					<dim>320</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>2560</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="359" name="/down_blocks.0/attentions.0/transformer_blocks.0/ff/net.0/proj/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>2560</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_1154">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/ff/net.0/proj/MatMul_output_0">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>40960</dim>
+					<dim>4096</dim>
+					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="369" name="Constant_2424" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_2424"/>
-			</rt_info>
+		<layer id="360" name="/down_blocks.0/attentions.0/transformer_blocks.0/ff/net.0/proj/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>2560</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>2560</dim>
+				</port>
+			</input>
+			<output>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/ff/net.0/proj/Add_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>2560</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="361" name="Constant_76208" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
+			<output>
+				<port id="0" precision="I64">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="362" name="Constant_76209" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
+			<output>
+				<port id="0" precision="I64">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="363" name="Constant_76205" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="370" name="MVN_2425" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_2444, Concat_2489, MVN_2425, Multiply_2472, Reshape_2445, Reshape_2490, onnx::Reshape_1157"/>
-			</rt_info>
+		<layer id="364" name="/down_blocks.0/attentions.0/transformer_blocks.0/ff/net.0/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>40960</dim>
+					<dim>4096</dim>
+					<dim>2560</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="I64" names="/down_blocks.0/attentions.0/transformer_blocks.0/ff/net.0/Shape_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="365" name="/down_blocks.0/attentions.0/transformer_blocks.0/ff/net.0/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.0/transformer_blocks.0/ff/net.0/Constant_output_0">
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="366" name="Constant_14258" type="Const" version="opset1">
+			<data element_type="i64" shape="" offset="2143392" size="8" />
+			<output>
+				<port id="0" precision="I64" />
+			</output>
+		</layer>
+		<layer id="367" name="/down_blocks.0/attentions.0/transformer_blocks.0/ff/net.0/Gather" type="Gather" version="opset8">
+			<data batch_dims="0" />
+			<input>
+				<port id="0" precision="I64">
+					<dim>3</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
+				<port id="2" precision="I64" />
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_1157">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>40960</dim>
+				<port id="3" precision="I64" names="/down_blocks.0/attentions.0/transformer_blocks.0/ff/net.0/Gather_output_0">
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="371" name="onnx::Reshape_1158" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1158"/>
-			</rt_info>
+		<layer id="368" name="/down_blocks.0/attentions.0/transformer_blocks.0/ff/net.0/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="2143400" size="8" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.0/transformer_blocks.0/ff/net.0/Constant_2_output_0">
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="369" name="/down_blocks.0/attentions.0/transformer_blocks.0/ff/net.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+				<port id="0" precision="I64">
+					<dim>1</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_1158">
-					<dim>4</dim>
+				<port id="2" precision="I64" names="/down_blocks.0/attentions.0/transformer_blocks.0/ff/net.0/Add_output_0">
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="372" name="onnx::Mul_1159" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_1159"/>
-			</rt_info>
+		<layer id="370" name="/down_blocks.0/attentions.0/transformer_blocks.0/ff/net.0/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.0/transformer_blocks.0/ff/net.0/Constant_3_output_0">
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="371" name="/down_blocks.0/attentions.0/transformer_blocks.0/ff/net.0/Div" type="Divide" version="opset1">
+			<data auto_broadcast="numpy" m_pythondiv="true" />
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>40960</dim>
+				<port id="0" precision="I64">
+					<dim>1</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_1159">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+				<port id="2" precision="I64" names="/down_blocks.0/attentions.0/transformer_blocks.0/ff/net.0/Div_output_0,/down_blocks.0/attentions.0/transformer_blocks.0/ff/net.0/Mul_output_0">
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="373" name="Constant_150304" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="27990872" size="1280"/>
+		<layer id="372" name="Constant_76204" type="Const" version="opset1">
+			<data element_type="i32" shape="1" offset="12970946" size="4" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="I32">
 					<dim>1</dim>
-					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="373" name="ScatterUpdate_76210" type="ScatterUpdate" version="opset3">
+			<input>
+				<port id="0" precision="I64">
+					<dim>3</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>1</dim>
+				</port>
+				<port id="2" precision="I64">
 					<dim>1</dim>
+				</port>
+				<port id="3" precision="I32">
 					<dim>1</dim>
 				</port>
+			</input>
+			<output>
+				<port id="4" precision="I64">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="374" name="onnx::Add_1162" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1162"/>
-			</rt_info>
+		<layer id="374" name="Constant_76213" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970950" size="24" />
+			<output>
+				<port id="0" precision="I64">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="375" name="/down_blocks.0/attentions.0/transformer_blocks.0/ff/net.0/Slice" type="StridedSlice" version="opset1">
+			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask="" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>4096</dim>
+					<dim>2560</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="1" precision="I64">
+					<dim>3</dim>
+				</port>
+				<port id="2" precision="I64">
+					<dim>3</dim>
+				</port>
+				<port id="3" precision="I64">
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_1162">
+				<port id="4" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/ff/net.0/Slice_output_0">
 					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>4096</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="375" name="Constant_150305" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="27992152" size="1280"/>
+		<layer id="376" name="Constant_76277" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
+				<port id="0" precision="I64">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="377" name="Constant_76276" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
+			<output>
+				<port id="0" precision="I64">
 					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="378" name="Constant_76275" type="Const" version="opset1">
+			<data element_type="i32" shape="1" offset="12970946" size="4" />
+			<output>
+				<port id="0" precision="I32">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="376" name="onnx::Cast_1165" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.72, onnx::Cast_1165"/>
-			</rt_info>
+		<layer id="379" name="ScatterUpdate_76278" type="ScatterUpdate" version="opset3">
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+				<port id="0" precision="I64">
+					<dim>3</dim>
 				</port>
-				<port id="1" precision="FP32">
+				<port id="1" precision="I64">
 					<dim>1</dim>
-					<dim>320</dim>
+				</port>
+				<port id="2" precision="I64">
 					<dim>1</dim>
+				</port>
+				<port id="3" precision="I32">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.72,onnx::Cast_1165">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+				<port id="4" precision="I64">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="377" name="input.76" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.76, onnx::Mul_1167"/>
-			</rt_info>
+		<layer id="380" name="Constant_76279" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
+			<output>
+				<port id="0" precision="I64">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="381" name="/down_blocks.0/attentions.0/transformer_blocks.0/ff/net.0/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.0/transformer_blocks.0/ff/net.0/Constant_5_output_0">
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="382" name="/down_blocks.0/attentions.0/transformer_blocks.0/ff/net.0/Mul_1" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+				<port id="0" precision="I64">
+					<dim>1</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="input.76">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+				<port id="2" precision="I64" names="/down_blocks.0/attentions.0/transformer_blocks.0/ff/net.0/Mul_1_output_0">
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="378" name="m.down_blocks.0.resnets.1.conv1.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320, 3, 3" offset="27993432" size="3686400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.0.resnets.1.conv1.weight"/>
-			</rt_info>
+		<layer id="383" name="ScatterUpdate_76280" type="ScatterUpdate" version="opset3">
+			<input>
+				<port id="0" precision="I64">
+					<dim>3</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>1</dim>
+				</port>
+				<port id="2" precision="I64">
+					<dim>1</dim>
+				</port>
+				<port id="3" precision="I32">
+					<dim>1</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.0.resnets.1.conv1.weight">
-					<dim>320</dim>
-					<dim>320</dim>
+				<port id="4" precision="I64">
 					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="384" name="Constant_76283" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970950" size="24" />
+			<output>
+				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="379" name="Convolution_2530" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_2530"/>
-			</rt_info>
+		<layer id="385" name="/down_blocks.0/attentions.0/transformer_blocks.0/ff/net.0/Slice_1" type="StridedSlice" version="opset1">
+			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask="" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>4096</dim>
+					<dim>2560</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>320</dim>
-					<dim>320</dim>
+				<port id="1" precision="I64">
+					<dim>3</dim>
+				</port>
+				<port id="2" precision="I64">
 					<dim>3</dim>
+				</port>
+				<port id="3" precision="I64">
 					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
+				<port id="4" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/ff/net.0/Slice_1_output_0">
 					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="380" name="Reshape_2550" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="31679832" size="1280"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>4096</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="381" name="onnx::Add_1169" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_2549, Reshape_2550, onnx::Add_1169"/>
-			</rt_info>
+		<layer id="386" name="/down_blocks.0/attentions.0/transformer_blocks.0/ff/net.0/Mul_3" type="Gelu" version="opset7">
+			<data approximation_mode="ERF" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>4096</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_1169">
+				<port id="1" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/ff/net.0/Mul_3_output_0">
 					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>4096</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="382" name="onnx::Gemm_1171" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gemm_1171, onnx::Mul_1170"/>
-			</rt_info>
+		<layer id="387" name="/down_blocks.0/attentions.0/transformer_blocks.0/ff/net.0/Mul_4" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>1280</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="onnx::Gemm_1171">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/ff/net.0/Mul_4_output_0">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="383" name="m.down_blocks.0.resnets.1.time_emb_proj.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 1280" offset="31681112" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.0.resnets.1.time_emb_proj.weight"/>
-			</rt_info>
+		<layer id="388" name="Constant_85020_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 1280" offset="12970974" size="819200" />
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.0.resnets.1.time_emb_proj.weight">
+				<port id="0" precision="FP16">
 					<dim>320</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="384" name="MatMul_2582" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="389" name="Constant_85020" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="MatMul_2582"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="390" name="/down_blocks.0/attentions.0/transformer_blocks.0/ff/net.2/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>1280</dim>
 				</port>
 				<port id="1" precision="FP32">
@@ -6126,114 +6098,208 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/ff/net.2/MatMul_output_0">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="385" name="Constant_150306" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320" offset="33319512" size="1280"/>
-			<output>
+		<layer id="391" name="/down_blocks.0/attentions.0/transformer_blocks.0/ff/net.2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
 				<port id="0" precision="FP32">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
+				</port>
+			</input>
+			<output>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/ff/net.2/Add_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="386" name="onnx::Unsqueeze_1172" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Multiply_2583, onnx::Unsqueeze_1172"/>
-			</rt_info>
+		<layer id="392" name="/down_blocks.0/attentions.0/transformer_blocks.0/Add_2" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
+					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_1172">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/transformer_blocks.0/Add_2_output_0">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="387" name="onnx::Unsqueeze_1173" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_1173"/>
-			</rt_info>
+		<layer id="393" name="/down_blocks.0/attentions.0/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="13790174" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_1173">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.0/Constant_1_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="388" name="onnx::Unsqueeze_1174" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_1174"/>
-			</rt_info>
+		<layer id="394" name="/down_blocks.0/attentions.0/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
+				</port>
+			</input>
+			<output>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/Reshape_1_output_0">
+					<dim>2</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="395" name="Constant_14423" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="13790206" size="32" />
+			<output>
+				<port id="0" precision="I64">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="396" name="/down_blocks.0/attentions.0/Transpose_1" type="Transpose" version="opset1">
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+					<dim>320</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_1174">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/Transpose_1_output_0">
 					<dim>2</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="397" name="down_blocks.0.attentions.0.proj_out.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320, 1, 1" offset="13790238" size="204800" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="389" name="onnx::Unsqueeze_1175" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="14129392" size="8"/>
+		<layer id="398" name="down_blocks.0.attentions.0.proj_out.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_1175"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_1175">
+				<port id="1" precision="FP32" names="down_blocks.0.attentions.0.proj_out.weight">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="390" name="onnx::Add_1176" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1176"/>
-			</rt_info>
+		<layer id="399" name="/down_blocks.0/attentions.0/proj_out/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>320</dim>
-					<dim>1</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_1176">
+				<port id="2" precision="FP32">
 					<dim>2</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="400" name="Reshape_14445_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="13995038" size="640" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="391" name="onnx::Cast_1177" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="401" name="Reshape_14445" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.80, onnx::Cast_1177"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="402" name="/down_blocks.0/attentions.0/proj_out/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -6242,14 +6308,14 @@
 					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>2</dim>
+					<dim>1</dim>
 					<dim>320</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.80,onnx::Cast_1177">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/proj_out/Conv_output_0">
 					<dim>2</dim>
 					<dim>320</dim>
 					<dim>64</dim>
@@ -6257,22 +6323,41 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="392" name="onnx::Reshape_1179" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1179"/>
-			</rt_info>
+		<layer id="403" name="/down_blocks.0/attentions.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+			</input>
+			<output>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.0/Add_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="404" name="/down_blocks.0/resnets.1/norm1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_1179">
+				<port id="0" precision="I64" names="/down_blocks.0/resnets.1/norm1/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="393" name="onnx::InstanceNormalization_1180" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_1180"/>
-			</rt_info>
+		<layer id="405" name="/down_blocks.0/resnets.1/norm1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -6285,29 +6370,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_1180">
+				<port id="2" precision="FP32" names="/down_blocks.0/resnets.1/norm1/Reshape_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>40960</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="394" name="Constant_2600" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_2600"/>
-			</rt_info>
+		<layer id="406" name="Constant_14483" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="395" name="MVN_2601" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_2620, Concat_2665, MVN_2601, Multiply_2648, Reshape_2621, Reshape_2666, onnx::Reshape_1183"/>
-			</rt_info>
+		<layer id="407" name="MVN_14484" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -6319,18 +6398,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_1183">
+				<port id="2" precision="FP32" names="/down_blocks.0/resnets.1/norm1/InstanceNormalization_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>40960</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="396" name="onnx::Reshape_1184" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1184"/>
-			</rt_info>
+		<layer id="408" name="/down_blocks.0/resnets.1/norm1/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -6340,16 +6416,13 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_1184">
+				<port id="1" precision="I64" names="/down_blocks.0/resnets.1/norm1/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="397" name="onnx::Mul_1185" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_1185"/>
-			</rt_info>
+		<layer id="409" name="/down_blocks.0/resnets.1/norm1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -6361,7 +6434,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_1185">
+				<port id="2" precision="FP32" names="/down_blocks.0/resnets.1/norm1/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>320</dim>
 					<dim>64</dim>
@@ -6369,10 +6442,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="398" name="Constant_150307" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="33320792" size="1280"/>
+		<layer id="410" name="Constant_86894_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="13995678" size="640" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>320</dim>
 					<dim>1</dim>
@@ -6380,11 +6453,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="399" name="onnx::Add_1188" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="411" name="Constant_86894" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1188"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="412" name="/down_blocks.0/resnets.1/norm1/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -6400,7 +6492,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_1188">
+				<port id="2" precision="FP32" names="/down_blocks.0/resnets.1/norm1/Mul_output_0">
 					<dim>2</dim>
 					<dim>320</dim>
 					<dim>64</dim>
@@ -6408,10 +6500,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="400" name="Constant_150308" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="33322072" size="1280"/>
+		<layer id="413" name="Constant_86895_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="13996318" size="640" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>320</dim>
 					<dim>1</dim>
@@ -6419,11 +6511,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="401" name="onnx::Cast_1191" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="414" name="Constant_86895" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.84, onnx::Cast_1191"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="415" name="/down_blocks.0/resnets.1/norm1/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -6439,7 +6550,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.84,onnx::Cast_1191">
+				<port id="2" precision="FP32" names="/down_blocks.0/resnets.1/norm1/Add_output_0">
 					<dim>2</dim>
 					<dim>320</dim>
 					<dim>64</dim>
@@ -6447,10 +6558,7 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="402" name="input.88" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.88, onnx::Mul_1193"/>
-			</rt_info>
+		<layer id="416" name="/down_blocks.0/resnets.1/nonlinearity/Mul" type="Swish" version="opset4">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -6460,7 +6568,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="input.88">
+				<port id="1" precision="FP32" names="/down_blocks.0/resnets.1/nonlinearity/Mul_output_0">
 					<dim>2</dim>
 					<dim>320</dim>
 					<dim>64</dim>
@@ -6468,13 +6576,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="403" name="m.down_blocks.0.resnets.1.conv2.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320, 3, 3" offset="33323352" size="3686400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.0.resnets.1.conv2.weight"/>
-			</rt_info>
+		<layer id="417" name="down_blocks.0.resnets.1.conv1.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320, 3, 3" offset="13996958" size="1843200" />
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.0.resnets.1.conv2.weight">
+				<port id="0" precision="FP16">
 					<dim>320</dim>
 					<dim>320</dim>
 					<dim>3</dim>
@@ -6482,11 +6587,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="404" name="Convolution_2706" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
+		<layer id="418" name="down_blocks.0.resnets.1.conv1.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_2706"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="down_blocks.0.resnets.1.conv1.weight">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="419" name="/down_blocks.0/resnets.1/conv1/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -6510,10 +6634,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="405" name="Reshape_2726" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="37009752" size="1280"/>
+		<layer id="420" name="Reshape_14608_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="15840158" size="640" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>320</dim>
 					<dim>1</dim>
@@ -6521,11 +6645,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="406" name="onnx::Add_1195" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="421" name="Reshape_14608" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_2725, Reshape_2726, onnx::Add_1195"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="422" name="/down_blocks.0/resnets.1/conv1/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -6541,7 +6684,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_1195">
+				<port id="2" precision="FP32" names="/down_blocks.0/resnets.1/conv1/Conv_output_0">
 					<dim>2</dim>
 					<dim>320</dim>
 					<dim>64</dim>
@@ -6549,27 +6692,170 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="407" name="onnx::Div_1196" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="423" name="down_blocks.0.resnets.1.time_emb_proj.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 1280" offset="15840798" size="819200" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="424" name="down_blocks.0.resnets.1.time_emb_proj.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
+			<rt_info>
+				<attribute name="decompression" version="0" />
+			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="down_blocks.0.resnets.1.time_emb_proj.weight">
+					<dim>320</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="425" name="/down_blocks.0/resnets.1/time_emb_proj/Gemm/WithoutBiases" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="2" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="426" name="Constant_86896_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320" offset="16659998" size="640" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="427" name="Constant_86896" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.92, onnx::Div_1196"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="428" name="/down_blocks.0/resnets.1/time_emb_proj/Gemm" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
+			</input>
+			<output>
+				<port id="2" precision="FP32" names="/down_blocks.0/resnets.1/time_emb_proj/Gemm_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="429" name="/down_blocks.0/resnets.1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.0/resnets.1/Constant_output_0">
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="430" name="/down_blocks.0/resnets.1/Unsqueeze" type="Unsqueeze" version="opset1">
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="2" precision="FP32" names="/down_blocks.0/resnets.1/Unsqueeze_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="431" name="/down_blocks.0/resnets.1/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="7064752" size="8" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.0/resnets.1/Constant_1_output_0">
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="432" name="/down_blocks.0/resnets.1/Unsqueeze_1" type="Unsqueeze" version="opset1">
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="2" precision="FP32" names="/down_blocks.0/resnets.1/Unsqueeze_1_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="433" name="/down_blocks.0/resnets.1/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
+				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>320</dim>
 					<dim>64</dim>
 					<dim>64</dim>
 				</port>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.92,onnx::Div_1196">
+				<port id="2" precision="FP32" names="/down_blocks.0/resnets.1/Add_output_0">
 					<dim>2</dim>
 					<dim>320</dim>
 					<dim>64</dim>
@@ -6577,22 +6863,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="408" name="onnx::Reshape_1203" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1203"/>
-			</rt_info>
+		<layer id="434" name="/down_blocks.0/resnets.1/norm2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_1203">
+				<port id="0" precision="I64" names="/down_blocks.0/resnets.1/norm2/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="409" name="onnx::InstanceNormalization_1204" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_1204"/>
-			</rt_info>
+		<layer id="435" name="/down_blocks.0/resnets.1/norm2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -6605,29 +6885,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_1204">
+				<port id="2" precision="FP32" names="/down_blocks.0/resnets.1/norm2/Reshape_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>40960</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="410" name="Constant_2766" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_2766"/>
-			</rt_info>
+		<layer id="436" name="Constant_14656" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="411" name="MVN_2767" type="MVN" version="opset6">
-			<data eps="9.9999999747524271e-07" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_2786, Concat_2831, MVN_2767, Multiply_2814, Reshape_2787, Reshape_2832, onnx::Reshape_1207"/>
-			</rt_info>
+		<layer id="437" name="MVN_14657" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -6639,18 +6913,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_1207">
+				<port id="2" precision="FP32" names="/down_blocks.0/resnets.1/norm2/InstanceNormalization_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>40960</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="412" name="onnx::Reshape_1208" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1208"/>
-			</rt_info>
+		<layer id="438" name="/down_blocks.0/resnets.1/norm2/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -6660,16 +6931,13 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_1208">
+				<port id="1" precision="I64" names="/down_blocks.0/resnets.1/norm2/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="413" name="onnx::Mul_1209" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_1209"/>
-			</rt_info>
+		<layer id="439" name="/down_blocks.0/resnets.1/norm2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -6681,7 +6949,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_1209">
+				<port id="2" precision="FP32" names="/down_blocks.0/resnets.1/norm2/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>320</dim>
 					<dim>64</dim>
@@ -6689,10 +6957,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="414" name="Constant_150309" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="37011032" size="1280"/>
+		<layer id="440" name="Constant_86897_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="16660638" size="640" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>320</dim>
 					<dim>1</dim>
@@ -6700,11 +6968,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="415" name="onnx::Add_1212" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="441" name="Constant_86897" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1212"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="442" name="/down_blocks.0/resnets.1/norm2/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -6720,7 +7007,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_1212">
+				<port id="2" precision="FP32" names="/down_blocks.0/resnets.1/norm2/Mul_output_0">
 					<dim>2</dim>
 					<dim>320</dim>
 					<dim>64</dim>
@@ -6728,10 +7015,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="416" name="Constant_150310" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="37012312" size="1280"/>
+		<layer id="443" name="Constant_86898_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="16661278" size="640" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>320</dim>
 					<dim>1</dim>
@@ -6739,11 +7026,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="417" name="input.96" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="444" name="Constant_86898" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.96"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="445" name="/down_blocks.0/resnets.1/norm2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -6759,7 +7065,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.96">
+				<port id="2" precision="FP32" names="/down_blocks.0/resnets.1/norm2/Add_output_0">
 					<dim>2</dim>
 					<dim>320</dim>
 					<dim>64</dim>
@@ -6767,25 +7073,59 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="418" name="m.down_blocks.0.attentions.1.proj_in.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320, 1, 1" offset="37013592" size="409600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.0.attentions.1.proj_in.weight"/>
-			</rt_info>
+		<layer id="446" name="/down_blocks.0/resnets.1/nonlinearity_1/Mul" type="Swish" version="opset4">
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="/down_blocks.0/resnets.1/nonlinearity_1/Mul_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="447" name="down_blocks.0.resnets.1.conv2.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320, 3, 3" offset="16661918" size="1843200" />
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.0.attentions.1.proj_in.weight">
+				<port id="0" precision="FP16">
 					<dim>320</dim>
 					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>3</dim>
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="419" name="Convolution_2869" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
+		<layer id="448" name="down_blocks.0.resnets.1.conv2.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_2869"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="down_blocks.0.resnets.1.conv2.weight">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="449" name="/down_blocks.0/resnets.1/conv2/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -6796,8 +7136,8 @@
 				<port id="1" precision="FP32">
 					<dim>320</dim>
 					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>3</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
@@ -6809,10 +7149,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="420" name="Reshape_2889" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="37423192" size="1280"/>
+		<layer id="450" name="Reshape_14781_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="18505118" size="640" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>320</dim>
 					<dim>1</dim>
@@ -6820,11 +7160,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="421" name="onnx::Transpose_1216" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="451" name="Reshape_14781" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_2888, Reshape_2889, onnx::Transpose_1216"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="452" name="/down_blocks.0/resnets.1/conv2/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -6840,7 +7199,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_1216">
+				<port id="2" precision="FP32" names="/down_blocks.0/resnets.1/conv2/Conv_output_0">
 					<dim>2</dim>
 					<dim>320</dim>
 					<dim>64</dim>
@@ -6848,21 +7207,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="422" name="Constant_2917" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18233080" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_2917"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="423" name="onnx::Reshape_1217" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1217"/>
-			</rt_info>
+		<layer id="453" name="/down_blocks.0/resnets.1/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -6870,482 +7216,567 @@
 					<dim>64</dim>
 					<dim>64</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_1217">
+				<port id="2" precision="FP32" names="/down_blocks.0/resnets.1/Add_1_output_0,/down_blocks.0/resnets.1/Div_output_0">
 					<dim>2</dim>
+					<dim>320</dim>
 					<dim>64</dim>
 					<dim>64</dim>
-					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="424" name="input.100" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.100"/>
-			</rt_info>
+		<layer id="454" name="/down_blocks.0/attentions.1/norm/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.1/norm/Constant_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="455" name="/down_blocks.0/attentions.1/norm/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>320</dim>
 					<dim>64</dim>
 					<dim>64</dim>
-					<dim>320</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.100">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/norm/Reshape_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
+					<dim>32</dim>
+					<dim>40960</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="425" name="Constant_2922" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_2922"/>
-			</rt_info>
+		<layer id="456" name="Constant_14821" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="426" name="onnx::Mul_1235" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1231, onnx::Div_1234, onnx::Mul_1235, onnx::Pow_1228, onnx::ReduceMean_1230, onnx::Sqrt_1233, onnx::Sub_1227"/>
-			</rt_info>
+		<layer id="457" name="MVN_14822" type="MVN" version="opset6">
+			<data eps="9.9999999747524271e-07" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
+					<dim>32</dim>
+					<dim>40960</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_1235">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/norm/InstanceNormalization_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
+					<dim>32</dim>
+					<dim>40960</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="427" name="Constant_150311" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="37424472" size="1280"/>
-			<output>
+		<layer id="458" name="/down_blocks.0/attentions.1/norm/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
+			<input>
 				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>2</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="I64" names="/down_blocks.0/attentions.1/norm/Shape_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="428" name="onnx::Add_1236" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1236"/>
-			</rt_info>
+		<layer id="459" name="/down_blocks.0/attentions.1/norm/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
+					<dim>32</dim>
+					<dim>40960</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>320</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_1236">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/norm/Reshape_1_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="429" name="Constant_150312" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="37425752" size="1280"/>
+		<layer id="460" name="Constant_86899_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="18505758" size="640" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="430" name="onnx::MatMul_1237" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="461" name="Constant_86899" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_1237"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="462" name="/down_blocks.0/attentions.1/norm/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
 					<dim>1</dim>
 					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_1237">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/norm/Mul_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="431" name="Constant_147491" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320" offset="37427032" size="409600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8381, q.15"/>
-			</rt_info>
+		<layer id="463" name="Constant_86900_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="18506398" size="640" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>320</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="432" name="q.15" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="464" name="Constant_86900" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8381, q.15"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>320</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.15">
-					<dim>2</dim>
-					<dim>4096</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="433" name="onnx::Transpose_1257" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_1257"/>
-			</rt_info>
+		<layer id="465" name="/down_blocks.0/attentions.1/norm/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_1257">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/norm/Add_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="434" name="Constant_2941" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_2941"/>
-			</rt_info>
+		<layer id="466" name="down_blocks.0.attentions.1.proj_in.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320, 1, 1" offset="18507038" size="204800" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="435" name="onnx::Reshape_1258" type="Transpose" version="opset1">
+		<layer id="467" name="down_blocks.0.attentions.1.proj_in.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1258"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_1258">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+				<port id="1" precision="FP32" names="down_blocks.0.attentions.1.proj_in.weight">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="436" name="q.19" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="q.19"/>
-			</rt_info>
+		<layer id="468" name="/down_blocks.0/attentions.1/proj_in/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.19">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+				<port id="2" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="437" name="Constant_147498" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320" offset="37836632" size="409600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.15, onnx::MatMul_8382"/>
-			</rt_info>
+		<layer id="469" name="Reshape_14944_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="18711838" size="640" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>320</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="438" name="k.15" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="470" name="Reshape_14944" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="k.15, onnx::MatMul_8382"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>320</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.15">
-					<dim>2</dim>
-					<dim>4096</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="439" name="onnx::Transpose_1282" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_1282"/>
-			</rt_info>
+		<layer id="471" name="/down_blocks.0/attentions.1/proj_in/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_1282">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/proj_in/Conv_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="440" name="Constant_2949" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_2949"/>
-			</rt_info>
+		<layer id="472" name="Constant_14972" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9116600" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="441" name="onnx::Reshape_1283" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1283"/>
-			</rt_info>
+		<layer id="473" name="/down_blocks.0/attentions.1/Transpose" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_1283">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/Transpose_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="442" name="k.19" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.19"/>
-			</rt_info>
+		<layer id="474" name="/down_blocks.0/attentions.1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="9116632" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.1/Constant_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="475" name="/down_blocks.0/attentions.1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+					<dim>320</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.19">
-					<dim>16</dim>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/Reshape_output_0">
+					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="443" name="onnx::Mul_1319" type="Einsum" version="opset7">
-			<data equation="bid,bjd-&gt;bij"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_1319"/>
-			</rt_info>
+		<layer id="476" name="Constant_14981" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
+			<output>
+				<port id="0" precision="I64">
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="477" name="/down_blocks.0/attentions.1/transformer_blocks.0/norm1/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
+					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>320</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+				<port id="1" precision="I64">
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_1319">
-					<dim>16</dim>
-					<dim>4096</dim>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/norm1/Div_output_0">
+					<dim>2</dim>
 					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="444" name="Constant_150313" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1" offset="19054992" size="4"/>
+		<layer id="478" name="Constant_86901_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="18712478" size="640" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="445" name="onnx::Softmax_1321" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="479" name="Constant_86901" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Softmax_1321"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="480" name="/down_blocks.0/attentions.1/transformer_blocks.0/norm1/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
+					<dim>2</dim>
 					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Softmax_1321">
-					<dim>16</dim>
-					<dim>4096</dim>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/norm1/Mul_output_0">
+					<dim>2</dim>
 					<dim>4096</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="481" name="Constant_86902_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="18713118" size="640" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="446" name="attn.7" type="SoftMax" version="opset8">
-			<data axis="-1"/>
+		<layer id="482" name="Constant_86902" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="attn.7"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="483" name="/down_blocks.0/attentions.1/transformer_blocks.0/norm1/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
+					<dim>2</dim>
 					<dim>4096</dim>
+					<dim>320</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="attn.7">
-					<dim>16</dim>
-					<dim>4096</dim>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/norm1/Add_1_output_0">
+					<dim>2</dim>
 					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="447" name="Constant_147505" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320" offset="38246232" size="409600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8383, v.15"/>
-			</rt_info>
+		<layer id="484" name="Constant_85030_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320" offset="18713758" size="204800" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>320</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="448" name="v.15" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="485" name="Constant_85030" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8383, v.15"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="486" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/to_q/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -7358,18 +7789,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.15">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/to_q/MatMul_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="449" name="onnx::Transpose_1307" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_1307"/>
-			</rt_info>
+		<layer id="487" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322744" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Constant_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="488" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -7381,7 +7817,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_1307">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Reshape_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>8</dim>
@@ -7389,21 +7825,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="450" name="Constant_2957" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_2957"/>
-			</rt_info>
+		<layer id="489" name="Constant_15004" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="451" name="onnx::Reshape_1308" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1308"/>
-			</rt_info>
+		<layer id="490" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Transpose" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -7416,7 +7846,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_1308">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Transpose_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>4096</dim>
@@ -7424,11 +7854,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="452" name="v.19" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="v.19"/>
-			</rt_info>
+		<layer id="491" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="9322808" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Constant_1_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="492" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -7441,250 +7876,309 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.19">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Reshape_1_output_0">
 					<dim>16</dim>
 					<dim>4096</dim>
 					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="453" name="out.7" type="Einsum" version="opset7">
-			<data equation="bij,bjd-&gt;bid"/>
+		<layer id="493" name="Constant_85037_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320" offset="18918558" size="204800" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="494" name="Constant_85037" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="out.7"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="495" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/to_k/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
+					<dim>2</dim>
 					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="out.7">
-					<dim>16</dim>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/to_k/MatMul_output_0">
+					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="454" name="onnx::Gather_1324" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_1324, onnx::Gather_1327, onnx::Gather_1330"/>
-			</rt_info>
+		<layer id="496" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322744" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Constant_2_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="497" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Reshape_2" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
+					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>320</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_1324,onnx::Gather_1327,onnx::Gather_1330">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Reshape_2_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="455" name="onnx::Gather_1325" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_1325"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_1325"/>
-			</output>
-		</layer>
-		<layer id="456" name="Constant_2969" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_2969"/>
-			</rt_info>
+		<layer id="498" name="Constant_15020" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64">
+					<dim>4</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="457" name="onnx::Div_1326" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_2969, onnx::Div_1326, onnx::Gather_1325"/>
-			</rt_info>
+		<layer id="499" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Transpose_1" type="Transpose" version="opset1">
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>8</dim>
+					<dim>40</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_1326"/>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Transpose_1_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="458" name="onnx::Div_1333" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_1333"/>
-			</rt_info>
+		<layer id="500" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="9322808" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_1333"/>
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Constant_3_output_0">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="459" name="onnx::Cast_1334" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_1334, onnx::Cast_1335, onnx::Div_1333, onnx::Unsqueeze_1336"/>
-			</rt_info>
+		<layer id="501" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Reshape_3" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>3</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_1334,onnx::Cast_1335,onnx::Unsqueeze_1336"/>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Reshape_3_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="460" name="onnx::Unsqueeze_1338" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_1338"/>
-			</rt_info>
+		<layer id="502" name="Constant_86903_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1" offset="9527632" size="2" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_1338">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="461" name="onnx::Concat_1339" type="Unsqueeze" version="opset1">
+		<layer id="503" name="Constant_86903" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_1339, onnx::Unsqueeze_1338"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_1339">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="462" name="Constant_90597" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="19464604" size="16"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_1346"/>
-			</rt_info>
+		<layer id="504" name="Multiply_86179" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
+				<port id="2" precision="FP32">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="463" name="Constant_90598" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
+		<layer id="505" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Mul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Add_output_0,/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Mul_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>4096</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="464" name="Gather_90599" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_1346"/>
-			</rt_info>
+		<layer id="506" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Softmax" type="SoftMax" version="opset8">
+			<data axis="-1" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>2</dim>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>4096</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64">
-					<dim>2</dim>
+				<port id="1" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Cast_output_0,/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Softmax_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>4096</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="507" name="Constant_85044_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320" offset="19123358" size="204800" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="465" name="onnx::Reshape_1346" type="Concat" version="opset1">
-			<data axis="0"/>
+		<layer id="508" name="Constant_85044" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_1346"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_1346">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="466" name="onnx::Transpose_1347" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_1347"/>
-			</rt_info>
+		<layer id="509" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/to_v/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
+					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>320</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_1347">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/to_v/MatMul_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="467" name="Constant_3090" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_3090"/>
-			</rt_info>
+		<layer id="510" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Constant_4" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322744" size="32" />
 			<output>
-				<port id="0" precision="I64">
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Constant_4_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="468" name="onnx::Reshape_1348" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1348"/>
-			</rt_info>
+		<layer id="511" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Reshape_4" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>320</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_1348">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Reshape_4_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>8</dim>
@@ -7692,205 +8186,154 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="469" name="onnx::Div_1349" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_1349"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_1349"/>
-			</output>
-		</layer>
-		<layer id="470" name="onnx::Cast_1350" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_1350, onnx::Cast_1351, onnx::Div_1349, onnx::Unsqueeze_1352"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_1350,onnx::Cast_1351,onnx::Unsqueeze_1352"/>
-			</output>
-		</layer>
-		<layer id="471" name="onnx::Unsqueeze_1355" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_1355"/>
-			</rt_info>
+		<layer id="512" name="Constant_15036" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_1355">
-					<dim>1</dim>
+				<port id="0" precision="I64">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="472" name="onnx::Concat_1356" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_1356, onnx::Unsqueeze_1355"/>
-			</rt_info>
+		<layer id="513" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Transpose_2" type="Transpose" version="opset1">
 			<input>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>8</dim>
+					<dim>40</dim>
+				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_1356">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Transpose_2_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="473" name="Constant_88176" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_2973, onnx::Concat_1358, onnx::Gather_1328, onnx::Unsqueeze_1329, onnx::Unsqueeze_1357"/>
-			</rt_info>
+		<layer id="514" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="9322808" size="24" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Constant_5_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="474" name="Constant_2973" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_2973"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="475" name="onnx::Unsqueeze_1329" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_2973, onnx::Concat_1358, onnx::Gather_1328, onnx::Unsqueeze_1329, onnx::Unsqueeze_1357"/>
-			</rt_info>
+		<layer id="515" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Reshape_5" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>3</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_1358">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Reshape_5_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="476" name="onnx::Gather_1331" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_1331"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_1331"/>
-			</output>
-		</layer>
-		<layer id="477" name="Constant_2977" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_2977"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="478" name="onnx::Unsqueeze_1332" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_2977, onnx::Gather_1331, onnx::Unsqueeze_1332"/>
-			</rt_info>
+		<layer id="516" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/MatMul_1" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="false" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>4096</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_1332"/>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/MatMul_1_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="479" name="onnx::Mul_1353" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_1353"/>
-			</rt_info>
+		<layer id="517" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Constant_8" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9732434" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Mul_1353"/>
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Constant_8_output_0">
+					<dim>4</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="480" name="onnx::Unsqueeze_1354" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_1353, onnx::Unsqueeze_1354"/>
-			</rt_info>
+		<layer id="518" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Reshape_6" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_1354"/>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Reshape_6_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="481" name="onnx::Unsqueeze_1359" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_1359"/>
-			</rt_info>
+		<layer id="519" name="Constant_15062" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_1359">
-					<dim>1</dim>
+				<port id="0" precision="I64">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="482" name="onnx::Concat_1360" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_1360, onnx::Unsqueeze_1359"/>
-			</rt_info>
+		<layer id="520" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Transpose_4" type="Transpose" version="opset1">
 			<input>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_1360">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Transpose_4_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="483" name="onnx::Reshape_1361" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1361"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
+		<layer id="521" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Constant_9" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="9116632" size="24" />
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_1361">
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Constant_9_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="484" name="onnx::MatMul_1362" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_1362"/>
-			</rt_info>
+		<layer id="522" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Reshape_7" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -7903,30 +8346,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_1362">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/Reshape_7_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="485" name="Constant_147512" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320" offset="38655832" size="409600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1364, onnx::MatMul_8412"/>
-			</rt_info>
+		<layer id="523" name="Constant_85051_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320" offset="19328158" size="204800" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>320</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="486" name="onnx::Add_1364" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="524" name="Constant_85051" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1364, onnx::MatMul_8412"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="525" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/to_out.0/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -7939,18 +8394,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_1364">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/to_out.0/MatMul_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="487" name="input.104" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.104"/>
-			</rt_info>
+		<layer id="526" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/to_out.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>1</dim>
@@ -7964,18 +8416,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.104">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn1/to_out.0/Add_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="488" name="input.108" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.108"/>
-			</rt_info>
+		<layer id="527" name="/down_blocks.0/attentions.1/transformer_blocks.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -7989,29 +8438,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.108">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/Add_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="489" name="Constant_3211" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_3211"/>
-			</rt_info>
+		<layer id="528" name="Constant_15074" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="490" name="onnx::Mul_1375" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1371, onnx::Div_1374, onnx::Mul_1375, onnx::Pow_1368, onnx::ReduceMean_1370, onnx::Sqrt_1373, onnx::Sub_1367"/>
-			</rt_info>
+		<layer id="529" name="/down_blocks.0/attentions.1/transformer_blocks.0/norm2/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -8023,63 +8466,45 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_1375">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/norm2/Div_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="491" name="Constant_150315" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="39065432" size="1280"/>
+		<layer id="530" name="Constant_86905_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="19532958" size="640" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="492" name="onnx::Add_1376" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="531" name="Constant_86905" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1376"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_1376">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="493" name="Constant_150316" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="39066712" size="1280"/>
-			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="494" name="onnx::MatMul_1377" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_1377"/>
-			</rt_info>
+		<layer id="532" name="/down_blocks.0/attentions.1/transformer_blocks.0/norm2/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -8093,185 +8518,123 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_1377">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/norm2/Mul_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="495" name="Constant_147520" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320" offset="39067992" size="409600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8413, q.23"/>
-			</rt_info>
+		<layer id="533" name="Constant_86906_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="19533598" size="640" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>320</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="496" name="q.23" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="534" name="Constant_86906" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8413, q.23"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>320</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.23">
-					<dim>2</dim>
-					<dim>4096</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="497" name="Constant_107130" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
-			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="498" name="onnx::Gather_1384" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_1384, onnx::Gather_1387, onnx::Gather_1390"/>
-			</rt_info>
+		<layer id="535" name="/down_blocks.0/attentions.1/transformer_blocks.0/norm2/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_1384,onnx::Gather_1387,onnx::Gather_1390">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/norm2/Add_1_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="499" name="onnx::Gather_1391" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_1391"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_1391"/>
-			</output>
-		</layer>
-		<layer id="500" name="Constant_3237" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_3237"/>
-			</rt_info>
+		<layer id="536" name="Constant_85059_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320" offset="19534238" size="204800" />
 			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="501" name="onnx::Div_1392" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_3237, onnx::Div_1392, onnx::Gather_1391"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Div_1392"/>
-			</output>
-		</layer>
-		<layer id="502" name="onnx::Div_1393" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_1393"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_1393"/>
 			</output>
 		</layer>
-		<layer id="503" name="onnx::Cast_1394" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
+		<layer id="537" name="Constant_85059" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_1394, onnx::Cast_1395, onnx::Div_1393, onnx::Unsqueeze_1396"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_1394,onnx::Cast_1395,onnx::Unsqueeze_1396"/>
-			</output>
-		</layer>
-		<layer id="504" name="onnx::Unsqueeze_1404" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_1404"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_1404">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="505" name="onnx::Concat_1405" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_1405, onnx::Unsqueeze_1404"/>
-			</rt_info>
+		<layer id="538" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/to_q/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_1405">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/to_q/MatMul_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="506" name="onnx::Reshape_1406" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_1406"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
+		<layer id="539" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322744" size="32" />
 			<output>
-				<port id="3" precision="I64">
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Constant_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="507" name="onnx::Transpose_1407" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_1407"/>
-			</rt_info>
+		<layer id="540" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -8283,7 +8646,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_1407">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Reshape_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>8</dim>
@@ -8291,21 +8654,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="508" name="Constant_3350" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_3350"/>
-			</rt_info>
+		<layer id="541" name="Constant_15097" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="509" name="onnx::Reshape_1408" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1408"/>
-			</rt_info>
+		<layer id="542" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Transpose" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -8318,7 +8675,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_1408">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Transpose_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>4096</dim>
@@ -8326,205 +8683,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="510" name="onnx::Gather_1385" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_1385"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_1385"/>
-			</output>
-		</layer>
-		<layer id="511" name="Constant_3229" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_3229"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="512" name="onnx::Unsqueeze_1386" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_3229, onnx::Gather_1385, onnx::Unsqueeze_1386"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_1386"/>
-			</output>
-		</layer>
-		<layer id="513" name="onnx::Mul_1409" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_1409"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_1409"/>
-			</output>
-		</layer>
-		<layer id="514" name="onnx::Unsqueeze_1410" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_1409, onnx::Unsqueeze_1410"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_1410"/>
-			</output>
-		</layer>
-		<layer id="515" name="onnx::Unsqueeze_1415" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_1415"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_1415">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="516" name="onnx::Concat_1416" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_1416, onnx::Unsqueeze_1415"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_1416">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="517" name="Constant_88203" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_3233, onnx::Concat_1418, onnx::Gather_1388, onnx::Unsqueeze_1389, onnx::Unsqueeze_1417"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="518" name="Constant_3233" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_3233"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="519" name="onnx::Unsqueeze_1389" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_3233, onnx::Concat_1418, onnx::Gather_1388, onnx::Unsqueeze_1389, onnx::Unsqueeze_1417"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_1418">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="520" name="onnx::Div_1411" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_1411"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_1411"/>
-			</output>
-		</layer>
-		<layer id="521" name="onnx::Cast_1412" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_1412, onnx::Cast_1413, onnx::Div_1411, onnx::Unsqueeze_1414"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_1412,onnx::Cast_1413,onnx::Unsqueeze_1414"/>
-			</output>
-		</layer>
-		<layer id="522" name="onnx::Unsqueeze_1419" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_1419"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_1419">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="523" name="onnx::Concat_1420" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_1420, onnx::Unsqueeze_1419"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_1420">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="524" name="onnx::Reshape_1421" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1421"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
+		<layer id="543" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="9322808" size="24" />
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_1421">
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Constant_1_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="525" name="q.27" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="q.27"/>
-			</rt_info>
+		<layer id="544" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -8537,30 +8705,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.27">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Reshape_1_output_0">
 					<dim>16</dim>
 					<dim>4096</dim>
 					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="526" name="Constant_147527" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 768" offset="39477592" size="983040"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.23, onnx::MatMul_8414"/>
-			</rt_info>
+		<layer id="545" name="Constant_85066_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 768" offset="19739038" size="491520" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>320</dim>
 					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="527" name="k.23" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="546" name="Constant_85066" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="k.23, onnx::MatMul_8414"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>768</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>768</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="547" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/to_k/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -8573,18 +8753,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.23">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/to_k/MatMul_output_0">
 					<dim>2</dim>
 					<dim>77</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="528" name="onnx::Transpose_1436" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_1436"/>
-			</rt_info>
+		<layer id="548" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="10634866" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Constant_2_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="549" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Reshape_2" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -8596,7 +8781,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_1436">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Reshape_2_output_0">
 					<dim>2</dim>
 					<dim>77</dim>
 					<dim>8</dim>
@@ -8604,21 +8789,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="529" name="Constant_3471" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_3471"/>
-			</rt_info>
+		<layer id="550" name="Constant_15113" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="530" name="onnx::Reshape_1437" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1437"/>
-			</rt_info>
+		<layer id="551" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Transpose_1" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -8631,7 +8810,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_1437">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Transpose_1_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>77</dim>
@@ -8639,11 +8818,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="531" name="k.27" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.27"/>
-			</rt_info>
+		<layer id="552" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="10634898" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Constant_3_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="553" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Reshape_3" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -8656,78 +8840,89 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.27">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Reshape_3_output_0">
 					<dim>16</dim>
 					<dim>77</dim>
 					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="532" name="onnx::Mul_1473" type="Einsum" version="opset7">
-			<data equation="bid,bjd-&gt;bij"/>
+		<layer id="554" name="Constant_86907_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1" offset="9527632" size="2" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="555" name="Constant_86907" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_1473"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>40</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_1473">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>77</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="533" name="Constant_150317" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1" offset="19054992" size="4"/>
-			<output>
+		<layer id="556" name="Multiply_86181" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
 				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>40</dim>
+				</port>
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
+			</input>
+			<output>
+				<port id="2" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>40</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="534" name="onnx::Softmax_1475" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Softmax_1475"/>
-			</rt_info>
+		<layer id="557" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Mul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
 					<dim>4096</dim>
-					<dim>77</dim>
+					<dim>40</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>40</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Softmax_1475">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Add_output_0,/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Mul_output_0">
 					<dim>16</dim>
 					<dim>4096</dim>
 					<dim>77</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="535" name="attn.11" type="SoftMax" version="opset8">
-			<data axis="-1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="attn.11"/>
-			</rt_info>
+		<layer id="558" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Softmax" type="SoftMax" version="opset8">
+			<data axis="-1" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
@@ -8736,30 +8931,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="attn.11">
+				<port id="1" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Cast_output_0,/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Softmax_output_0">
 					<dim>16</dim>
 					<dim>4096</dim>
 					<dim>77</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="536" name="Constant_147534" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 768" offset="40460632" size="983040"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8415, v.23"/>
-			</rt_info>
+		<layer id="559" name="Constant_85073_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 768" offset="20230558" size="491520" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>320</dim>
 					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="537" name="v.23" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="560" name="Constant_85073" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8415, v.23"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>768</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>768</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="561" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/to_v/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -8772,18 +8979,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.23">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/to_v/MatMul_output_0">
 					<dim>2</dim>
 					<dim>77</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="538" name="onnx::Transpose_1461" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_1461"/>
-			</rt_info>
+		<layer id="562" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Constant_4" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="10634866" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Constant_4_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="563" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Reshape_4" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -8795,7 +9007,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_1461">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Reshape_4_output_0">
 					<dim>2</dim>
 					<dim>77</dim>
 					<dim>8</dim>
@@ -8803,21 +9015,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="539" name="Constant_3479" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_3479"/>
-			</rt_info>
+		<layer id="564" name="Constant_15129" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="540" name="onnx::Reshape_1462" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1462"/>
-			</rt_info>
+		<layer id="565" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Transpose_2" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -8830,7 +9036,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_1462">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Transpose_2_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>77</dim>
@@ -8838,11 +9044,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="541" name="v.27" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="v.27"/>
-			</rt_info>
+		<layer id="566" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="10634898" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Constant_5_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="567" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Reshape_5" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -8855,18 +9066,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.27">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Reshape_5_output_0">
 					<dim>16</dim>
 					<dim>77</dim>
 					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="542" name="out.11" type="Einsum" version="opset7">
-			<data equation="bij,bjd-&gt;bid"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="out.11"/>
-			</rt_info>
+		<layer id="568" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/MatMul_1" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="false" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
@@ -8880,467 +9088,252 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="out.11">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/MatMul_1_output_0">
 					<dim>16</dim>
 					<dim>4096</dim>
 					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="543" name="onnx::Gather_1478" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_1478, onnx::Gather_1481, onnx::Gather_1484"/>
-			</rt_info>
+		<layer id="569" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Constant_8" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9732434" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Constant_8_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="570" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Reshape_6" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
 					<dim>4096</dim>
 					<dim>40</dim>
 				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_1478,onnx::Gather_1481,onnx::Gather_1484">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Reshape_6_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="544" name="onnx::Gather_1479" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_1479"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_1479"/>
-			</output>
-		</layer>
-		<layer id="545" name="Constant_3491" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_3491"/>
-			</rt_info>
+		<layer id="571" name="Constant_15155" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64">
+					<dim>4</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="546" name="onnx::Div_1480" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_3491, onnx::Div_1480, onnx::Gather_1479"/>
-			</rt_info>
+		<layer id="572" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Transpose_4" type="Transpose" version="opset1">
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_1480"/>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Transpose_4_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>8</dim>
+					<dim>40</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="547" name="onnx::Div_1487" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_1487"/>
-			</rt_info>
+		<layer id="573" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Constant_9" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="9116632" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_1487"/>
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Constant_9_output_0">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="548" name="onnx::Cast_1488" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_1488, onnx::Cast_1489, onnx::Div_1487, onnx::Unsqueeze_1490"/>
-			</rt_info>
+		<layer id="574" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Reshape_7" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>8</dim>
+					<dim>40</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>3</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_1488,onnx::Cast_1489,onnx::Unsqueeze_1490"/>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/Reshape_7_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="549" name="onnx::Unsqueeze_1492" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_1492"/>
-			</rt_info>
+		<layer id="575" name="Constant_85080_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320" offset="20722078" size="204800" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_1492">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="550" name="onnx::Concat_1493" type="Unsqueeze" version="opset1">
+		<layer id="576" name="Constant_85080" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_1493, onnx::Unsqueeze_1492"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_1493">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="551" name="Constant_90607" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="19464604" size="16"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_1500"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="552" name="Constant_90608" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="553" name="Gather_90609" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_1500"/>
-			</rt_info>
+		<layer id="577" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/to_out.0/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/to_out.0/MatMul_output_0">
 					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="554" name="onnx::Reshape_1500" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_1500"/>
-			</rt_info>
+		<layer id="578" name="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/to_out.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
-				<port id="2" precision="I64">
+				<port id="1" precision="FP32">
 					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_1500">
-					<dim>4</dim>
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/attn2/to_out.0/Add_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="555" name="onnx::Transpose_1501" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_1501"/>
-			</rt_info>
+		<layer id="579" name="/down_blocks.0/attentions.1/transformer_blocks.0/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
+					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>320</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_1501">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/Add_1_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="556" name="Constant_3612" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_3612"/>
-			</rt_info>
+		<layer id="580" name="Constant_15167" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="557" name="onnx::Reshape_1502" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1502"/>
-			</rt_info>
+		<layer id="581" name="/down_blocks.0/attentions.1/transformer_blocks.0/norm3/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>320</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_1502">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/norm3/Div_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="558" name="onnx::Div_1503" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_1503"/>
-			</rt_info>
+		<layer id="582" name="Constant_86909_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="20926878" size="640" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_1503"/>
-			</output>
-		</layer>
-		<layer id="559" name="onnx::Cast_1504" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_1504, onnx::Cast_1505, onnx::Div_1503, onnx::Unsqueeze_1506"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_1504,onnx::Cast_1505,onnx::Unsqueeze_1506"/>
-			</output>
-		</layer>
-		<layer id="560" name="onnx::Unsqueeze_1509" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_1509"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_1509">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="561" name="onnx::Concat_1510" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_1510, onnx::Unsqueeze_1509"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_1510">
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="562" name="Constant_88230" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_3495, onnx::Concat_1512, onnx::Gather_1482, onnx::Unsqueeze_1483, onnx::Unsqueeze_1511"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="563" name="Constant_3495" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_3495"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="564" name="onnx::Unsqueeze_1483" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_3495, onnx::Concat_1512, onnx::Gather_1482, onnx::Unsqueeze_1483, onnx::Unsqueeze_1511"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_1512">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="565" name="onnx::Gather_1485" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_1485"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_1485"/>
-			</output>
-		</layer>
-		<layer id="566" name="Constant_3499" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_3499"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="567" name="onnx::Unsqueeze_1486" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_3499, onnx::Gather_1485, onnx::Unsqueeze_1486"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_1486"/>
-			</output>
-		</layer>
-		<layer id="568" name="onnx::Mul_1507" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_1507"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_1507"/>
-			</output>
-		</layer>
-		<layer id="569" name="onnx::Unsqueeze_1508" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="583" name="Constant_86909" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_1507, onnx::Unsqueeze_1508"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_1508"/>
-			</output>
-		</layer>
-		<layer id="570" name="onnx::Unsqueeze_1513" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_1513"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_1513">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="571" name="onnx::Concat_1514" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_1514, onnx::Unsqueeze_1513"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_1514">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="572" name="onnx::Reshape_1515" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1515"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_1515">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="573" name="onnx::MatMul_1516" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_1516"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_1516">
-					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="574" name="Constant_147541" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320" offset="41443672" size="409600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1518, onnx::MatMul_8436"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>320</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="575" name="onnx::Add_1518" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1518, onnx::MatMul_8436"/>
-			</rt_info>
+		<layer id="584" name="/down_blocks.0/attentions.1/transformer_blocks.0/norm3/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -9348,117 +9341,51 @@
 					<dim>320</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>320</dim>
-					<dim>320</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_1518">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="576" name="input.112" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.112"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>320</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="input.112">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="577" name="input.116" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.116"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.116">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/norm3/Mul_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="578" name="Constant_3733" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_3733"/>
-			</rt_info>
+		<layer id="585" name="Constant_86910_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="20927518" size="640" />
 			<output>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="579" name="onnx::Mul_1529" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
+		<layer id="586" name="Constant_86910" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1525, onnx::Div_1528, onnx::Mul_1529, onnx::Pow_1522, onnx::ReduceMean_1524, onnx::Sqrt_1527, onnx::Sub_1521"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_1529">
-					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="580" name="Constant_150319" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="41853272" size="1280"/>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="581" name="onnx::Add_1530" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1530"/>
-			</rt_info>
+		<layer id="587" name="/down_blocks.0/attentions.1/transformer_blocks.0/norm3/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -9472,65 +9399,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_1530">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/norm3/Add_1_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="582" name="Constant_150320" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="41854552" size="1280"/>
+		<layer id="588" name="Constant_85088_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="2560, 320" offset="20928158" size="1638400" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>2560</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="583" name="onnx::MatMul_1531" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="589" name="Constant_85088" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_1531"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>2560</dim>
 					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_1531">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="584" name="Constant_147549" type="Const" version="opset1">
-			<data element_type="f32" shape="2560, 320" offset="41855832" size="3276800"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1533, onnx::MatMul_8437"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>2560</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="585" name="onnx::Add_1533" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1533, onnx::MatMul_8437"/>
-			</rt_info>
+		<layer id="590" name="/down_blocks.0/attentions.1/transformer_blocks.0/ff/net.0/proj/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -9543,18 +9447,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_1533">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/ff/net.0/proj/MatMul_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="586" name="onnx::Shape_1534" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Shape_1534"/>
-			</rt_info>
+		<layer id="591" name="/down_blocks.0/attentions.1/transformer_blocks.0/ff/net.0/proj/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>1</dim>
@@ -9568,51 +9469,39 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Shape_1534">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/ff/net.0/proj/Add_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="587" name="Constant_125868" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_3761, onnx::Gather_1536, onnx::Mul_1545"/>
-			</rt_info>
+		<layer id="592" name="Constant_76409" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="588" name="Constant_125869" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_3761, onnx::Gather_1536, onnx::Mul_1545"/>
-			</rt_info>
+		<layer id="593" name="Constant_76410" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="589" name="Constant_125865" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_3761, onnx::Gather_1536, onnx::Mul_1545"/>
-			</rt_info>
+		<layer id="594" name="Constant_76406" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="590" name="onnx::Gather_1535" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_1535"/>
-			</rt_info>
+		<layer id="595" name="/down_blocks.0/attentions.1/transformer_blocks.0/ff/net.0/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -9621,36 +9510,27 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_1535">
+				<port id="1" precision="I64" names="/down_blocks.0/attentions.1/transformer_blocks.0/ff/net.0/Shape_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="591" name="onnx::Gather_1536" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_1536"/>
-			</rt_info>
+		<layer id="596" name="/down_blocks.0/attentions.1/transformer_blocks.0/ff/net.0/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Gather_1536">
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.1/transformer_blocks.0/ff/net.0/Constant_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="592" name="Constant_3750" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_3750"/>
-			</rt_info>
+		<layer id="597" name="Constant_15184" type="Const" version="opset1">
+			<data element_type="i64" shape="" offset="2143392" size="8" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64" />
 			</output>
 		</layer>
-		<layer id="593" name="onnx::Add_1537" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_3750, onnx::Add_1537, onnx::Gather_1536"/>
-			</rt_info>
+		<layer id="598" name="/down_blocks.0/attentions.1/transformer_blocks.0/ff/net.0/Gather" type="Gather" version="opset8">
+			<data batch_dims="0" />
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
@@ -9658,30 +9538,24 @@
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
+				<port id="2" precision="I64" />
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Add_1537">
+				<port id="3" precision="I64" names="/down_blocks.0/attentions.1/transformer_blocks.0/ff/net.0/Gather_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="594" name="onnx::Add_1539" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1539"/>
-			</rt_info>
+		<layer id="599" name="/down_blocks.0/attentions.1/transformer_blocks.0/ff/net.0/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="2143400" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Add_1539">
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.1/transformer_blocks.0/ff/net.0/Constant_2_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="595" name="onnx::Div_1540" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1539, onnx::Div_1540"/>
-			</rt_info>
+		<layer id="600" name="/down_blocks.0/attentions.1/transformer_blocks.0/ff/net.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="I64">
 					<dim>1</dim>
@@ -9691,27 +9565,21 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Div_1540">
+				<port id="2" precision="I64" names="/down_blocks.0/attentions.1/transformer_blocks.0/ff/net.0/Add_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="596" name="onnx::Div_1541" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_1541"/>
-			</rt_info>
+		<layer id="601" name="/down_blocks.0/attentions.1/transformer_blocks.0/ff/net.0/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_1541">
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.1/transformer_blocks.0/ff/net.0/Constant_3_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="597" name="onnx::Mul_1542" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_1541, onnx::Mul_1542, onnx::Mul_1543, onnx::Slice_1544"/>
-			</rt_info>
+		<layer id="602" name="/down_blocks.0/attentions.1/transformer_blocks.0/ff/net.0/Div" type="Divide" version="opset1">
+			<data auto_broadcast="numpy" m_pythondiv="true" />
 			<input>
 				<port id="0" precision="I64">
 					<dim>1</dim>
@@ -9721,26 +9589,20 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Mul_1542,onnx::Slice_1544">
+				<port id="2" precision="I64" names="/down_blocks.0/attentions.1/transformer_blocks.0/ff/net.0/Div_output_0,/down_blocks.0/attentions.1/transformer_blocks.0/ff/net.0/Mul_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="598" name="Constant_125864" type="Const" version="opset1">
-			<data element_type="i32" shape="1" offset="25941500" size="4"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_3761, onnx::Gather_1536, onnx::Mul_1545"/>
-			</rt_info>
+		<layer id="603" name="Constant_76405" type="Const" version="opset1">
+			<data element_type="i32" shape="1" offset="12970946" size="4" />
 			<output>
 				<port id="0" precision="I32">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="599" name="ScatterUpdate_125870" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_3761, onnx::Gather_1536, onnx::Mul_1545"/>
-			</rt_info>
+		<layer id="604" name="ScatterUpdate_76411" type="ScatterUpdate" version="opset3">
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
@@ -9761,22 +9623,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="600" name="Constant_125873" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941504" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_3761, onnx::Gather_1536, onnx::Mul_1545"/>
-			</rt_info>
+		<layer id="605" name="Constant_76414" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970950" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="601" name="onnx::Mul_1545" type="StridedSlice" version="opset1">
-			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask=""/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_3761, onnx::Gather_1536, onnx::Mul_1545"/>
-			</rt_info>
+		<layer id="606" name="/down_blocks.0/attentions.1/transformer_blocks.0/ff/net.0/Slice" type="StridedSlice" version="opset1">
+			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask="" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -9794,50 +9650,38 @@
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="FP32" names="onnx::Mul_1545">
+				<port id="4" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/ff/net.0/Slice_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="602" name="Constant_125937" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_3830, onnx::Div_1548, onnx::Gather_1536"/>
-			</rt_info>
+		<layer id="607" name="Constant_76478" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="603" name="Constant_125936" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_3830, onnx::Div_1548, onnx::Gather_1536"/>
-			</rt_info>
+		<layer id="608" name="Constant_76477" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="604" name="Constant_125935" type="Const" version="opset1">
-			<data element_type="i32" shape="1" offset="25941500" size="4"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_3830, onnx::Div_1548, onnx::Gather_1536"/>
-			</rt_info>
+		<layer id="609" name="Constant_76476" type="Const" version="opset1">
+			<data element_type="i32" shape="1" offset="12970946" size="4" />
 			<output>
 				<port id="0" precision="I32">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="605" name="ScatterUpdate_125938" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_3830, onnx::Div_1548, onnx::Gather_1536"/>
-			</rt_info>
+		<layer id="610" name="ScatterUpdate_76479" type="ScatterUpdate" version="opset3">
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
@@ -9858,33 +9702,24 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="606" name="Constant_125939" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_3830, onnx::Div_1548, onnx::Gather_1536"/>
-			</rt_info>
+		<layer id="611" name="Constant_76480" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="607" name="onnx::Mul_1546" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_1546"/>
-			</rt_info>
+		<layer id="612" name="/down_blocks.0/attentions.1/transformer_blocks.0/ff/net.0/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Mul_1546">
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.1/transformer_blocks.0/ff/net.0/Constant_5_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="608" name="onnx::Slice_1547" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_1546, onnx::Slice_1547"/>
-			</rt_info>
+		<layer id="613" name="/down_blocks.0/attentions.1/transformer_blocks.0/ff/net.0/Mul_1" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="I64">
 					<dim>1</dim>
@@ -9894,15 +9729,12 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Slice_1547">
+				<port id="2" precision="I64" names="/down_blocks.0/attentions.1/transformer_blocks.0/ff/net.0/Mul_1_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="609" name="ScatterUpdate_125940" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_3830, onnx::Div_1548, onnx::Gather_1536"/>
-			</rt_info>
+		<layer id="614" name="ScatterUpdate_76481" type="ScatterUpdate" version="opset3">
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
@@ -9923,22 +9755,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="610" name="Constant_125943" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941504" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_3830, onnx::Div_1548, onnx::Gather_1536"/>
-			</rt_info>
+		<layer id="615" name="Constant_76484" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970950" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="611" name="onnx::Div_1548" type="StridedSlice" version="opset1">
-			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask=""/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_3830, onnx::Div_1548, onnx::Gather_1536"/>
-			</rt_info>
+		<layer id="616" name="/down_blocks.0/attentions.1/transformer_blocks.0/ff/net.0/Slice_1" type="StridedSlice" version="opset1">
+			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask="" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -9956,18 +9782,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="FP32" names="onnx::Div_1548">
+				<port id="4" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/ff/net.0/Slice_1_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="612" name="onnx::Mul_1556" type="Gelu" version="opset7">
-			<data approximation_mode="ERF"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1551, onnx::Erf_1550, onnx::Mul_1553, onnx::Mul_1554, onnx::Mul_1556"/>
-			</rt_info>
+		<layer id="617" name="/down_blocks.0/attentions.1/transformer_blocks.0/ff/net.0/Mul_3" type="Gelu" version="opset7">
+			<data approximation_mode="ERF" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -9976,18 +9799,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="onnx::Mul_1556">
+				<port id="1" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/ff/net.0/Mul_3_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="613" name="input.120" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.120"/>
-			</rt_info>
+		<layer id="618" name="/down_blocks.0/attentions.1/transformer_blocks.0/ff/net.0/Mul_4" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -10001,30 +9821,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.120">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/ff/net.0/Mul_4_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="614" name="Constant_147557" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 1280" offset="45132632" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1559, onnx::MatMul_8438"/>
-			</rt_info>
+		<layer id="619" name="Constant_85096_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 1280" offset="22566558" size="819200" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>320</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="615" name="onnx::Add_1559" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="620" name="Constant_85096" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1559, onnx::MatMul_8438"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="621" name="/down_blocks.0/attentions.1/transformer_blocks.0/ff/net.2/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -10037,18 +9869,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_1559">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/ff/net.2/MatMul_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="616" name="onnx::Add_1560" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1560"/>
-			</rt_info>
+		<layer id="622" name="/down_blocks.0/attentions.1/transformer_blocks.0/ff/net.2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>1</dim>
@@ -10062,18 +9891,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_1560">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/ff/net.2/Add_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="617" name="onnx::Reshape_1561" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1561"/>
-			</rt_info>
+		<layer id="623" name="/down_blocks.0/attentions.1/transformer_blocks.0/Add_2" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -10087,18 +9913,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_1561">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/transformer_blocks.0/Add_2_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="618" name="onnx::Transpose_1571" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_1571"/>
-			</rt_info>
+		<layer id="624" name="/down_blocks.0/attentions.1/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="13790174" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.0/attentions.1/Constant_1_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="625" name="/down_blocks.0/attentions.1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -10110,7 +9941,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_1571">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>64</dim>
 					<dim>64</dim>
@@ -10118,21 +9949,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="619" name="Constant_3911" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="27579960" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_3911"/>
-			</rt_info>
+		<layer id="626" name="Constant_15349" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="13790206" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="620" name="input.124" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.124"/>
-			</rt_info>
+		<layer id="627" name="/down_blocks.0/attentions.1/Transpose_1" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -10145,7 +9970,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.124">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/Transpose_1_output_0">
 					<dim>2</dim>
 					<dim>320</dim>
 					<dim>64</dim>
@@ -10153,13 +9978,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="621" name="m.down_blocks.0.attentions.1.proj_out.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320, 1, 1" offset="46771032" size="409600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.0.attentions.1.proj_out.weight"/>
-			</rt_info>
+		<layer id="628" name="down_blocks.0.attentions.1.proj_out.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320, 1, 1" offset="23385758" size="204800" />
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.0.attentions.1.proj_out.weight">
+				<port id="0" precision="FP16">
 					<dim>320</dim>
 					<dim>320</dim>
 					<dim>1</dim>
@@ -10167,11 +9989,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="622" name="Convolution_3913" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
+		<layer id="629" name="down_blocks.0.attentions.1.proj_out.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_3913"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="down_blocks.0.attentions.1.proj_out.weight">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="630" name="/down_blocks.0/attentions.1/proj_out/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -10195,10 +10036,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="623" name="Reshape_3933" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="47180632" size="1280"/>
+		<layer id="631" name="Reshape_15371_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="23590558" size="640" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>320</dim>
 					<dim>1</dim>
@@ -10206,11 +10047,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="624" name="onnx::Add_1573" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="632" name="Reshape_15371" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_3932, Reshape_3933, onnx::Add_1573"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="633" name="/down_blocks.0/attentions.1/proj_out/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -10226,7 +10086,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_1573">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/proj_out/Conv_output_0">
 					<dim>2</dim>
 					<dim>320</dim>
 					<dim>64</dim>
@@ -10234,11 +10094,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="625" name="input.128" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.128"/>
-			</rt_info>
+		<layer id="634" name="/down_blocks.0/attentions.1/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -10254,7 +10111,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.128">
+				<port id="2" precision="FP32" names="/down_blocks.0/attentions.1/Add_output_0">
 					<dim>2</dim>
 					<dim>320</dim>
 					<dim>64</dim>
@@ -10262,13 +10119,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="626" name="m.down_blocks.0.downsamplers.0.conv.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320, 3, 3" offset="47181912" size="3686400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.0.downsamplers.0.conv.weight"/>
-			</rt_info>
+		<layer id="635" name="down_blocks.0.downsamplers.0.conv.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320, 3, 3" offset="23591198" size="1843200" />
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.0.downsamplers.0.conv.weight">
+				<port id="0" precision="FP16">
 					<dim>320</dim>
 					<dim>320</dim>
 					<dim>3</dim>
@@ -10276,11 +10130,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="627" name="Convolution_3962" type="Convolution" version="opset1">
-			<data strides="2, 2" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
+		<layer id="636" name="down_blocks.0.downsamplers.0.conv.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_3962"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="down_blocks.0.downsamplers.0.conv.weight">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="637" name="/down_blocks.0/downsamplers.0/conv/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="2, 2" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -10304,10 +10177,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="628" name="Reshape_3982" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="50868312" size="1280"/>
+		<layer id="638" name="Reshape_15420_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="25434398" size="640" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>320</dim>
 					<dim>1</dim>
@@ -10315,11 +10188,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="629" name="onnx::Cast_1575" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="639" name="Reshape_15420" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_3981, Reshape_3982, input.132, onnx::Cast_1575"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="640" name="/down_blocks.0/downsamplers.0/conv/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -10335,7 +10227,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.132,onnx::Cast_1575">
+				<port id="2" precision="FP32" names="/down_blocks.0/downsamplers.0/conv/Conv_output_0">
 					<dim>2</dim>
 					<dim>320</dim>
 					<dim>32</dim>
@@ -10343,13 +10235,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="630" name="m.down_blocks.1.resnets.0.conv_shortcut.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 320, 1, 1" offset="50869592" size="819200"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.1.resnets.0.conv_shortcut.weight"/>
-			</rt_info>
+		<layer id="641" name="down_blocks.1.resnets.0.conv_shortcut.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 320, 1, 1" offset="25435038" size="409600" />
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.1.resnets.0.conv_shortcut.weight">
+				<port id="0" precision="FP16">
 					<dim>640</dim>
 					<dim>320</dim>
 					<dim>1</dim>
@@ -10357,11 +10246,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="631" name="Convolution_4350" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
+		<layer id="642" name="down_blocks.1.resnets.0.conv_shortcut.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_4350"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="down_blocks.1.resnets.0.conv_shortcut.weight">
+					<dim>640</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="643" name="/down_blocks.1/resnets.0/conv_shortcut/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -10385,10 +10293,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="632" name="Reshape_4370" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="51688792" size="2560"/>
+		<layer id="644" name="Reshape_15803_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="25844638" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -10396,11 +10304,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="633" name="onnx::Add_1620" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="645" name="Reshape_15803" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_4369, Reshape_4370, onnx::Add_1620"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="646" name="/down_blocks.1/resnets.0/conv_shortcut/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -10416,7 +10343,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_1620">
+				<port id="2" precision="FP32" names="/down_blocks.1/resnets.0/conv_shortcut/Conv_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -10424,22 +10351,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="634" name="onnx::Reshape_1577" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1577"/>
-			</rt_info>
+		<layer id="647" name="/down_blocks.1/resnets.0/norm1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_1577">
+				<port id="0" precision="I64" names="/down_blocks.1/resnets.0/norm1/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="635" name="onnx::InstanceNormalization_1578" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_1578"/>
-			</rt_info>
+		<layer id="648" name="/down_blocks.1/resnets.0/norm1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -10452,29 +10373,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_1578">
+				<port id="2" precision="FP32" names="/down_blocks.1/resnets.0/norm1/Reshape_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="636" name="Constant_4020" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_4020"/>
-			</rt_info>
+		<layer id="649" name="Constant_15457" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="637" name="MVN_4021" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_4040, Concat_4085, MVN_4021, Multiply_4068, Reshape_4041, Reshape_4086, onnx::Reshape_1581"/>
-			</rt_info>
+		<layer id="650" name="MVN_15458" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -10486,18 +10401,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_1581">
+				<port id="2" precision="FP32" names="/down_blocks.1/resnets.0/norm1/InstanceNormalization_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="638" name="onnx::Reshape_1582" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1582"/>
-			</rt_info>
+		<layer id="651" name="/down_blocks.1/resnets.0/norm1/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -10507,16 +10419,13 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_1582">
+				<port id="1" precision="I64" names="/down_blocks.1/resnets.0/norm1/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="639" name="onnx::Mul_1583" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_1583"/>
-			</rt_info>
+		<layer id="652" name="/down_blocks.1/resnets.0/norm1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -10528,7 +10437,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_1583">
+				<port id="2" precision="FP32" names="/down_blocks.1/resnets.0/norm1/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>320</dim>
 					<dim>32</dim>
@@ -10536,10 +10445,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="640" name="Constant_150323" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="51691352" size="1280"/>
+		<layer id="653" name="Constant_86913_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="25845918" size="640" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>320</dim>
 					<dim>1</dim>
@@ -10547,11 +10456,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="641" name="onnx::Add_1586" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="654" name="Constant_86913" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1586"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="655" name="/down_blocks.1/resnets.0/norm1/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -10567,7 +10495,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_1586">
+				<port id="2" precision="FP32" names="/down_blocks.1/resnets.0/norm1/Mul_output_0">
 					<dim>2</dim>
 					<dim>320</dim>
 					<dim>32</dim>
@@ -10575,10 +10503,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="642" name="Constant_150324" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="51692632" size="1280"/>
+		<layer id="656" name="Constant_86914_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="25846558" size="640" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>320</dim>
 					<dim>1</dim>
@@ -10586,11 +10514,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="643" name="onnx::Cast_1589" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="657" name="Constant_86914" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.136, onnx::Cast_1589"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="658" name="/down_blocks.1/resnets.0/norm1/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -10606,7 +10553,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.136,onnx::Cast_1589">
+				<port id="2" precision="FP32" names="/down_blocks.1/resnets.0/norm1/Add_output_0">
 					<dim>2</dim>
 					<dim>320</dim>
 					<dim>32</dim>
@@ -10614,10 +10561,7 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="644" name="input.140" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.140, onnx::Mul_1591"/>
-			</rt_info>
+		<layer id="659" name="/down_blocks.1/resnets.0/nonlinearity/Mul" type="Swish" version="opset4">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -10627,7 +10571,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="input.140">
+				<port id="1" precision="FP32" names="/down_blocks.1/resnets.0/nonlinearity/Mul_output_0">
 					<dim>2</dim>
 					<dim>320</dim>
 					<dim>32</dim>
@@ -10635,13 +10579,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="645" name="m.down_blocks.1.resnets.0.conv1.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 320, 3, 3" offset="51693912" size="7372800"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.1.resnets.0.conv1.weight"/>
-			</rt_info>
+		<layer id="660" name="down_blocks.1.resnets.0.conv1.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 320, 3, 3" offset="25847198" size="3686400" />
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.1.resnets.0.conv1.weight">
+				<port id="0" precision="FP16">
 					<dim>640</dim>
 					<dim>320</dim>
 					<dim>3</dim>
@@ -10649,11 +10590,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="646" name="Convolution_4126" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
+		<layer id="661" name="down_blocks.1.resnets.0.conv1.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_4126"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>320</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="down_blocks.1.resnets.0.conv1.weight">
+					<dim>640</dim>
+					<dim>320</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="662" name="/down_blocks.1/resnets.0/conv1/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -10677,10 +10637,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="647" name="Reshape_4146" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="59066712" size="2560"/>
+		<layer id="663" name="Reshape_15582_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="29533598" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -10688,11 +10648,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="648" name="onnx::Add_1593" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="664" name="Reshape_15582" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_4145, Reshape_4146, onnx::Add_1593"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="665" name="/down_blocks.1/resnets.0/conv1/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -10708,7 +10687,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_1593">
+				<port id="2" precision="FP32" names="/down_blocks.1/resnets.0/conv1/Conv_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -10716,40 +10695,35 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="649" name="onnx::Gemm_1595" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gemm_1595, onnx::Mul_1594"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-				</port>
-			</input>
+		<layer id="666" name="down_blocks.1.resnets.0.time_emb_proj.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 1280" offset="29534878" size="1638400" />
 			<output>
-				<port id="1" precision="FP32" names="onnx::Gemm_1595">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="650" name="m.down_blocks.1.resnets.0.time_emb_proj.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 1280" offset="59069272" size="3276800"/>
+		<layer id="667" name="down_blocks.1.resnets.0.time_emb_proj.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.1.resnets.0.time_emb_proj.weight"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.1.resnets.0.time_emb_proj.weight">
+				<port id="1" precision="FP32" names="down_blocks.1.resnets.0.time_emb_proj.weight">
 					<dim>640</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="651" name="MatMul_4178" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="MatMul_4178"/>
-			</rt_info>
+		<layer id="668" name="/down_blocks.1/resnets.0/time_emb_proj/Gemm/WithoutBiases" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -10767,20 +10741,35 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="652" name="Constant_150325" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640" offset="62346072" size="2560"/>
+		<layer id="669" name="Constant_86915_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640" offset="31173278" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="653" name="onnx::Unsqueeze_1596" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="670" name="Constant_86915" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Multiply_4179, onnx::Unsqueeze_1596"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="671" name="/down_blocks.1/resnets.0/time_emb_proj/Gemm" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -10792,27 +10781,21 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_1596">
+				<port id="2" precision="FP32" names="/down_blocks.1/resnets.0/time_emb_proj/Gemm_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="654" name="onnx::Unsqueeze_1597" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_1597"/>
-			</rt_info>
+		<layer id="672" name="/down_blocks.1/resnets.0/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_1597">
+				<port id="0" precision="I64" names="/down_blocks.1/resnets.0/Constant_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="655" name="onnx::Unsqueeze_1598" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_1598"/>
-			</rt_info>
+		<layer id="673" name="/down_blocks.1/resnets.0/Unsqueeze" type="Unsqueeze" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -10823,28 +10806,22 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_1598">
+				<port id="2" precision="FP32" names="/down_blocks.1/resnets.0/Unsqueeze_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="656" name="onnx::Unsqueeze_1599" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="14129392" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_1599"/>
-			</rt_info>
+		<layer id="674" name="/down_blocks.1/resnets.0/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="7064752" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_1599">
+				<port id="0" precision="I64" names="/down_blocks.1/resnets.0/Constant_1_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="657" name="onnx::Add_1600" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1600"/>
-			</rt_info>
+		<layer id="675" name="/down_blocks.1/resnets.0/Unsqueeze_1" type="Unsqueeze" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -10856,7 +10833,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_1600">
+				<port id="2" precision="FP32" names="/down_blocks.1/resnets.0/Unsqueeze_1_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -10864,11 +10841,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="658" name="onnx::Cast_1601" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.144, onnx::Cast_1601"/>
-			</rt_info>
+		<layer id="676" name="/down_blocks.1/resnets.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -10884,7 +10858,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.144,onnx::Cast_1601">
+				<port id="2" precision="FP32" names="/down_blocks.1/resnets.0/Add_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -10892,22 +10866,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="659" name="onnx::Reshape_1603" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1603"/>
-			</rt_info>
+		<layer id="677" name="/down_blocks.1/resnets.0/norm2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_1603">
+				<port id="0" precision="I64" names="/down_blocks.1/resnets.0/norm2/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="660" name="onnx::InstanceNormalization_1604" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_1604"/>
-			</rt_info>
+		<layer id="678" name="/down_blocks.1/resnets.0/norm2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -10920,29 +10888,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_1604">
+				<port id="2" precision="FP32" names="/down_blocks.1/resnets.0/norm2/Reshape_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>20480</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="661" name="Constant_4196" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_4196"/>
-			</rt_info>
+		<layer id="679" name="Constant_15630" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="662" name="MVN_4197" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_4216, Concat_4261, MVN_4197, Multiply_4244, Reshape_4217, Reshape_4262, onnx::Reshape_1607"/>
-			</rt_info>
+		<layer id="680" name="MVN_15631" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -10954,18 +10916,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_1607">
+				<port id="2" precision="FP32" names="/down_blocks.1/resnets.0/norm2/InstanceNormalization_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>20480</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="663" name="onnx::Reshape_1608" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1608"/>
-			</rt_info>
+		<layer id="681" name="/down_blocks.1/resnets.0/norm2/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -10975,16 +10934,13 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_1608">
+				<port id="1" precision="I64" names="/down_blocks.1/resnets.0/norm2/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="664" name="onnx::Mul_1609" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_1609"/>
-			</rt_info>
+		<layer id="682" name="/down_blocks.1/resnets.0/norm2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -10996,7 +10952,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_1609">
+				<port id="2" precision="FP32" names="/down_blocks.1/resnets.0/norm2/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -11004,10 +10960,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="665" name="Constant_150326" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="62348632" size="2560"/>
+		<layer id="683" name="Constant_86916_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="31174558" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -11015,11 +10971,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="666" name="onnx::Add_1612" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="684" name="Constant_86916" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1612"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="685" name="/down_blocks.1/resnets.0/norm2/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -11035,7 +11010,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_1612">
+				<port id="2" precision="FP32" names="/down_blocks.1/resnets.0/norm2/Mul_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -11043,10 +11018,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="667" name="Constant_150327" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="62351192" size="2560"/>
+		<layer id="686" name="Constant_86917_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="31175838" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -11054,11 +11029,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="668" name="onnx::Cast_1615" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="687" name="Constant_86917" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.148, onnx::Cast_1615"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="688" name="/down_blocks.1/resnets.0/norm2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -11074,7 +11068,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.148,onnx::Cast_1615">
+				<port id="2" precision="FP32" names="/down_blocks.1/resnets.0/norm2/Add_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -11082,10 +11076,7 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="669" name="input.152" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.152, onnx::Mul_1617"/>
-			</rt_info>
+		<layer id="689" name="/down_blocks.1/resnets.0/nonlinearity_1/Mul" type="Swish" version="opset4">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -11095,7 +11086,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="input.152">
+				<port id="1" precision="FP32" names="/down_blocks.1/resnets.0/nonlinearity_1/Mul_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -11103,13 +11094,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="670" name="m.down_blocks.1.resnets.0.conv2.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640, 3, 3" offset="62353752" size="14745600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.1.resnets.0.conv2.weight"/>
-			</rt_info>
+		<layer id="690" name="down_blocks.1.resnets.0.conv2.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640, 3, 3" offset="31177118" size="7372800" />
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.1.resnets.0.conv2.weight">
+				<port id="0" precision="FP16">
 					<dim>640</dim>
 					<dim>640</dim>
 					<dim>3</dim>
@@ -11117,11 +11105,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="671" name="Convolution_4302" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
+		<layer id="691" name="down_blocks.1.resnets.0.conv2.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_4302"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="down_blocks.1.resnets.0.conv2.weight">
+					<dim>640</dim>
+					<dim>640</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="692" name="/down_blocks.1/resnets.0/conv2/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -11145,10 +11152,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="672" name="Reshape_4322" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="77099352" size="2560"/>
+		<layer id="693" name="Reshape_15755_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="38549918" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -11156,11 +11163,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="673" name="onnx::Add_1619" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="694" name="Reshape_15755" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_4321, Reshape_4322, onnx::Add_1619"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="695" name="/down_blocks.1/resnets.0/conv2/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -11176,7 +11202,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_1619">
+				<port id="2" precision="FP32" names="/down_blocks.1/resnets.0/conv2/Conv_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -11184,11 +11210,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="674" name="onnx::Div_1621" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.156, onnx::Div_1621"/>
-			</rt_info>
+		<layer id="696" name="/down_blocks.1/resnets.0/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -11204,7 +11227,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.156,onnx::Div_1621">
+				<port id="2" precision="FP32" names="/down_blocks.1/resnets.0/Add_1_output_0,/down_blocks.1/resnets.0/Div_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -11212,22 +11235,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="675" name="onnx::Reshape_1628" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1628"/>
-			</rt_info>
+		<layer id="697" name="/down_blocks.1/attentions.0/norm/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_1628">
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.0/norm/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="676" name="onnx::InstanceNormalization_1629" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_1629"/>
-			</rt_info>
+		<layer id="698" name="/down_blocks.1/attentions.0/norm/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -11240,29 +11257,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_1629">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/norm/Reshape_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>20480</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="677" name="Constant_4410" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_4410"/>
-			</rt_info>
+		<layer id="699" name="Constant_15843" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="678" name="MVN_4411" type="MVN" version="opset6">
-			<data eps="9.9999999747524271e-07" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_4430, Concat_4475, MVN_4411, Multiply_4458, Reshape_4431, Reshape_4476, onnx::Reshape_1632"/>
-			</rt_info>
+		<layer id="700" name="MVN_15844" type="MVN" version="opset6">
+			<data eps="9.9999999747524271e-07" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -11274,18 +11285,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_1632">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/norm/InstanceNormalization_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>20480</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="679" name="onnx::Reshape_1633" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1633"/>
-			</rt_info>
+		<layer id="701" name="/down_blocks.1/attentions.0/norm/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -11295,16 +11303,13 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_1633">
+				<port id="1" precision="I64" names="/down_blocks.1/attentions.0/norm/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="680" name="onnx::Mul_1634" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_1634"/>
-			</rt_info>
+		<layer id="702" name="/down_blocks.1/attentions.0/norm/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -11316,7 +11321,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_1634">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/norm/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -11324,10 +11329,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="681" name="Constant_150328" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="77101912" size="2560"/>
+		<layer id="703" name="Constant_86918_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="38551198" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -11335,11 +11340,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="682" name="onnx::Add_1637" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="704" name="Constant_86918" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1637"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="705" name="/down_blocks.1/attentions.0/norm/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -11355,7 +11379,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_1637">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/norm/Mul_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -11363,10 +11387,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="683" name="Constant_150329" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="77104472" size="2560"/>
+		<layer id="706" name="Constant_86919_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="38552478" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -11374,11 +11398,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="684" name="input.160" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="707" name="Constant_86919" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.160"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="708" name="/down_blocks.1/attentions.0/norm/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -11394,7 +11437,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.160">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/norm/Add_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -11402,13 +11445,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="685" name="m.down_blocks.1.attentions.0.proj_in.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640, 1, 1" offset="77107032" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.1.attentions.0.proj_in.weight"/>
-			</rt_info>
+		<layer id="709" name="down_blocks.1.attentions.0.proj_in.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640, 1, 1" offset="38553758" size="819200" />
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.1.attentions.0.proj_in.weight">
+				<port id="0" precision="FP16">
 					<dim>640</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -11416,11 +11456,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="686" name="Convolution_4513" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
+		<layer id="710" name="down_blocks.1.attentions.0.proj_in.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_4513"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="down_blocks.1.attentions.0.proj_in.weight">
+					<dim>640</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="711" name="/down_blocks.1/attentions.0/proj_in/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -11444,10 +11503,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="687" name="Reshape_4533" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="78745432" size="2560"/>
+		<layer id="712" name="Reshape_15966_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="39372958" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -11455,11 +11514,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="688" name="onnx::Transpose_1641" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="713" name="Reshape_15966" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_4532, Reshape_4533, onnx::Transpose_1641"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="714" name="/down_blocks.1/attentions.0/proj_in/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -11475,7 +11553,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_1641">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/proj_in/Conv_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -11483,21 +11561,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="689" name="Constant_4561" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18233080" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_4561"/>
-			</rt_info>
+		<layer id="715" name="Constant_15994" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9116600" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="690" name="onnx::Reshape_1642" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1642"/>
-			</rt_info>
+		<layer id="716" name="/down_blocks.1/attentions.0/Transpose" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -11510,7 +11582,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_1642">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/Transpose_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>32</dim>
@@ -11518,22 +11590,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="691" name="onnx::Reshape_8526" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="78747992" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_8526"/>
-			</rt_info>
+		<layer id="717" name="/down_blocks.1/attentions.0/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="39374238" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_8453,onnx::Reshape_8526">
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.0/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="692" name="input.164" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.164"/>
-			</rt_info>
+		<layer id="718" name="/down_blocks.1/attentions.0/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -11546,29 +11612,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.164">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/Reshape_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="693" name="Constant_4569" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_4569"/>
-			</rt_info>
+		<layer id="719" name="Constant_16003" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="694" name="onnx::Mul_1660" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1656, onnx::Div_1659, onnx::Mul_1660, onnx::Pow_1653, onnx::ReduceMean_1655, onnx::Sqrt_1658, onnx::Sub_1652"/>
-			</rt_info>
+		<layer id="720" name="/down_blocks.1/attentions.0/transformer_blocks.0/norm1/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -11580,28 +11640,45 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_1660">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/norm1/Div_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="695" name="Constant_150330" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="78748016" size="2560"/>
+		<layer id="721" name="Constant_86920_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="39374262" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="696" name="onnx::Add_1661" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="722" name="Constant_86920" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1661"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="723" name="/down_blocks.1/attentions.0/transformer_blocks.0/norm1/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -11615,28 +11692,45 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_1661">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/norm1/Mul_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="697" name="Constant_150331" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="78750576" size="2560"/>
+		<layer id="724" name="Constant_86921_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="39375542" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="698" name="onnx::MatMul_1662" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="725" name="Constant_86921" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_1662"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="726" name="/down_blocks.1/attentions.0/transformer_blocks.0/norm1/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -11650,30 +11744,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_1662">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/norm1/Add_1_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="699" name="Constant_147568" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640" offset="78753136" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8454, q.31"/>
-			</rt_info>
+		<layer id="727" name="Constant_85106_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640" offset="39376822" size="819200" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>640</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="700" name="q.31" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="728" name="Constant_85106" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8454, q.31"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="729" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/to_q/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -11686,29 +11792,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.31">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/to_q/MatMul_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="701" name="onnx::Reshape_8470" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="80391536" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_8470"/>
-			</rt_info>
+		<layer id="730" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="40196022" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_8461,onnx::Reshape_8470,onnx::Reshape_8479,onnx::Reshape_8534,onnx::Reshape_8543,onnx::Reshape_8552">
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Constant_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="702" name="onnx::Transpose_1682" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_1682"/>
-			</rt_info>
+		<layer id="731" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -11720,7 +11820,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_1682">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Reshape_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>8</dim>
@@ -11728,21 +11828,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="703" name="Constant_4591" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_4591"/>
-			</rt_info>
+		<layer id="732" name="Constant_16026" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="704" name="onnx::Reshape_1683" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1683"/>
-			</rt_info>
+		<layer id="733" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Transpose" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -11755,7 +11849,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_1683">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Transpose_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>1024</dim>
@@ -11763,22 +11857,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="705" name="onnx::Reshape_8474" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="80391568" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_8474"/>
-			</rt_info>
+		<layer id="734" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="40196054" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_8465,onnx::Reshape_8474,onnx::Reshape_8483,onnx::Reshape_8538,onnx::Reshape_8547,onnx::Reshape_8556">
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Constant_1_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="706" name="q.35" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="q.35"/>
-			</rt_info>
+		<layer id="735" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -11791,30 +11879,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.35">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Reshape_1_output_0">
 					<dim>16</dim>
 					<dim>1024</dim>
 					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="707" name="Constant_147575" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640" offset="80391592" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.31, onnx::MatMul_8455"/>
-			</rt_info>
+		<layer id="736" name="Constant_85113_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640" offset="40196078" size="819200" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>640</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="708" name="k.31" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="737" name="Constant_85113" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="k.31, onnx::MatMul_8455"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="738" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/to_k/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -11827,18 +11927,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.31">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/to_k/MatMul_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="709" name="onnx::Transpose_1707" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_1707"/>
-			</rt_info>
+		<layer id="739" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="40196022" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Constant_2_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="740" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Reshape_2" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -11850,7 +11955,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_1707">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Reshape_2_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>8</dim>
@@ -11858,21 +11963,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="710" name="Constant_4602" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_4602"/>
-			</rt_info>
+		<layer id="741" name="Constant_16042" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="711" name="onnx::Reshape_1708" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1708"/>
-			</rt_info>
+		<layer id="742" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Transpose_1" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -11885,7 +11984,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_1708">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Transpose_1_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>1024</dim>
@@ -11893,11 +11992,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="712" name="k.35" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.35"/>
-			</rt_info>
+		<layer id="743" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="40196054" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Constant_3_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="744" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Reshape_3" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -11910,18 +12014,45 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.35">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Reshape_3_output_0">
 					<dim>16</dim>
 					<dim>1024</dim>
 					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="713" name="onnx::Mul_1744" type="Einsum" version="opset7">
-			<data equation="bid,bjd-&gt;bij"/>
+		<layer id="745" name="Constant_86922_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1" offset="41015278" size="2" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="746" name="Constant_86922" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_1744"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="747" name="Multiply_86183" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
@@ -11929,59 +12060,43 @@
 					<dim>80</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_1744">
+				<port id="2" precision="FP32">
 					<dim>16</dim>
 					<dim>1024</dim>
-					<dim>1024</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="714" name="Constant_150332" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1" offset="82029992" size="4"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="715" name="onnx::Softmax_1746" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Softmax_1746"/>
-			</rt_info>
+		<layer id="748" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Mul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
 					<dim>1024</dim>
-					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Softmax_1746">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Add_output_0,/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Mul_output_0">
 					<dim>16</dim>
 					<dim>1024</dim>
 					<dim>1024</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="716" name="attn.15" type="SoftMax" version="opset8">
-			<data axis="-1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="attn.15"/>
-			</rt_info>
+		<layer id="749" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Softmax" type="SoftMax" version="opset8">
+			<data axis="-1" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
@@ -11990,30 +12105,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="attn.15">
+				<port id="1" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Cast_output_0,/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Softmax_output_0">
 					<dim>16</dim>
 					<dim>1024</dim>
 					<dim>1024</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="717" name="Constant_147582" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640" offset="82029996" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8456, v.31"/>
-			</rt_info>
+		<layer id="750" name="Constant_85120_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640" offset="41015280" size="819200" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>640</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="718" name="v.31" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="751" name="Constant_85120" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8456, v.31"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="752" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/to_v/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -12026,18 +12153,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.31">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/to_v/MatMul_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="719" name="onnx::Transpose_1732" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_1732"/>
-			</rt_info>
+		<layer id="753" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Constant_4" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="40196022" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Constant_4_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="754" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Reshape_4" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -12049,7 +12181,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_1732">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Reshape_4_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>8</dim>
@@ -12057,21 +12189,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="720" name="Constant_4610" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_4610"/>
-			</rt_info>
+		<layer id="755" name="Constant_16058" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="721" name="onnx::Reshape_1733" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1733"/>
-			</rt_info>
+		<layer id="756" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Transpose_2" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -12084,7 +12210,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_1733">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Transpose_2_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>1024</dim>
@@ -12092,11 +12218,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="722" name="v.35" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="v.35"/>
-			</rt_info>
+		<layer id="757" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="40196054" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Constant_5_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="758" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Reshape_5" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -12109,18 +12240,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.35">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Reshape_5_output_0">
 					<dim>16</dim>
 					<dim>1024</dim>
 					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="723" name="out.15" type="Einsum" version="opset7">
-			<data equation="bij,bjd-&gt;bid"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="out.15"/>
-			</rt_info>
+		<layer id="759" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/MatMul_1" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="false" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
@@ -12134,178 +12262,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="out.15">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="724" name="onnx::Gather_1749" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_1749, onnx::Gather_1752, onnx::Gather_1755"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/MatMul_1_output_0">
 					<dim>16</dim>
 					<dim>1024</dim>
 					<dim>80</dim>
 				</port>
-			</input>
-			<output>
-				<port id="1" precision="I64" names="onnx::Gather_1749,onnx::Gather_1752,onnx::Gather_1755">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="725" name="onnx::Gather_1750" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_1750"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_1750"/>
-			</output>
-		</layer>
-		<layer id="726" name="Constant_4622" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_4622"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="727" name="onnx::Div_1751" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_4622, onnx::Div_1751, onnx::Gather_1750"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Div_1751"/>
-			</output>
-		</layer>
-		<layer id="728" name="onnx::Div_1758" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_1758"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_1758"/>
-			</output>
-		</layer>
-		<layer id="729" name="onnx::Cast_1759" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_1759, onnx::Cast_1760, onnx::Div_1758, onnx::Unsqueeze_1761"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_1759,onnx::Cast_1760,onnx::Unsqueeze_1761"/>
-			</output>
-		</layer>
-		<layer id="730" name="onnx::Unsqueeze_1763" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_1763"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_1763">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="731" name="onnx::Concat_1764" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_1764, onnx::Unsqueeze_1763"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_1764">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="732" name="Constant_90612" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="19464604" size="16"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_1771"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="733" name="Constant_90613" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="734" name="Gather_90614" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_1771"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>2</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64">
-					<dim>2</dim>
-				</port>
 			</output>
 		</layer>
-		<layer id="735" name="onnx::Reshape_1771" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_1771"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>2</dim>
-				</port>
-			</input>
+		<layer id="760" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Constant_8" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="41834480" size="32" />
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_1771">
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Constant_8_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="736" name="onnx::Transpose_1772" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_1772"/>
-			</rt_info>
+		<layer id="761" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Reshape_6" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
@@ -12317,7 +12290,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_1772">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Reshape_6_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>1024</dim>
@@ -12325,21 +12298,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="737" name="Constant_4743" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_4743"/>
-			</rt_info>
+		<layer id="762" name="Constant_16084" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="738" name="onnx::Reshape_1773" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1773"/>
-			</rt_info>
+		<layer id="763" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Transpose_4" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -12352,7 +12319,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_1773">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Transpose_4_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>8</dim>
@@ -12360,205 +12327,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="739" name="onnx::Div_1774" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_1774"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_1774"/>
-			</output>
-		</layer>
-		<layer id="740" name="onnx::Cast_1775" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_1775, onnx::Cast_1776, onnx::Div_1774, onnx::Unsqueeze_1777"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_1775,onnx::Cast_1776,onnx::Unsqueeze_1777"/>
-			</output>
-		</layer>
-		<layer id="741" name="onnx::Unsqueeze_1780" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_1780"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_1780">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="742" name="onnx::Concat_1781" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_1781, onnx::Unsqueeze_1780"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_1781">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="743" name="Constant_88257" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_4626, onnx::Concat_1783, onnx::Gather_1753, onnx::Unsqueeze_1754, onnx::Unsqueeze_1782"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="744" name="Constant_4626" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_4626"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="745" name="onnx::Unsqueeze_1754" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_4626, onnx::Concat_1783, onnx::Gather_1753, onnx::Unsqueeze_1754, onnx::Unsqueeze_1782"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_1783">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="746" name="onnx::Gather_1756" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_1756"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_1756"/>
-			</output>
-		</layer>
-		<layer id="747" name="Constant_4630" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_4630"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="748" name="onnx::Unsqueeze_1757" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_4630, onnx::Gather_1756, onnx::Unsqueeze_1757"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_1757"/>
-			</output>
-		</layer>
-		<layer id="749" name="onnx::Mul_1778" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_1778"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_1778"/>
-			</output>
-		</layer>
-		<layer id="750" name="onnx::Unsqueeze_1779" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_1778, onnx::Unsqueeze_1779"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_1779"/>
-			</output>
-		</layer>
-		<layer id="751" name="onnx::Unsqueeze_1784" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_1784"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_1784">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="752" name="onnx::Concat_1785" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_1785, onnx::Unsqueeze_1784"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_1785">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="753" name="onnx::Reshape_1786" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1786"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
+		<layer id="764" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Constant_9" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="39374238" size="24" />
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_1786">
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Constant_9_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="754" name="onnx::MatMul_1787" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_1787"/>
-			</rt_info>
+		<layer id="765" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Reshape_7" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -12571,30 +12349,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_1787">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/Reshape_7_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="755" name="Constant_147589" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640" offset="83668396" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1789, onnx::MatMul_8485"/>
-			</rt_info>
+		<layer id="766" name="Constant_85127_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640" offset="41834512" size="819200" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>640</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="756" name="onnx::Add_1789" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="767" name="Constant_85127" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1789, onnx::MatMul_8485"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="768" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/to_out.0/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -12607,18 +12397,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_1789">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/to_out.0/MatMul_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="757" name="input.168" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.168"/>
-			</rt_info>
+		<layer id="769" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/to_out.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>1</dim>
@@ -12632,18 +12419,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.168">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn1/to_out.0/Add_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="758" name="input.172" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.172"/>
-			</rt_info>
+		<layer id="770" name="/down_blocks.1/attentions.0/transformer_blocks.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -12657,29 +12441,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.172">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/Add_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="759" name="Constant_4864" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_4864"/>
-			</rt_info>
+		<layer id="771" name="Constant_16096" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="760" name="onnx::Mul_1800" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1796, onnx::Div_1799, onnx::Mul_1800, onnx::Pow_1793, onnx::ReduceMean_1795, onnx::Sqrt_1798, onnx::Sub_1792"/>
-			</rt_info>
+		<layer id="772" name="/down_blocks.1/attentions.0/transformer_blocks.0/norm2/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -12691,28 +12469,45 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_1800">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/norm2/Div_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="761" name="Constant_150334" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="85306796" size="2560"/>
+		<layer id="773" name="Constant_86924_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="42653712" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="762" name="onnx::Add_1801" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="774" name="Constant_86924" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1801"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="775" name="/down_blocks.1/attentions.0/transformer_blocks.0/norm2/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -12726,28 +12521,45 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_1801">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/norm2/Mul_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="763" name="Constant_150335" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="85309356" size="2560"/>
+		<layer id="776" name="Constant_86925_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="42654992" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="764" name="onnx::MatMul_1802" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="777" name="Constant_86925" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_1802"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="778" name="/down_blocks.1/attentions.0/transformer_blocks.0/norm2/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -12761,185 +12573,71 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_1802">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/norm2/Add_1_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="765" name="Constant_147597" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640" offset="85311916" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8486, q.39"/>
-			</rt_info>
+		<layer id="779" name="Constant_85135_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640" offset="42656272" size="819200" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>640</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="766" name="q.39" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="780" name="Constant_85135" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8486, q.39"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>640</dim>
 					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.39">
-					<dim>2</dim>
-					<dim>1024</dim>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="767" name="Constant_107199" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
-			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="768" name="onnx::Gather_1809" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_1809, onnx::Gather_1812, onnx::Gather_1815"/>
-			</rt_info>
+		<layer id="781" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/to_q/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
-			</input>
-			<output>
-				<port id="1" precision="I64" names="onnx::Gather_1809,onnx::Gather_1812,onnx::Gather_1815">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="769" name="onnx::Gather_1816" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_1816"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_1816"/>
-			</output>
-		</layer>
-		<layer id="770" name="Constant_4890" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_4890"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="771" name="onnx::Div_1817" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_4890, onnx::Div_1817, onnx::Gather_1816"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Div_1817"/>
-			</output>
-		</layer>
-		<layer id="772" name="onnx::Div_1818" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_1818"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_1818"/>
-			</output>
-		</layer>
-		<layer id="773" name="onnx::Cast_1819" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_1819, onnx::Cast_1820, onnx::Div_1818, onnx::Unsqueeze_1821"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_1819,onnx::Cast_1820,onnx::Unsqueeze_1821"/>
-			</output>
-		</layer>
-		<layer id="774" name="onnx::Unsqueeze_1829" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_1829"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_1829">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="775" name="onnx::Concat_1830" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_1830, onnx::Unsqueeze_1829"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_1830">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/to_q/MatMul_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="776" name="onnx::Reshape_1831" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_1831"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
+		<layer id="782" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="40196022" size="32" />
 			<output>
-				<port id="3" precision="I64">
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Constant_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="777" name="onnx::Transpose_1832" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_1832"/>
-			</rt_info>
+		<layer id="783" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -12951,7 +12649,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_1832">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Reshape_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>8</dim>
@@ -12959,21 +12657,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="778" name="Constant_5003" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_5003"/>
-			</rt_info>
+		<layer id="784" name="Constant_16119" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="779" name="onnx::Reshape_1833" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1833"/>
-			</rt_info>
+		<layer id="785" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Transpose" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -12986,7 +12678,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_1833">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Transpose_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>1024</dim>
@@ -12994,205 +12686,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="780" name="onnx::Gather_1810" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_1810"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_1810"/>
-			</output>
-		</layer>
-		<layer id="781" name="Constant_4882" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_4882"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="782" name="onnx::Unsqueeze_1811" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_4882, onnx::Gather_1810, onnx::Unsqueeze_1811"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_1811"/>
-			</output>
-		</layer>
-		<layer id="783" name="onnx::Mul_1834" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_1834"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_1834"/>
-			</output>
-		</layer>
-		<layer id="784" name="onnx::Unsqueeze_1835" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_1834, onnx::Unsqueeze_1835"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_1835"/>
-			</output>
-		</layer>
-		<layer id="785" name="onnx::Unsqueeze_1840" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_1840"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_1840">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="786" name="onnx::Concat_1841" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_1841, onnx::Unsqueeze_1840"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_1841">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="787" name="Constant_88284" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_4886, onnx::Concat_1843, onnx::Gather_1813, onnx::Unsqueeze_1814, onnx::Unsqueeze_1842"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="788" name="Constant_4886" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_4886"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="789" name="onnx::Unsqueeze_1814" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_4886, onnx::Concat_1843, onnx::Gather_1813, onnx::Unsqueeze_1814, onnx::Unsqueeze_1842"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_1843">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="790" name="onnx::Div_1836" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_1836"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_1836"/>
-			</output>
-		</layer>
-		<layer id="791" name="onnx::Cast_1837" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_1837, onnx::Cast_1838, onnx::Div_1836, onnx::Unsqueeze_1839"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_1837,onnx::Cast_1838,onnx::Unsqueeze_1839"/>
-			</output>
-		</layer>
-		<layer id="792" name="onnx::Unsqueeze_1844" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_1844"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_1844">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="793" name="onnx::Concat_1845" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_1845, onnx::Unsqueeze_1844"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_1845">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="794" name="onnx::Reshape_1846" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1846"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
+		<layer id="786" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="40196054" size="24" />
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_1846">
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Constant_1_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="795" name="q.43" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="q.43"/>
-			</rt_info>
+		<layer id="787" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -13205,30 +12708,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.43">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Reshape_1_output_0">
 					<dim>16</dim>
 					<dim>1024</dim>
 					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="796" name="Constant_147604" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 768" offset="86950316" size="1966080"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.39, onnx::MatMul_8487"/>
-			</rt_info>
+		<layer id="788" name="Constant_85142_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 768" offset="43475472" size="983040" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>640</dim>
 					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="797" name="k.39" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="789" name="Constant_85142" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="k.39, onnx::MatMul_8487"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>768</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>768</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="790" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/to_k/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -13241,29 +12756,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.39">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/to_k/MatMul_output_0">
 					<dim>2</dim>
 					<dim>77</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="798" name="onnx::Reshape_8503" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="88916396" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_8503"/>
-			</rt_info>
+		<layer id="791" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="44458512" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_8494,onnx::Reshape_8503,onnx::Reshape_8567,onnx::Reshape_8576,onnx::Reshape_8977,onnx::Reshape_8986,onnx::Reshape_9017,onnx::Reshape_9026,onnx::Reshape_9057,onnx::Reshape_9066">
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Constant_2_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="799" name="onnx::Transpose_1861" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_1861"/>
-			</rt_info>
+		<layer id="792" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Reshape_2" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -13275,7 +12784,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_1861">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Reshape_2_output_0">
 					<dim>2</dim>
 					<dim>77</dim>
 					<dim>8</dim>
@@ -13283,21 +12792,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="800" name="Constant_5127" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_5127"/>
-			</rt_info>
+		<layer id="793" name="Constant_16135" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="801" name="onnx::Reshape_1862" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1862"/>
-			</rt_info>
+		<layer id="794" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Transpose_1" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -13310,7 +12813,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_1862">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Transpose_1_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>77</dim>
@@ -13318,22 +12821,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="802" name="onnx::Reshape_8507" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="88916428" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_8507"/>
-			</rt_info>
+		<layer id="795" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="44458544" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_8498,onnx::Reshape_8507,onnx::Reshape_8571,onnx::Reshape_8580,onnx::Reshape_8981,onnx::Reshape_8990,onnx::Reshape_9021,onnx::Reshape_9030,onnx::Reshape_9061,onnx::Reshape_9070">
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Constant_3_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="803" name="k.43" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.43"/>
-			</rt_info>
+		<layer id="796" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Reshape_3" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -13346,78 +12843,89 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.43">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Reshape_3_output_0">
 					<dim>16</dim>
 					<dim>77</dim>
 					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="804" name="onnx::Mul_1898" type="Einsum" version="opset7">
-			<data equation="bid,bjd-&gt;bij"/>
+		<layer id="797" name="Constant_86926_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1" offset="41015278" size="2" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="798" name="Constant_86926" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_1898"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>80</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_1898">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>77</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="805" name="Constant_150336" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1" offset="82029992" size="4"/>
-			<output>
+		<layer id="799" name="Multiply_86185" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
 				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>80</dim>
+				</port>
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
+			</input>
+			<output>
+				<port id="2" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>80</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="806" name="onnx::Softmax_1900" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Softmax_1900"/>
-			</rt_info>
+		<layer id="800" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Mul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
 					<dim>1024</dim>
-					<dim>77</dim>
+					<dim>80</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>80</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Softmax_1900">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Add_output_0,/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Mul_output_0">
 					<dim>16</dim>
 					<dim>1024</dim>
 					<dim>77</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="807" name="attn.19" type="SoftMax" version="opset8">
-			<data axis="-1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="attn.19"/>
-			</rt_info>
+		<layer id="801" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Softmax" type="SoftMax" version="opset8">
+			<data axis="-1" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
@@ -13426,30 +12934,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="attn.19">
+				<port id="1" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Cast_output_0,/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Softmax_output_0">
 					<dim>16</dim>
 					<dim>1024</dim>
 					<dim>77</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="808" name="Constant_147611" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 768" offset="88916452" size="1966080"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8488, v.39"/>
-			</rt_info>
+		<layer id="802" name="Constant_85149_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 768" offset="44458568" size="983040" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>640</dim>
 					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="809" name="v.39" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="803" name="Constant_85149" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8488, v.39"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>768</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>768</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="804" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/to_v/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -13462,18 +12982,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.39">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/to_v/MatMul_output_0">
 					<dim>2</dim>
 					<dim>77</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="810" name="onnx::Transpose_1886" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_1886"/>
-			</rt_info>
+		<layer id="805" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Constant_4" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="44458512" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Constant_4_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="806" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Reshape_4" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -13485,7 +13010,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_1886">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Reshape_4_output_0">
 					<dim>2</dim>
 					<dim>77</dim>
 					<dim>8</dim>
@@ -13493,21 +13018,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="811" name="Constant_5138" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_5138"/>
-			</rt_info>
+		<layer id="807" name="Constant_16151" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="812" name="onnx::Reshape_1887" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1887"/>
-			</rt_info>
+		<layer id="808" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Transpose_2" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -13520,7 +13039,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_1887">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Transpose_2_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>77</dim>
@@ -13528,11 +13047,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="813" name="v.43" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="v.43"/>
-			</rt_info>
+		<layer id="809" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="44458544" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Constant_5_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="810" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Reshape_5" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -13545,18 +13069,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.43">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Reshape_5_output_0">
 					<dim>16</dim>
 					<dim>77</dim>
 					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="814" name="out.19" type="Einsum" version="opset7">
-			<data equation="bij,bjd-&gt;bid"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="out.19"/>
-			</rt_info>
+		<layer id="811" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/MatMul_1" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="false" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
@@ -13570,467 +13091,252 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="out.19">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/MatMul_1_output_0">
 					<dim>16</dim>
 					<dim>1024</dim>
 					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="815" name="onnx::Gather_1903" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_1903, onnx::Gather_1906, onnx::Gather_1909"/>
-			</rt_info>
+		<layer id="812" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Constant_8" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="41834480" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Constant_8_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="813" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Reshape_6" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
 					<dim>1024</dim>
 					<dim>80</dim>
 				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_1903,onnx::Gather_1906,onnx::Gather_1909">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Reshape_6_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="816" name="onnx::Gather_1904" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_1904"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_1904"/>
-			</output>
-		</layer>
-		<layer id="817" name="Constant_5150" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_5150"/>
-			</rt_info>
+		<layer id="814" name="Constant_16177" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64">
+					<dim>4</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="818" name="onnx::Div_1905" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_5150, onnx::Div_1905, onnx::Gather_1904"/>
-			</rt_info>
+		<layer id="815" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Transpose_4" type="Transpose" version="opset1">
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_1905"/>
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Transpose_4_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>8</dim>
+					<dim>80</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="819" name="onnx::Div_1912" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_1912"/>
-			</rt_info>
+		<layer id="816" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Constant_9" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="39374238" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_1912"/>
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Constant_9_output_0">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="820" name="onnx::Cast_1913" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_1913, onnx::Cast_1914, onnx::Div_1912, onnx::Unsqueeze_1915"/>
-			</rt_info>
+		<layer id="817" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Reshape_7" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>8</dim>
+					<dim>80</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>3</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_1913,onnx::Cast_1914,onnx::Unsqueeze_1915"/>
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/Reshape_7_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="821" name="onnx::Unsqueeze_1917" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_1917"/>
-			</rt_info>
+		<layer id="818" name="Constant_85156_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640" offset="45441608" size="819200" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_1917">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="822" name="onnx::Concat_1918" type="Unsqueeze" version="opset1">
+		<layer id="819" name="Constant_85156" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_1918, onnx::Unsqueeze_1917"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_1918">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="823" name="Constant_90622" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="19464604" size="16"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_1925"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="824" name="Constant_90623" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="825" name="Gather_90624" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_1925"/>
-			</rt_info>
+		<layer id="820" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/to_out.0/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/to_out.0/MatMul_output_0">
 					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="826" name="onnx::Reshape_1925" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_1925"/>
-			</rt_info>
+		<layer id="821" name="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/to_out.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
 					<dim>1</dim>
+					<dim>640</dim>
 				</port>
-				<port id="2" precision="I64">
+				<port id="1" precision="FP32">
 					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_1925">
-					<dim>4</dim>
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/attn2/to_out.0/Add_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="827" name="onnx::Transpose_1926" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_1926"/>
-			</rt_info>
+		<layer id="822" name="/down_blocks.1/attentions.0/transformer_blocks.0/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
+					<dim>2</dim>
 					<dim>1024</dim>
-					<dim>80</dim>
+					<dim>640</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_1926">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/Add_1_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>1024</dim>
-					<dim>80</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="828" name="Constant_5271" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_5271"/>
-			</rt_info>
+		<layer id="823" name="Constant_16189" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="829" name="onnx::Reshape_1927" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1927"/>
-			</rt_info>
+		<layer id="824" name="/down_blocks.1/attentions.0/transformer_blocks.0/norm3/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>1024</dim>
-					<dim>80</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_1927">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/norm3/Div_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
-					<dim>8</dim>
-					<dim>80</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="830" name="onnx::Div_1928" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_1928"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_1928"/>
-			</output>
-		</layer>
-		<layer id="831" name="onnx::Cast_1929" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_1929, onnx::Cast_1930, onnx::Div_1928, onnx::Unsqueeze_1931"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_1929,onnx::Cast_1930,onnx::Unsqueeze_1931"/>
-			</output>
-		</layer>
-		<layer id="832" name="onnx::Unsqueeze_1934" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_1934"/>
-			</rt_info>
+		<layer id="825" name="Constant_86928_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="46260808" size="1280" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_1934">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="833" name="onnx::Concat_1935" type="Unsqueeze" version="opset1">
+		<layer id="826" name="Constant_86928" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_1935, onnx::Unsqueeze_1934"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_1935">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="834" name="Constant_88311" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_5154, onnx::Concat_1937, onnx::Gather_1907, onnx::Unsqueeze_1908, onnx::Unsqueeze_1936"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
 					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="835" name="Constant_5154" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_5154"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="836" name="onnx::Unsqueeze_1908" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_5154, onnx::Concat_1937, onnx::Gather_1907, onnx::Unsqueeze_1908, onnx::Unsqueeze_1936"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_1937">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="837" name="onnx::Gather_1910" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_1910"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_1910"/>
-			</output>
-		</layer>
-		<layer id="838" name="Constant_5158" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_5158"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="839" name="onnx::Unsqueeze_1911" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_5158, onnx::Gather_1910, onnx::Unsqueeze_1911"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_1911"/>
-			</output>
-		</layer>
-		<layer id="840" name="onnx::Mul_1932" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_1932"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_1932"/>
-			</output>
-		</layer>
-		<layer id="841" name="onnx::Unsqueeze_1933" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_1932, onnx::Unsqueeze_1933"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_1933"/>
-			</output>
-		</layer>
-		<layer id="842" name="onnx::Unsqueeze_1938" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_1938"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_1938">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="843" name="onnx::Concat_1939" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_1939, onnx::Unsqueeze_1938"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_1939">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="844" name="onnx::Reshape_1940" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1940"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_1940">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="845" name="onnx::MatMul_1941" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_1941"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>8</dim>
-					<dim>80</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_1941">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="846" name="Constant_147618" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640" offset="90882532" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1943, onnx::MatMul_8509"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>640</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="847" name="onnx::Add_1943" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1943, onnx::MatMul_8509"/>
-			</rt_info>
+		<layer id="827" name="/down_blocks.1/attentions.0/transformer_blocks.0/norm3/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -14038,117 +13344,51 @@
 					<dim>640</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>640</dim>
-					<dim>640</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_1943">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="848" name="input.176" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.176"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>640</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="input.176">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="849" name="input.180" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.180"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.180">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/norm3/Mul_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="850" name="Constant_5392" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_5392"/>
-			</rt_info>
+		<layer id="828" name="Constant_86929_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="46262088" size="1280" />
 			<output>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="851" name="onnx::Mul_1954" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
+		<layer id="829" name="Constant_86929" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1950, onnx::Div_1953, onnx::Mul_1954, onnx::Pow_1947, onnx::ReduceMean_1949, onnx::Sqrt_1952, onnx::Sub_1946"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_1954">
-					<dim>2</dim>
-					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="852" name="Constant_150338" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="92520932" size="2560"/>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="853" name="onnx::Add_1955" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1955"/>
-			</rt_info>
+		<layer id="830" name="/down_blocks.1/attentions.0/transformer_blocks.0/norm3/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -14162,65 +13402,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_1955">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/norm3/Add_1_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="854" name="Constant_150339" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="92523492" size="2560"/>
+		<layer id="831" name="Constant_85164_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="5120, 640" offset="46263368" size="6553600" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>5120</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="855" name="onnx::MatMul_1956" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="832" name="Constant_85164" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_1956"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>5120</dim>
 					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_1956">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="856" name="Constant_147626" type="Const" version="opset1">
-			<data element_type="f32" shape="5120, 640" offset="92526052" size="13107200"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1958, onnx::MatMul_8510"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>5120</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="857" name="onnx::Add_1958" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1958, onnx::MatMul_8510"/>
-			</rt_info>
+		<layer id="833" name="/down_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/proj/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -14233,18 +13450,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_1958">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/proj/MatMul_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="858" name="onnx::Shape_1959" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Shape_1959"/>
-			</rt_info>
+		<layer id="834" name="/down_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/proj/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>1</dim>
@@ -14258,51 +13472,39 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Shape_1959">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/proj/Add_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="859" name="Constant_126069" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_5420, onnx::Gather_1961, onnx::Mul_1970"/>
-			</rt_info>
+		<layer id="835" name="Constant_76610" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="860" name="Constant_126070" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_5420, onnx::Gather_1961, onnx::Mul_1970"/>
-			</rt_info>
+		<layer id="836" name="Constant_76611" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="861" name="Constant_126066" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_5420, onnx::Gather_1961, onnx::Mul_1970"/>
-			</rt_info>
+		<layer id="837" name="Constant_76607" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="862" name="onnx::Gather_1960" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_1960"/>
-			</rt_info>
+		<layer id="838" name="/down_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -14311,36 +13513,27 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_1960">
+				<port id="1" precision="I64" names="/down_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Shape_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="863" name="onnx::Gather_1961" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_1961"/>
-			</rt_info>
+		<layer id="839" name="/down_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Gather_1961">
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Constant_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="864" name="Constant_5409" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_5409"/>
-			</rt_info>
+		<layer id="840" name="Constant_16206" type="Const" version="opset1">
+			<data element_type="i64" shape="" offset="2143392" size="8" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64" />
 			</output>
 		</layer>
-		<layer id="865" name="onnx::Add_1962" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_5409, onnx::Add_1962, onnx::Gather_1961"/>
-			</rt_info>
+		<layer id="841" name="/down_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Gather" type="Gather" version="opset8">
+			<data batch_dims="0" />
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
@@ -14348,30 +13541,24 @@
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
+				<port id="2" precision="I64" />
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Add_1962">
+				<port id="3" precision="I64" names="/down_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Gather_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="866" name="onnx::Add_1964" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1964"/>
-			</rt_info>
+		<layer id="842" name="/down_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="2143400" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Add_1964">
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Constant_2_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="867" name="onnx::Div_1965" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1964, onnx::Div_1965"/>
-			</rt_info>
+		<layer id="843" name="/down_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="I64">
 					<dim>1</dim>
@@ -14381,27 +13568,21 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Div_1965">
+				<port id="2" precision="I64" names="/down_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Add_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="868" name="onnx::Div_1966" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_1966"/>
-			</rt_info>
+		<layer id="844" name="/down_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_1966">
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Constant_3_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="869" name="onnx::Mul_1967" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_1966, onnx::Mul_1967, onnx::Mul_1968, onnx::Slice_1969"/>
-			</rt_info>
+		<layer id="845" name="/down_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Div" type="Divide" version="opset1">
+			<data auto_broadcast="numpy" m_pythondiv="true" />
 			<input>
 				<port id="0" precision="I64">
 					<dim>1</dim>
@@ -14411,26 +13592,20 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Mul_1967,onnx::Slice_1969">
+				<port id="2" precision="I64" names="/down_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Div_output_0,/down_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Mul_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="870" name="Constant_126065" type="Const" version="opset1">
-			<data element_type="i32" shape="1" offset="25941500" size="4"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_5420, onnx::Gather_1961, onnx::Mul_1970"/>
-			</rt_info>
+		<layer id="846" name="Constant_76606" type="Const" version="opset1">
+			<data element_type="i32" shape="1" offset="12970946" size="4" />
 			<output>
 				<port id="0" precision="I32">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="871" name="ScatterUpdate_126071" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_5420, onnx::Gather_1961, onnx::Mul_1970"/>
-			</rt_info>
+		<layer id="847" name="ScatterUpdate_76612" type="ScatterUpdate" version="opset3">
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
@@ -14451,22 +13626,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="872" name="Constant_126074" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941504" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_5420, onnx::Gather_1961, onnx::Mul_1970"/>
-			</rt_info>
+		<layer id="848" name="Constant_76615" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970950" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="873" name="onnx::Mul_1970" type="StridedSlice" version="opset1">
-			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask=""/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_5420, onnx::Gather_1961, onnx::Mul_1970"/>
-			</rt_info>
+		<layer id="849" name="/down_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Slice" type="StridedSlice" version="opset1">
+			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask="" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -14484,50 +13653,38 @@
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="FP32" names="onnx::Mul_1970">
+				<port id="4" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Slice_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="874" name="Constant_126138" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_5489, onnx::Div_1973, onnx::Gather_1961"/>
-			</rt_info>
+		<layer id="850" name="Constant_76679" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="875" name="Constant_126137" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_5489, onnx::Div_1973, onnx::Gather_1961"/>
-			</rt_info>
+		<layer id="851" name="Constant_76678" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="876" name="Constant_126136" type="Const" version="opset1">
-			<data element_type="i32" shape="1" offset="25941500" size="4"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_5489, onnx::Div_1973, onnx::Gather_1961"/>
-			</rt_info>
+		<layer id="852" name="Constant_76677" type="Const" version="opset1">
+			<data element_type="i32" shape="1" offset="12970946" size="4" />
 			<output>
 				<port id="0" precision="I32">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="877" name="ScatterUpdate_126139" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_5489, onnx::Div_1973, onnx::Gather_1961"/>
-			</rt_info>
+		<layer id="853" name="ScatterUpdate_76680" type="ScatterUpdate" version="opset3">
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
@@ -14548,33 +13705,24 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="878" name="Constant_126140" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_5489, onnx::Div_1973, onnx::Gather_1961"/>
-			</rt_info>
+		<layer id="854" name="Constant_76681" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="879" name="onnx::Mul_1971" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_1971"/>
-			</rt_info>
+		<layer id="855" name="/down_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Mul_1971">
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Constant_5_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="880" name="onnx::Slice_1972" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_1971, onnx::Slice_1972"/>
-			</rt_info>
+		<layer id="856" name="/down_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Mul_1" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="I64">
 					<dim>1</dim>
@@ -14584,15 +13732,12 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Slice_1972">
+				<port id="2" precision="I64" names="/down_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Mul_1_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="881" name="ScatterUpdate_126141" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_5489, onnx::Div_1973, onnx::Gather_1961"/>
-			</rt_info>
+		<layer id="857" name="ScatterUpdate_76682" type="ScatterUpdate" version="opset3">
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
@@ -14613,22 +13758,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="882" name="Constant_126144" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941504" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_5489, onnx::Div_1973, onnx::Gather_1961"/>
-			</rt_info>
+		<layer id="858" name="Constant_76685" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970950" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="883" name="onnx::Div_1973" type="StridedSlice" version="opset1">
-			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask=""/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_5489, onnx::Div_1973, onnx::Gather_1961"/>
-			</rt_info>
+		<layer id="859" name="/down_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Slice_1" type="StridedSlice" version="opset1">
+			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask="" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -14646,18 +13785,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="FP32" names="onnx::Div_1973">
+				<port id="4" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Slice_1_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="884" name="onnx::Mul_1981" type="Gelu" version="opset7">
-			<data approximation_mode="ERF"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1976, onnx::Erf_1975, onnx::Mul_1978, onnx::Mul_1979, onnx::Mul_1981"/>
-			</rt_info>
+		<layer id="860" name="/down_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Mul_3" type="Gelu" version="opset7">
+			<data approximation_mode="ERF" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -14666,18 +13802,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="onnx::Mul_1981">
+				<port id="1" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Mul_3_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="885" name="input.184" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.184"/>
-			</rt_info>
+		<layer id="861" name="/down_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Mul_4" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -14691,30 +13824,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.184">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Mul_4_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="886" name="Constant_147634" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 2560" offset="105633252" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1984, onnx::MatMul_8511"/>
-			</rt_info>
+		<layer id="862" name="Constant_85172_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 2560" offset="52816968" size="3276800" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>640</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="887" name="onnx::Add_1984" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="863" name="Constant_85172" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1984, onnx::MatMul_8511"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>2560</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>2560</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="864" name="/down_blocks.1/attentions.0/transformer_blocks.0/ff/net.2/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -14727,18 +13872,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_1984">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/ff/net.2/MatMul_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="888" name="onnx::Add_1985" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_1985"/>
-			</rt_info>
+		<layer id="865" name="/down_blocks.1/attentions.0/transformer_blocks.0/ff/net.2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>1</dim>
@@ -14752,18 +13894,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_1985">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/ff/net.2/Add_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="889" name="onnx::Reshape_1986" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_1986"/>
-			</rt_info>
+		<layer id="866" name="/down_blocks.1/attentions.0/transformer_blocks.0/Add_2" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -14777,29 +13916,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_1986">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/transformer_blocks.0/Add_2_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="890" name="onnx::Reshape_8589" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="112186852" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_8589"/>
-			</rt_info>
+		<layer id="867" name="/down_blocks.1/attentions.0/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="56093768" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_8516,onnx::Reshape_8589">
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.0/Constant_1_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="891" name="onnx::Transpose_1996" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_1996"/>
-			</rt_info>
+		<layer id="868" name="/down_blocks.1/attentions.0/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -14811,7 +13944,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_1996">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>32</dim>
@@ -14819,21 +13952,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="892" name="Constant_5573" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="27579960" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_5573"/>
-			</rt_info>
+		<layer id="869" name="Constant_16371" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="13790206" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="893" name="input.188" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.188"/>
-			</rt_info>
+		<layer id="870" name="/down_blocks.1/attentions.0/Transpose_1" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -14846,7 +13973,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.188">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/Transpose_1_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -14854,13 +13981,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="894" name="m.down_blocks.1.attentions.0.proj_out.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640, 1, 1" offset="112186884" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.1.attentions.0.proj_out.weight"/>
-			</rt_info>
+		<layer id="871" name="down_blocks.1.attentions.0.proj_out.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640, 1, 1" offset="56093800" size="819200" />
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.1.attentions.0.proj_out.weight">
+				<port id="0" precision="FP16">
 					<dim>640</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -14868,11 +13992,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="895" name="Convolution_5575" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
+		<layer id="872" name="down_blocks.1.attentions.0.proj_out.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_5575"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="down_blocks.1.attentions.0.proj_out.weight">
+					<dim>640</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="873" name="/down_blocks.1/attentions.0/proj_out/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -14896,10 +14039,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="896" name="Reshape_5595" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="113825284" size="2560"/>
+		<layer id="874" name="Reshape_16393_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="56913000" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -14907,11 +14050,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="897" name="onnx::Add_1998" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="875" name="Reshape_16393" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_5594, Reshape_5595, onnx::Add_1998"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="876" name="/down_blocks.1/attentions.0/proj_out/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -14927,7 +14089,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_1998">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/proj_out/Conv_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -14935,11 +14097,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="898" name="onnx::Cast_1999" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.192, onnx::Cast_1999"/>
-			</rt_info>
+		<layer id="877" name="/down_blocks.1/attentions.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -14955,7 +14114,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.192,onnx::Cast_1999">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.0/Add_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -14963,22 +14122,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="899" name="onnx::Reshape_2001" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2001"/>
-			</rt_info>
+		<layer id="878" name="/down_blocks.1/resnets.1/norm1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_2001">
+				<port id="0" precision="I64" names="/down_blocks.1/resnets.1/norm1/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="900" name="onnx::InstanceNormalization_2002" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_2002"/>
-			</rt_info>
+		<layer id="879" name="/down_blocks.1/resnets.1/norm1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -14991,29 +14144,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_2002">
+				<port id="2" precision="FP32" names="/down_blocks.1/resnets.1/norm1/Reshape_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>20480</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="901" name="Constant_5634" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_5634"/>
-			</rt_info>
+		<layer id="880" name="Constant_16431" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="902" name="MVN_5635" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_5654, Concat_5699, MVN_5635, Multiply_5682, Reshape_5655, Reshape_5700, onnx::Reshape_2005"/>
-			</rt_info>
+		<layer id="881" name="MVN_16432" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -15025,18 +14172,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_2005">
+				<port id="2" precision="FP32" names="/down_blocks.1/resnets.1/norm1/InstanceNormalization_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>20480</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="903" name="onnx::Reshape_2006" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2006"/>
-			</rt_info>
+		<layer id="882" name="/down_blocks.1/resnets.1/norm1/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -15046,16 +14190,13 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_2006">
+				<port id="1" precision="I64" names="/down_blocks.1/resnets.1/norm1/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="904" name="onnx::Mul_2007" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_2007"/>
-			</rt_info>
+		<layer id="883" name="/down_blocks.1/resnets.1/norm1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -15067,7 +14208,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_2007">
+				<port id="2" precision="FP32" names="/down_blocks.1/resnets.1/norm1/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -15075,10 +14216,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="905" name="Constant_150342" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="113827844" size="2560"/>
+		<layer id="884" name="Constant_86932_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="56914280" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -15086,11 +14227,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="906" name="onnx::Add_2010" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="885" name="Constant_86932" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2010"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="886" name="/down_blocks.1/resnets.1/norm1/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -15106,7 +14266,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_2010">
+				<port id="2" precision="FP32" names="/down_blocks.1/resnets.1/norm1/Mul_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -15114,10 +14274,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="907" name="Constant_150343" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="113830404" size="2560"/>
+		<layer id="887" name="Constant_86933_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="56915560" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -15125,11 +14285,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="908" name="onnx::Cast_2013" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="888" name="Constant_86933" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.196, onnx::Cast_2013"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="889" name="/down_blocks.1/resnets.1/norm1/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -15145,7 +14324,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.196,onnx::Cast_2013">
+				<port id="2" precision="FP32" names="/down_blocks.1/resnets.1/norm1/Add_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -15153,10 +14332,7 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="909" name="input.200" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.200, onnx::Mul_2015"/>
-			</rt_info>
+		<layer id="890" name="/down_blocks.1/resnets.1/nonlinearity/Mul" type="Swish" version="opset4">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -15166,7 +14342,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="input.200">
+				<port id="1" precision="FP32" names="/down_blocks.1/resnets.1/nonlinearity/Mul_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -15174,13 +14350,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="910" name="m.down_blocks.1.resnets.1.conv1.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640, 3, 3" offset="113832964" size="14745600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.1.resnets.1.conv1.weight"/>
-			</rt_info>
+		<layer id="891" name="down_blocks.1.resnets.1.conv1.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640, 3, 3" offset="56916840" size="7372800" />
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.1.resnets.1.conv1.weight">
+				<port id="0" precision="FP16">
 					<dim>640</dim>
 					<dim>640</dim>
 					<dim>3</dim>
@@ -15188,11 +14361,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="911" name="Convolution_5740" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
+		<layer id="892" name="down_blocks.1.resnets.1.conv1.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_5740"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="down_blocks.1.resnets.1.conv1.weight">
+					<dim>640</dim>
+					<dim>640</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="893" name="/down_blocks.1/resnets.1/conv1/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -15216,10 +14408,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="912" name="Reshape_5760" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="128578564" size="2560"/>
+		<layer id="894" name="Reshape_16556_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="64289640" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -15227,11 +14419,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="913" name="onnx::Add_2017" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="895" name="Reshape_16556" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_5759, Reshape_5760, onnx::Add_2017"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="896" name="/down_blocks.1/resnets.1/conv1/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -15247,7 +14458,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_2017">
+				<port id="2" precision="FP32" names="/down_blocks.1/resnets.1/conv1/Conv_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -15255,40 +14466,35 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="914" name="onnx::Gemm_2019" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gemm_2019, onnx::Mul_2018"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-				</port>
-			</input>
+		<layer id="897" name="down_blocks.1.resnets.1.time_emb_proj.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 1280" offset="64290920" size="1638400" />
 			<output>
-				<port id="1" precision="FP32" names="onnx::Gemm_2019">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="915" name="m.down_blocks.1.resnets.1.time_emb_proj.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 1280" offset="128581124" size="3276800"/>
+		<layer id="898" name="down_blocks.1.resnets.1.time_emb_proj.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.1.resnets.1.time_emb_proj.weight"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.1.resnets.1.time_emb_proj.weight">
+				<port id="1" precision="FP32" names="down_blocks.1.resnets.1.time_emb_proj.weight">
 					<dim>640</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="916" name="MatMul_5792" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="MatMul_5792"/>
-			</rt_info>
+		<layer id="899" name="/down_blocks.1/resnets.1/time_emb_proj/Gemm/WithoutBiases" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -15306,20 +14512,35 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="917" name="Constant_150344" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640" offset="131857924" size="2560"/>
+		<layer id="900" name="Constant_86934_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640" offset="65929320" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="918" name="onnx::Unsqueeze_2020" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="901" name="Constant_86934" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Multiply_5793, onnx::Unsqueeze_2020"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="902" name="/down_blocks.1/resnets.1/time_emb_proj/Gemm" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -15331,27 +14552,21 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_2020">
+				<port id="2" precision="FP32" names="/down_blocks.1/resnets.1/time_emb_proj/Gemm_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="919" name="onnx::Unsqueeze_2021" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_2021"/>
-			</rt_info>
+		<layer id="903" name="/down_blocks.1/resnets.1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_2021">
+				<port id="0" precision="I64" names="/down_blocks.1/resnets.1/Constant_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="920" name="onnx::Unsqueeze_2022" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_2022"/>
-			</rt_info>
+		<layer id="904" name="/down_blocks.1/resnets.1/Unsqueeze" type="Unsqueeze" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -15362,28 +14577,22 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_2022">
+				<port id="2" precision="FP32" names="/down_blocks.1/resnets.1/Unsqueeze_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="921" name="onnx::Unsqueeze_2023" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="14129392" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_2023"/>
-			</rt_info>
+		<layer id="905" name="/down_blocks.1/resnets.1/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="7064752" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_2023">
+				<port id="0" precision="I64" names="/down_blocks.1/resnets.1/Constant_1_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="922" name="onnx::Add_2024" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2024"/>
-			</rt_info>
+		<layer id="906" name="/down_blocks.1/resnets.1/Unsqueeze_1" type="Unsqueeze" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -15395,7 +14604,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_2024">
+				<port id="2" precision="FP32" names="/down_blocks.1/resnets.1/Unsqueeze_1_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -15403,11 +14612,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="923" name="onnx::Cast_2025" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.204, onnx::Cast_2025"/>
-			</rt_info>
+		<layer id="907" name="/down_blocks.1/resnets.1/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -15423,7 +14629,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.204,onnx::Cast_2025">
+				<port id="2" precision="FP32" names="/down_blocks.1/resnets.1/Add_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -15431,22 +14637,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="924" name="onnx::Reshape_2027" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2027"/>
-			</rt_info>
+		<layer id="908" name="/down_blocks.1/resnets.1/norm2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_2027">
+				<port id="0" precision="I64" names="/down_blocks.1/resnets.1/norm2/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="925" name="onnx::InstanceNormalization_2028" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_2028"/>
-			</rt_info>
+		<layer id="909" name="/down_blocks.1/resnets.1/norm2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -15459,29 +14659,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_2028">
+				<port id="2" precision="FP32" names="/down_blocks.1/resnets.1/norm2/Reshape_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>20480</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="926" name="Constant_5810" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_5810"/>
-			</rt_info>
+		<layer id="910" name="Constant_16604" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="927" name="MVN_5811" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_5830, Concat_5875, MVN_5811, Multiply_5858, Reshape_5831, Reshape_5876, onnx::Reshape_2031"/>
-			</rt_info>
+		<layer id="911" name="MVN_16605" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -15493,18 +14687,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_2031">
+				<port id="2" precision="FP32" names="/down_blocks.1/resnets.1/norm2/InstanceNormalization_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>20480</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="928" name="onnx::Reshape_2032" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2032"/>
-			</rt_info>
+		<layer id="912" name="/down_blocks.1/resnets.1/norm2/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -15514,16 +14705,13 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_2032">
+				<port id="1" precision="I64" names="/down_blocks.1/resnets.1/norm2/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="929" name="onnx::Mul_2033" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_2033"/>
-			</rt_info>
+		<layer id="913" name="/down_blocks.1/resnets.1/norm2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -15535,7 +14723,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_2033">
+				<port id="2" precision="FP32" names="/down_blocks.1/resnets.1/norm2/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -15543,10 +14731,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="930" name="Constant_150345" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="131860484" size="2560"/>
+		<layer id="914" name="Constant_86935_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="65930600" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -15554,11 +14742,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="931" name="onnx::Add_2036" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="915" name="Constant_86935" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2036"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="916" name="/down_blocks.1/resnets.1/norm2/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -15574,7 +14781,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_2036">
+				<port id="2" precision="FP32" names="/down_blocks.1/resnets.1/norm2/Mul_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -15582,10 +14789,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="932" name="Constant_150346" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="131863044" size="2560"/>
+		<layer id="917" name="Constant_86936_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="65931880" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -15593,11 +14800,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="933" name="onnx::Cast_2039" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="918" name="Constant_86936" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.208, onnx::Cast_2039"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="919" name="/down_blocks.1/resnets.1/norm2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -15613,7 +14839,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.208,onnx::Cast_2039">
+				<port id="2" precision="FP32" names="/down_blocks.1/resnets.1/norm2/Add_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -15621,10 +14847,7 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="934" name="input.212" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.212, onnx::Mul_2041"/>
-			</rt_info>
+		<layer id="920" name="/down_blocks.1/resnets.1/nonlinearity_1/Mul" type="Swish" version="opset4">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -15634,7 +14857,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="input.212">
+				<port id="1" precision="FP32" names="/down_blocks.1/resnets.1/nonlinearity_1/Mul_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -15642,13 +14865,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="935" name="m.down_blocks.1.resnets.1.conv2.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640, 3, 3" offset="131865604" size="14745600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.1.resnets.1.conv2.weight"/>
-			</rt_info>
+		<layer id="921" name="down_blocks.1.resnets.1.conv2.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640, 3, 3" offset="65933160" size="7372800" />
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.1.resnets.1.conv2.weight">
+				<port id="0" precision="FP16">
 					<dim>640</dim>
 					<dim>640</dim>
 					<dim>3</dim>
@@ -15656,11 +14876,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="936" name="Convolution_5916" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
+		<layer id="922" name="down_blocks.1.resnets.1.conv2.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_5916"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="down_blocks.1.resnets.1.conv2.weight">
+					<dim>640</dim>
+					<dim>640</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="923" name="/down_blocks.1/resnets.1/conv2/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -15684,10 +14923,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="937" name="Reshape_5936" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="146611204" size="2560"/>
+		<layer id="924" name="Reshape_16729_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="73305960" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -15695,11 +14934,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="938" name="onnx::Add_2043" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="925" name="Reshape_16729" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_5935, Reshape_5936, onnx::Add_2043"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="926" name="/down_blocks.1/resnets.1/conv2/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -15715,7 +14973,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_2043">
+				<port id="2" precision="FP32" names="/down_blocks.1/resnets.1/conv2/Conv_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -15723,11 +14981,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="939" name="onnx::Div_2044" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.216, onnx::Div_2044"/>
-			</rt_info>
+		<layer id="927" name="/down_blocks.1/resnets.1/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -15743,7 +14998,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.216,onnx::Div_2044">
+				<port id="2" precision="FP32" names="/down_blocks.1/resnets.1/Add_1_output_0,/down_blocks.1/resnets.1/Div_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -15751,22 +15006,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="940" name="onnx::Reshape_2051" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2051"/>
-			</rt_info>
+		<layer id="928" name="/down_blocks.1/attentions.1/norm/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_2051">
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.1/norm/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="941" name="onnx::InstanceNormalization_2052" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_2052"/>
-			</rt_info>
+		<layer id="929" name="/down_blocks.1/attentions.1/norm/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -15779,29 +15028,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_2052">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/norm/Reshape_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>20480</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="942" name="Constant_5976" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_5976"/>
-			</rt_info>
+		<layer id="930" name="Constant_16769" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="943" name="MVN_5977" type="MVN" version="opset6">
-			<data eps="9.9999999747524271e-07" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_5996, Concat_6041, MVN_5977, Multiply_6024, Reshape_5997, Reshape_6042, onnx::Reshape_2055"/>
-			</rt_info>
+		<layer id="931" name="MVN_16770" type="MVN" version="opset6">
+			<data eps="9.9999999747524271e-07" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -15813,18 +15056,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_2055">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/norm/InstanceNormalization_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>20480</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="944" name="onnx::Reshape_2056" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2056"/>
-			</rt_info>
+		<layer id="932" name="/down_blocks.1/attentions.1/norm/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -15834,16 +15074,13 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_2056">
+				<port id="1" precision="I64" names="/down_blocks.1/attentions.1/norm/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="945" name="onnx::Mul_2057" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_2057"/>
-			</rt_info>
+		<layer id="933" name="/down_blocks.1/attentions.1/norm/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -15855,7 +15092,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_2057">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/norm/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -15863,10 +15100,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="946" name="Constant_150347" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="146613764" size="2560"/>
+		<layer id="934" name="Constant_86937_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="73307240" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -15874,11 +15111,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="947" name="onnx::Add_2060" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="935" name="Constant_86937" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2060"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="936" name="/down_blocks.1/attentions.1/norm/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -15894,7 +15150,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_2060">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/norm/Mul_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -15902,10 +15158,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="948" name="Constant_150348" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="146616324" size="2560"/>
+		<layer id="937" name="Constant_86938_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="73308520" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -15913,11 +15169,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="949" name="input.220" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="938" name="Constant_86938" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.220"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="939" name="/down_blocks.1/attentions.1/norm/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -15933,7 +15208,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.220">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/norm/Add_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -15941,13 +15216,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="950" name="m.down_blocks.1.attentions.1.proj_in.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640, 1, 1" offset="146618884" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.1.attentions.1.proj_in.weight"/>
-			</rt_info>
+		<layer id="940" name="down_blocks.1.attentions.1.proj_in.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640, 1, 1" offset="73309800" size="819200" />
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.1.attentions.1.proj_in.weight">
+				<port id="0" precision="FP16">
 					<dim>640</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -15955,11 +15227,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="951" name="Convolution_6079" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
+		<layer id="941" name="down_blocks.1.attentions.1.proj_in.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_6079"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="down_blocks.1.attentions.1.proj_in.weight">
+					<dim>640</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="942" name="/down_blocks.1/attentions.1/proj_in/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -15983,10 +15274,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="952" name="Reshape_6099" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="148257284" size="2560"/>
+		<layer id="943" name="Reshape_16892_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="74129000" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -15994,11 +15285,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="953" name="onnx::Transpose_2064" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="944" name="Reshape_16892" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_6098, Reshape_6099, onnx::Transpose_2064"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="945" name="/down_blocks.1/attentions.1/proj_in/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -16014,7 +15324,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_2064">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/proj_in/Conv_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -16022,21 +15332,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="954" name="Constant_6127" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18233080" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_6127"/>
-			</rt_info>
+		<layer id="946" name="Constant_16920" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9116600" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="955" name="onnx::Reshape_2065" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2065"/>
-			</rt_info>
+		<layer id="947" name="/down_blocks.1/attentions.1/Transpose" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -16049,7 +15353,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_2065">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/Transpose_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>32</dim>
@@ -16057,11 +15361,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="956" name="input.224" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.224"/>
-			</rt_info>
+		<layer id="948" name="/down_blocks.1/attentions.1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="39374238" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.1/Constant_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="949" name="/down_blocks.1/attentions.1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -16074,29 +15383,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.224">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/Reshape_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="957" name="Constant_6132" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_6132"/>
-			</rt_info>
+		<layer id="950" name="Constant_16929" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="958" name="onnx::Mul_2083" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2079, onnx::Div_2082, onnx::Mul_2083, onnx::Pow_2076, onnx::ReduceMean_2078, onnx::Sqrt_2081, onnx::Sub_2075"/>
-			</rt_info>
+		<layer id="951" name="/down_blocks.1/attentions.1/transformer_blocks.0/norm1/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -16108,28 +15411,45 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_2083">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/norm1/Div_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="959" name="Constant_150349" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="148259844" size="2560"/>
+		<layer id="952" name="Constant_86939_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="74130280" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="960" name="onnx::Add_2084" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="953" name="Constant_86939" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2084"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="954" name="/down_blocks.1/attentions.1/transformer_blocks.0/norm1/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -16143,28 +15463,45 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_2084">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/norm1/Mul_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="961" name="Constant_150350" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="148262404" size="2560"/>
+		<layer id="955" name="Constant_86940_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="74131560" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="962" name="onnx::MatMul_2085" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="956" name="Constant_86940" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_2085"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="957" name="/down_blocks.1/attentions.1/transformer_blocks.0/norm1/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -16178,30 +15515,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_2085">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/norm1/Add_1_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="963" name="Constant_147645" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640" offset="148264964" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8527, q.47"/>
-			</rt_info>
+		<layer id="958" name="Constant_85182_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640" offset="74132840" size="819200" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>640</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="964" name="q.47" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="959" name="Constant_85182" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8527, q.47"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="960" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/to_q/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -16214,18 +15563,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.47">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/to_q/MatMul_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="965" name="onnx::Transpose_2105" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_2105"/>
-			</rt_info>
+		<layer id="961" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="40196022" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Constant_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="962" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -16237,7 +15591,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_2105">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Reshape_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>8</dim>
@@ -16245,21 +15599,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="966" name="Constant_6151" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_6151"/>
-			</rt_info>
+		<layer id="963" name="Constant_16952" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="967" name="onnx::Reshape_2106" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2106"/>
-			</rt_info>
+		<layer id="964" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Transpose" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -16272,7 +15620,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_2106">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Transpose_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>1024</dim>
@@ -16280,11 +15628,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="968" name="q.51" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="q.51"/>
-			</rt_info>
+		<layer id="965" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="40196054" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Constant_1_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="966" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -16297,30 +15650,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.51">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Reshape_1_output_0">
 					<dim>16</dim>
 					<dim>1024</dim>
 					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="969" name="Constant_147652" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640" offset="149903364" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.47, onnx::MatMul_8528"/>
-			</rt_info>
+		<layer id="967" name="Constant_85189_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640" offset="74952040" size="819200" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>640</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="970" name="k.47" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="968" name="Constant_85189" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="k.47, onnx::MatMul_8528"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="969" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/to_k/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -16333,18 +15698,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.47">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/to_k/MatMul_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="971" name="onnx::Transpose_2130" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_2130"/>
-			</rt_info>
+		<layer id="970" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="40196022" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Constant_2_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="971" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Reshape_2" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -16356,7 +15726,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_2130">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Reshape_2_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>8</dim>
@@ -16364,21 +15734,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="972" name="Constant_6159" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_6159"/>
-			</rt_info>
+		<layer id="972" name="Constant_16968" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="973" name="onnx::Reshape_2131" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2131"/>
-			</rt_info>
+		<layer id="973" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Transpose_1" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -16391,7 +15755,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_2131">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Transpose_1_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>1024</dim>
@@ -16399,11 +15763,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="974" name="k.51" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.51"/>
-			</rt_info>
+		<layer id="974" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="40196054" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Constant_3_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="975" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Reshape_3" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -16416,18 +15785,45 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.51">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Reshape_3_output_0">
 					<dim>16</dim>
 					<dim>1024</dim>
 					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="975" name="onnx::Mul_2167" type="Einsum" version="opset7">
-			<data equation="bid,bjd-&gt;bij"/>
+		<layer id="976" name="Constant_86941_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1" offset="41015278" size="2" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="977" name="Constant_86941" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_2167"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="978" name="Multiply_86187" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
@@ -16435,59 +15831,43 @@
 					<dim>80</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_2167">
+				<port id="2" precision="FP32">
 					<dim>16</dim>
 					<dim>1024</dim>
-					<dim>1024</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="976" name="Constant_150351" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1" offset="82029992" size="4"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="977" name="onnx::Softmax_2169" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Softmax_2169"/>
-			</rt_info>
+		<layer id="979" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Mul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
 					<dim>1024</dim>
-					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Softmax_2169">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Add_output_0,/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Mul_output_0">
 					<dim>16</dim>
 					<dim>1024</dim>
 					<dim>1024</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="978" name="attn.23" type="SoftMax" version="opset8">
-			<data axis="-1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="attn.23"/>
-			</rt_info>
+		<layer id="980" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Softmax" type="SoftMax" version="opset8">
+			<data axis="-1" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
@@ -16496,30 +15876,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="attn.23">
+				<port id="1" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Cast_output_0,/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Softmax_output_0">
 					<dim>16</dim>
 					<dim>1024</dim>
 					<dim>1024</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="979" name="Constant_147659" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640" offset="151541764" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8529, v.47"/>
-			</rt_info>
+		<layer id="981" name="Constant_85196_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640" offset="75771240" size="819200" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>640</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="980" name="v.47" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="982" name="Constant_85196" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8529, v.47"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="983" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/to_v/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -16532,18 +15924,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.47">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/to_v/MatMul_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="981" name="onnx::Transpose_2155" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_2155"/>
-			</rt_info>
+		<layer id="984" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Constant_4" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="40196022" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Constant_4_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="985" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Reshape_4" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -16555,7 +15952,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_2155">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Reshape_4_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>8</dim>
@@ -16563,21 +15960,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="982" name="Constant_6167" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_6167"/>
-			</rt_info>
+		<layer id="986" name="Constant_16984" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="983" name="onnx::Reshape_2156" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2156"/>
-			</rt_info>
+		<layer id="987" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Transpose_2" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -16590,7 +15981,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_2156">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Transpose_2_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>1024</dim>
@@ -16598,11 +15989,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="984" name="v.51" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="v.51"/>
-			</rt_info>
+		<layer id="988" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="40196054" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Constant_5_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="989" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Reshape_5" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -16615,18 +16011,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.51">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Reshape_5_output_0">
 					<dim>16</dim>
 					<dim>1024</dim>
 					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="985" name="out.23" type="Einsum" version="opset7">
-			<data equation="bij,bjd-&gt;bid"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="out.23"/>
-			</rt_info>
+		<layer id="990" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/MatMul_1" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="false" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
@@ -16640,178 +16033,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="out.23">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="986" name="onnx::Gather_2172" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_2172, onnx::Gather_2175, onnx::Gather_2178"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/MatMul_1_output_0">
 					<dim>16</dim>
 					<dim>1024</dim>
 					<dim>80</dim>
 				</port>
-			</input>
-			<output>
-				<port id="1" precision="I64" names="onnx::Gather_2172,onnx::Gather_2175,onnx::Gather_2178">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="987" name="onnx::Gather_2173" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_2173"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_2173"/>
-			</output>
-		</layer>
-		<layer id="988" name="Constant_6179" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_6179"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="989" name="onnx::Div_2174" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_6179, onnx::Div_2174, onnx::Gather_2173"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Div_2174"/>
-			</output>
-		</layer>
-		<layer id="990" name="onnx::Div_2181" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_2181"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_2181"/>
-			</output>
-		</layer>
-		<layer id="991" name="onnx::Cast_2182" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_2182, onnx::Cast_2183, onnx::Div_2181, onnx::Unsqueeze_2184"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_2182,onnx::Cast_2183,onnx::Unsqueeze_2184"/>
-			</output>
-		</layer>
-		<layer id="992" name="onnx::Unsqueeze_2186" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_2186"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_2186">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="993" name="onnx::Concat_2187" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_2187, onnx::Unsqueeze_2186"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_2187">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="994" name="Constant_90627" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="19464604" size="16"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_2194"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="995" name="Constant_90628" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="996" name="Gather_90629" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_2194"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>2</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64">
-					<dim>2</dim>
-				</port>
 			</output>
 		</layer>
-		<layer id="997" name="onnx::Reshape_2194" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_2194"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>2</dim>
-				</port>
-			</input>
+		<layer id="991" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Constant_8" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="41834480" size="32" />
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_2194">
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Constant_8_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="998" name="onnx::Transpose_2195" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_2195"/>
-			</rt_info>
+		<layer id="992" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Reshape_6" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
@@ -16823,7 +16061,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_2195">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Reshape_6_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>1024</dim>
@@ -16831,21 +16069,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="999" name="Constant_6300" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_6300"/>
-			</rt_info>
+		<layer id="993" name="Constant_17010" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1000" name="onnx::Reshape_2196" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2196"/>
-			</rt_info>
+		<layer id="994" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Transpose_4" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -16858,7 +16090,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_2196">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Transpose_4_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>8</dim>
@@ -16866,205 +16098,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1001" name="onnx::Div_2197" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_2197"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_2197"/>
-			</output>
-		</layer>
-		<layer id="1002" name="onnx::Cast_2198" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_2198, onnx::Cast_2199, onnx::Div_2197, onnx::Unsqueeze_2200"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_2198,onnx::Cast_2199,onnx::Unsqueeze_2200"/>
-			</output>
-		</layer>
-		<layer id="1003" name="onnx::Unsqueeze_2203" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_2203"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_2203">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1004" name="onnx::Concat_2204" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_2204, onnx::Unsqueeze_2203"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_2204">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1005" name="Constant_88338" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_6183, onnx::Concat_2206, onnx::Gather_2176, onnx::Unsqueeze_2177, onnx::Unsqueeze_2205"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1006" name="Constant_6183" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_6183"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="1007" name="onnx::Unsqueeze_2177" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_6183, onnx::Concat_2206, onnx::Gather_2176, onnx::Unsqueeze_2177, onnx::Unsqueeze_2205"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_2206">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1008" name="onnx::Gather_2179" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_2179"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_2179"/>
-			</output>
-		</layer>
-		<layer id="1009" name="Constant_6187" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_6187"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="1010" name="onnx::Unsqueeze_2180" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_6187, onnx::Gather_2179, onnx::Unsqueeze_2180"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_2180"/>
-			</output>
-		</layer>
-		<layer id="1011" name="onnx::Mul_2201" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_2201"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_2201"/>
-			</output>
-		</layer>
-		<layer id="1012" name="onnx::Unsqueeze_2202" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_2201, onnx::Unsqueeze_2202"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_2202"/>
-			</output>
-		</layer>
-		<layer id="1013" name="onnx::Unsqueeze_2207" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_2207"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_2207">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1014" name="onnx::Concat_2208" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_2208, onnx::Unsqueeze_2207"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_2208">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1015" name="onnx::Reshape_2209" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2209"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
+		<layer id="995" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Constant_9" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="39374238" size="24" />
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_2209">
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Constant_9_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1016" name="onnx::MatMul_2210" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_2210"/>
-			</rt_info>
+		<layer id="996" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Reshape_7" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -17077,30 +16120,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_2210">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/Reshape_7_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1017" name="Constant_147666" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640" offset="153180164" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2212, onnx::MatMul_8558"/>
-			</rt_info>
+		<layer id="997" name="Constant_85203_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640" offset="76590440" size="819200" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>640</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1018" name="onnx::Add_2212" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="998" name="Constant_85203" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2212, onnx::MatMul_8558"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="999" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/to_out.0/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -17113,18 +16168,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_2212">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/to_out.0/MatMul_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1019" name="input.228" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.228"/>
-			</rt_info>
+		<layer id="1000" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/to_out.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>1</dim>
@@ -17138,18 +16190,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.228">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn1/to_out.0/Add_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1020" name="input.232" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.232"/>
-			</rt_info>
+		<layer id="1001" name="/down_blocks.1/attentions.1/transformer_blocks.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -17163,29 +16212,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.232">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/Add_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1021" name="Constant_6421" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_6421"/>
-			</rt_info>
+		<layer id="1002" name="Constant_17022" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1022" name="onnx::Mul_2223" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2219, onnx::Div_2222, onnx::Mul_2223, onnx::Pow_2216, onnx::ReduceMean_2218, onnx::Sqrt_2221, onnx::Sub_2215"/>
-			</rt_info>
+		<layer id="1003" name="/down_blocks.1/attentions.1/transformer_blocks.0/norm2/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -17197,28 +16240,45 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_2223">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/norm2/Div_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1023" name="Constant_150353" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="154818564" size="2560"/>
+		<layer id="1004" name="Constant_86943_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="77409640" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1024" name="onnx::Add_2224" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1005" name="Constant_86943" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2224"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1006" name="/down_blocks.1/attentions.1/transformer_blocks.0/norm2/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -17232,28 +16292,45 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_2224">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/norm2/Mul_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1025" name="Constant_150354" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="154821124" size="2560"/>
+		<layer id="1007" name="Constant_86944_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="77410920" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1026" name="onnx::MatMul_2225" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1008" name="Constant_86944" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_2225"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1009" name="/down_blocks.1/attentions.1/transformer_blocks.0/norm2/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -17267,30 +16344,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_2225">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/norm2/Add_1_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1027" name="Constant_147674" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640" offset="154823684" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8559, q.55"/>
-			</rt_info>
+		<layer id="1010" name="Constant_85211_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640" offset="77412200" size="819200" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>640</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1028" name="q.55" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="1011" name="Constant_85211" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8559, q.55"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1012" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/to_q/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -17303,161 +16392,35 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.55">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/to_q/MatMul_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1029" name="Constant_107268" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
+		<layer id="1013" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="40196022" size="32" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Constant_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1030" name="onnx::Gather_2232" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_2232, onnx::Gather_2235, onnx::Gather_2238"/>
-			</rt_info>
+		<layer id="1014" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_2232,onnx::Gather_2235,onnx::Gather_2238">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1031" name="onnx::Gather_2239" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_2239"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_2239"/>
-			</output>
-		</layer>
-		<layer id="1032" name="Constant_6447" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_6447"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="1033" name="onnx::Div_2240" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_6447, onnx::Div_2240, onnx::Gather_2239"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Div_2240"/>
-			</output>
-		</layer>
-		<layer id="1034" name="onnx::Div_2241" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_2241"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_2241"/>
-			</output>
-		</layer>
-		<layer id="1035" name="onnx::Cast_2242" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_2242, onnx::Cast_2243, onnx::Div_2241, onnx::Unsqueeze_2244"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_2242,onnx::Cast_2243,onnx::Unsqueeze_2244"/>
-			</output>
-		</layer>
-		<layer id="1036" name="onnx::Unsqueeze_2252" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_2252"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_2252">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1037" name="onnx::Concat_2253" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_2253, onnx::Unsqueeze_2252"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_2253">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1038" name="onnx::Reshape_2254" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_2254"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1039" name="onnx::Transpose_2255" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_2255"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_2255">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Reshape_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>8</dim>
@@ -17465,21 +16428,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1040" name="Constant_6560" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_6560"/>
-			</rt_info>
+		<layer id="1015" name="Constant_17045" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1041" name="onnx::Reshape_2256" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2256"/>
-			</rt_info>
+		<layer id="1016" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Transpose" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -17492,7 +16449,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_2256">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Transpose_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>1024</dim>
@@ -17500,205 +16457,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1042" name="onnx::Gather_2233" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_2233"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_2233"/>
-			</output>
-		</layer>
-		<layer id="1043" name="Constant_6439" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_6439"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="1044" name="onnx::Unsqueeze_2234" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_6439, onnx::Gather_2233, onnx::Unsqueeze_2234"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_2234"/>
-			</output>
-		</layer>
-		<layer id="1045" name="onnx::Mul_2257" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_2257"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_2257"/>
-			</output>
-		</layer>
-		<layer id="1046" name="onnx::Unsqueeze_2258" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_2257, onnx::Unsqueeze_2258"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_2258"/>
-			</output>
-		</layer>
-		<layer id="1047" name="onnx::Unsqueeze_2263" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_2263"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_2263">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1048" name="onnx::Concat_2264" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_2264, onnx::Unsqueeze_2263"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_2264">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1049" name="Constant_88365" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_6443, onnx::Concat_2266, onnx::Gather_2236, onnx::Unsqueeze_2237, onnx::Unsqueeze_2265"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1050" name="Constant_6443" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_6443"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="1051" name="onnx::Unsqueeze_2237" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_6443, onnx::Concat_2266, onnx::Gather_2236, onnx::Unsqueeze_2237, onnx::Unsqueeze_2265"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_2266">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1052" name="onnx::Div_2259" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_2259"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_2259"/>
-			</output>
-		</layer>
-		<layer id="1053" name="onnx::Cast_2260" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_2260, onnx::Cast_2261, onnx::Div_2259, onnx::Unsqueeze_2262"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_2260,onnx::Cast_2261,onnx::Unsqueeze_2262"/>
-			</output>
-		</layer>
-		<layer id="1054" name="onnx::Unsqueeze_2267" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_2267"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_2267">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1055" name="onnx::Concat_2268" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_2268, onnx::Unsqueeze_2267"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_2268">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1056" name="onnx::Reshape_2269" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2269"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
+		<layer id="1017" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="40196054" size="24" />
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_2269">
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Constant_1_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1057" name="q.59" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="q.59"/>
-			</rt_info>
+		<layer id="1018" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -17711,30 +16479,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.59">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Reshape_1_output_0">
 					<dim>16</dim>
 					<dim>1024</dim>
 					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1058" name="Constant_147681" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 768" offset="156462084" size="1966080"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.55, onnx::MatMul_8560"/>
-			</rt_info>
+		<layer id="1019" name="Constant_85218_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 768" offset="78231400" size="983040" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>640</dim>
 					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1059" name="k.55" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="1020" name="Constant_85218" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="k.55, onnx::MatMul_8560"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>768</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>768</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1021" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/to_k/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -17747,18 +16527,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.55">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/to_k/MatMul_output_0">
 					<dim>2</dim>
 					<dim>77</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1060" name="onnx::Transpose_2284" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_2284"/>
-			</rt_info>
+		<layer id="1022" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="44458512" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Constant_2_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1023" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Reshape_2" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -17770,7 +16555,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_2284">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Reshape_2_output_0">
 					<dim>2</dim>
 					<dim>77</dim>
 					<dim>8</dim>
@@ -17778,21 +16563,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1061" name="Constant_6681" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_6681"/>
-			</rt_info>
+		<layer id="1024" name="Constant_17061" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1062" name="onnx::Reshape_2285" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2285"/>
-			</rt_info>
+		<layer id="1025" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Transpose_1" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -17805,7 +16584,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_2285">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Transpose_1_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>77</dim>
@@ -17813,11 +16592,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1063" name="k.59" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.59"/>
-			</rt_info>
+		<layer id="1026" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="44458544" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Constant_3_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1027" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Reshape_3" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -17830,78 +16614,89 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.59">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Reshape_3_output_0">
 					<dim>16</dim>
 					<dim>77</dim>
 					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1064" name="onnx::Mul_2321" type="Einsum" version="opset7">
-			<data equation="bid,bjd-&gt;bij"/>
+		<layer id="1028" name="Constant_86945_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1" offset="41015278" size="2" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1029" name="Constant_86945" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_2321"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>80</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_2321">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>77</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1065" name="Constant_150355" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1" offset="82029992" size="4"/>
-			<output>
+		<layer id="1030" name="Multiply_86189" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
 				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>80</dim>
+				</port>
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
+			</input>
+			<output>
+				<port id="2" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>80</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="1066" name="onnx::Softmax_2323" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Softmax_2323"/>
-			</rt_info>
+		<layer id="1031" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Mul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
 					<dim>1024</dim>
-					<dim>77</dim>
+					<dim>80</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>80</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Softmax_2323">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Add_output_0,/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Mul_output_0">
 					<dim>16</dim>
 					<dim>1024</dim>
 					<dim>77</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1067" name="attn.27" type="SoftMax" version="opset8">
-			<data axis="-1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="attn.27"/>
-			</rt_info>
+		<layer id="1032" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Softmax" type="SoftMax" version="opset8">
+			<data axis="-1" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
@@ -17910,30 +16705,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="attn.27">
+				<port id="1" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Cast_output_0,/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Softmax_output_0">
 					<dim>16</dim>
 					<dim>1024</dim>
 					<dim>77</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1068" name="Constant_147688" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 768" offset="158428164" size="1966080"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8561, v.55"/>
-			</rt_info>
+		<layer id="1033" name="Constant_85225_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 768" offset="79214440" size="983040" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>640</dim>
 					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1069" name="v.55" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="1034" name="Constant_85225" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8561, v.55"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>768</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>768</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1035" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/to_v/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -17946,18 +16753,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.55">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/to_v/MatMul_output_0">
 					<dim>2</dim>
 					<dim>77</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1070" name="onnx::Transpose_2309" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_2309"/>
-			</rt_info>
+		<layer id="1036" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Constant_4" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="44458512" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Constant_4_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1037" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Reshape_4" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -17969,7 +16781,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_2309">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Reshape_4_output_0">
 					<dim>2</dim>
 					<dim>77</dim>
 					<dim>8</dim>
@@ -17977,21 +16789,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1071" name="Constant_6689" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_6689"/>
-			</rt_info>
+		<layer id="1038" name="Constant_17077" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1072" name="onnx::Reshape_2310" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2310"/>
-			</rt_info>
+		<layer id="1039" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Transpose_2" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -18004,7 +16810,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_2310">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Transpose_2_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>77</dim>
@@ -18012,11 +16818,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1073" name="v.59" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="v.59"/>
-			</rt_info>
+		<layer id="1040" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="44458544" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Constant_5_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1041" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Reshape_5" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -18029,18 +16840,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.59">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Reshape_5_output_0">
 					<dim>16</dim>
 					<dim>77</dim>
 					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1074" name="out.27" type="Einsum" version="opset7">
-			<data equation="bij,bjd-&gt;bid"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="out.27"/>
-			</rt_info>
+		<layer id="1042" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/MatMul_1" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="false" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
@@ -18054,585 +16862,304 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="out.27">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/MatMul_1_output_0">
 					<dim>16</dim>
 					<dim>1024</dim>
 					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1075" name="onnx::Gather_2326" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_2326, onnx::Gather_2329, onnx::Gather_2332"/>
-			</rt_info>
+		<layer id="1043" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Constant_8" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="41834480" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Constant_8_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1044" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Reshape_6" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
 					<dim>1024</dim>
 					<dim>80</dim>
 				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_2326,onnx::Gather_2329,onnx::Gather_2332">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Reshape_6_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1076" name="onnx::Gather_2327" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_2327"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_2327"/>
-			</output>
-		</layer>
-		<layer id="1077" name="Constant_6701" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_6701"/>
-			</rt_info>
+		<layer id="1045" name="Constant_17103" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64">
+					<dim>4</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="1078" name="onnx::Div_2328" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_6701, onnx::Div_2328, onnx::Gather_2327"/>
-			</rt_info>
+		<layer id="1046" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Transpose_4" type="Transpose" version="opset1">
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_2328"/>
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Transpose_4_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>8</dim>
+					<dim>80</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="1079" name="onnx::Div_2335" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_2335"/>
-			</rt_info>
+		<layer id="1047" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Constant_9" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="39374238" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_2335"/>
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Constant_9_output_0">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="1080" name="onnx::Cast_2336" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_2336, onnx::Cast_2337, onnx::Div_2335, onnx::Unsqueeze_2338"/>
-			</rt_info>
+		<layer id="1048" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Reshape_7" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>8</dim>
+					<dim>80</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>3</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_2336,onnx::Cast_2337,onnx::Unsqueeze_2338"/>
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/Reshape_7_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="1081" name="onnx::Unsqueeze_2340" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_2340"/>
-			</rt_info>
+		<layer id="1049" name="Constant_85232_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640" offset="80197480" size="819200" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_2340">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1082" name="onnx::Concat_2341" type="Unsqueeze" version="opset1">
+		<layer id="1050" name="Constant_85232" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_2341, onnx::Unsqueeze_2340"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_2341">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1083" name="Constant_90637" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="19464604" size="16"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_2348"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1084" name="Constant_90638" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="1085" name="Gather_90639" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_2348"/>
-			</rt_info>
+		<layer id="1051" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/to_out.0/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/to_out.0/MatMul_output_0">
 					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1086" name="onnx::Reshape_2348" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_2348"/>
-			</rt_info>
+		<layer id="1052" name="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/to_out.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
 					<dim>1</dim>
+					<dim>640</dim>
 				</port>
-				<port id="2" precision="I64">
+				<port id="1" precision="FP32">
 					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_2348">
-					<dim>4</dim>
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/attn2/to_out.0/Add_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1087" name="onnx::Transpose_2349" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_2349"/>
-			</rt_info>
+		<layer id="1053" name="/down_blocks.1/attentions.1/transformer_blocks.0/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
+					<dim>2</dim>
 					<dim>1024</dim>
-					<dim>80</dim>
+					<dim>640</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_2349">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/Add_1_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>1024</dim>
-					<dim>80</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1088" name="Constant_6822" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_6822"/>
-			</rt_info>
+		<layer id="1054" name="Constant_17115" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1089" name="onnx::Reshape_2350" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2350"/>
-			</rt_info>
+		<layer id="1055" name="/down_blocks.1/attentions.1/transformer_blocks.0/norm3/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>1024</dim>
-					<dim>80</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_2350">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/norm3/Div_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
-					<dim>8</dim>
-					<dim>80</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1090" name="onnx::Div_2351" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_2351"/>
-			</rt_info>
+		<layer id="1056" name="Constant_86947_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="81016680" size="1280" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_2351"/>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="1091" name="onnx::Cast_2352" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
+		<layer id="1057" name="Constant_86947" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_2352, onnx::Cast_2353, onnx::Div_2351, onnx::Unsqueeze_2354"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_2352,onnx::Cast_2353,onnx::Unsqueeze_2354"/>
-			</output>
-		</layer>
-		<layer id="1092" name="onnx::Unsqueeze_2357" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_2357"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_2357">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1093" name="onnx::Concat_2358" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_2358, onnx::Unsqueeze_2357"/>
-			</rt_info>
+		<layer id="1058" name="/down_blocks.1/attentions.1/transformer_blocks.0/norm3/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
+				</port>
+				<port id="1" precision="FP32">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_2358">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/norm3/Mul_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1094" name="Constant_88392" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_6705, onnx::Concat_2360, onnx::Gather_2330, onnx::Unsqueeze_2331, onnx::Unsqueeze_2359"/>
-			</rt_info>
+		<layer id="1059" name="Constant_86948_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="81017960" size="1280" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1095" name="Constant_6705" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_6705"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="1096" name="onnx::Unsqueeze_2331" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_6705, onnx::Concat_2360, onnx::Gather_2330, onnx::Unsqueeze_2331, onnx::Unsqueeze_2359"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_2360">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1097" name="onnx::Gather_2333" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_2333"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_2333"/>
-			</output>
-		</layer>
-		<layer id="1098" name="Constant_6709" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_6709"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="1099" name="onnx::Unsqueeze_2334" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_6709, onnx::Gather_2333, onnx::Unsqueeze_2334"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_2334"/>
-			</output>
-		</layer>
-		<layer id="1100" name="onnx::Mul_2355" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_2355"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_2355"/>
-			</output>
-		</layer>
-		<layer id="1101" name="onnx::Unsqueeze_2356" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_2355, onnx::Unsqueeze_2356"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_2356"/>
-			</output>
-		</layer>
-		<layer id="1102" name="onnx::Unsqueeze_2361" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_2361"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_2361">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1103" name="onnx::Concat_2362" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_2362, onnx::Unsqueeze_2361"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_2362">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1104" name="onnx::Reshape_2363" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2363"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_2363">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1105" name="onnx::MatMul_2364" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_2364"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>8</dim>
-					<dim>80</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_2364">
-					<dim>2</dim>
-					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1106" name="Constant_147695" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640" offset="160394244" size="1638400"/>
+		<layer id="1060" name="Constant_86948" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2366, onnx::MatMul_8582"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>640</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1107" name="onnx::Add_2366" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2366, onnx::MatMul_8582"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>640</dim>
-					<dim>640</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_2366">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1108" name="input.236" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.236"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>640</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.236">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1109" name="input.240" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.240"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
 				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="input.240">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1110" name="Constant_6943" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_6943"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
 					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1111" name="onnx::Mul_2377" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2373, onnx::Div_2376, onnx::Mul_2377, onnx::Pow_2370, onnx::ReduceMean_2372, onnx::Sqrt_2375, onnx::Sub_2369"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="I64">
 					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_2377">
-					<dim>2</dim>
-					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1112" name="Constant_150357" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="162032644" size="2560"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1113" name="onnx::Add_2378" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2378"/>
-			</rt_info>
+		<layer id="1061" name="/down_blocks.1/attentions.1/transformer_blocks.0/norm3/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -18646,65 +17173,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_2378">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/norm3/Add_1_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1114" name="Constant_150358" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="162035204" size="2560"/>
+		<layer id="1062" name="Constant_85240_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="5120, 640" offset="81019240" size="6553600" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>5120</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1115" name="onnx::MatMul_2379" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1063" name="Constant_85240" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_2379"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>5120</dim>
 					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_2379">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1116" name="Constant_147703" type="Const" version="opset1">
-			<data element_type="f32" shape="5120, 640" offset="162037764" size="13107200"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2381, onnx::MatMul_8583"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>5120</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1117" name="onnx::Add_2381" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2381, onnx::MatMul_8583"/>
-			</rt_info>
+		<layer id="1064" name="/down_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/proj/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -18717,18 +17221,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_2381">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/proj/MatMul_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1118" name="onnx::Shape_2382" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Shape_2382"/>
-			</rt_info>
+		<layer id="1065" name="/down_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/proj/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>1</dim>
@@ -18742,51 +17243,39 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Shape_2382">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/proj/Add_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1119" name="Constant_126270" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_6971, onnx::Gather_2384, onnx::Mul_2393"/>
-			</rt_info>
+		<layer id="1066" name="Constant_76811" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1120" name="Constant_126271" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_6971, onnx::Gather_2384, onnx::Mul_2393"/>
-			</rt_info>
+		<layer id="1067" name="Constant_76812" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1121" name="Constant_126267" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_6971, onnx::Gather_2384, onnx::Mul_2393"/>
-			</rt_info>
+		<layer id="1068" name="Constant_76808" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1122" name="onnx::Gather_2383" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_2383"/>
-			</rt_info>
+		<layer id="1069" name="/down_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -18795,36 +17284,27 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_2383">
+				<port id="1" precision="I64" names="/down_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Shape_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1123" name="onnx::Gather_2384" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_2384"/>
-			</rt_info>
+		<layer id="1070" name="/down_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Gather_2384">
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Constant_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1124" name="Constant_6960" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_6960"/>
-			</rt_info>
+		<layer id="1071" name="Constant_17132" type="Const" version="opset1">
+			<data element_type="i64" shape="" offset="2143392" size="8" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64" />
 			</output>
 		</layer>
-		<layer id="1125" name="onnx::Add_2385" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_6960, onnx::Add_2385, onnx::Gather_2384"/>
-			</rt_info>
+		<layer id="1072" name="/down_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Gather" type="Gather" version="opset8">
+			<data batch_dims="0" />
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
@@ -18832,30 +17312,24 @@
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
+				<port id="2" precision="I64" />
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Add_2385">
+				<port id="3" precision="I64" names="/down_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Gather_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1126" name="onnx::Add_2387" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2387"/>
-			</rt_info>
+		<layer id="1073" name="/down_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="2143400" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Add_2387">
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Constant_2_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1127" name="onnx::Div_2388" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2387, onnx::Div_2388"/>
-			</rt_info>
+		<layer id="1074" name="/down_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="I64">
 					<dim>1</dim>
@@ -18865,27 +17339,21 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Div_2388">
+				<port id="2" precision="I64" names="/down_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Add_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1128" name="onnx::Div_2389" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_2389"/>
-			</rt_info>
+		<layer id="1075" name="/down_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_2389">
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Constant_3_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1129" name="onnx::Mul_2390" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_2389, onnx::Mul_2390, onnx::Mul_2391, onnx::Slice_2392"/>
-			</rt_info>
+		<layer id="1076" name="/down_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Div" type="Divide" version="opset1">
+			<data auto_broadcast="numpy" m_pythondiv="true" />
 			<input>
 				<port id="0" precision="I64">
 					<dim>1</dim>
@@ -18895,26 +17363,20 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Mul_2390,onnx::Slice_2392">
+				<port id="2" precision="I64" names="/down_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Div_output_0,/down_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Mul_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1130" name="Constant_126266" type="Const" version="opset1">
-			<data element_type="i32" shape="1" offset="25941500" size="4"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_6971, onnx::Gather_2384, onnx::Mul_2393"/>
-			</rt_info>
+		<layer id="1077" name="Constant_76807" type="Const" version="opset1">
+			<data element_type="i32" shape="1" offset="12970946" size="4" />
 			<output>
 				<port id="0" precision="I32">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1131" name="ScatterUpdate_126272" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_6971, onnx::Gather_2384, onnx::Mul_2393"/>
-			</rt_info>
+		<layer id="1078" name="ScatterUpdate_76813" type="ScatterUpdate" version="opset3">
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
@@ -18935,22 +17397,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1132" name="Constant_126275" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941504" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_6971, onnx::Gather_2384, onnx::Mul_2393"/>
-			</rt_info>
+		<layer id="1079" name="Constant_76816" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970950" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1133" name="onnx::Mul_2393" type="StridedSlice" version="opset1">
-			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask=""/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_6971, onnx::Gather_2384, onnx::Mul_2393"/>
-			</rt_info>
+		<layer id="1080" name="/down_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Slice" type="StridedSlice" version="opset1">
+			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask="" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -18968,50 +17424,38 @@
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="FP32" names="onnx::Mul_2393">
+				<port id="4" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Slice_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1134" name="Constant_126339" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_7040, onnx::Div_2396, onnx::Gather_2384"/>
-			</rt_info>
+		<layer id="1081" name="Constant_76880" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1135" name="Constant_126338" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_7040, onnx::Div_2396, onnx::Gather_2384"/>
-			</rt_info>
+		<layer id="1082" name="Constant_76879" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1136" name="Constant_126337" type="Const" version="opset1">
-			<data element_type="i32" shape="1" offset="25941500" size="4"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_7040, onnx::Div_2396, onnx::Gather_2384"/>
-			</rt_info>
+		<layer id="1083" name="Constant_76878" type="Const" version="opset1">
+			<data element_type="i32" shape="1" offset="12970946" size="4" />
 			<output>
 				<port id="0" precision="I32">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1137" name="ScatterUpdate_126340" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_7040, onnx::Div_2396, onnx::Gather_2384"/>
-			</rt_info>
+		<layer id="1084" name="ScatterUpdate_76881" type="ScatterUpdate" version="opset3">
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
@@ -19032,33 +17476,24 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1138" name="Constant_126341" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_7040, onnx::Div_2396, onnx::Gather_2384"/>
-			</rt_info>
+		<layer id="1085" name="Constant_76882" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1139" name="onnx::Mul_2394" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_2394"/>
-			</rt_info>
+		<layer id="1086" name="/down_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Mul_2394">
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Constant_5_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1140" name="onnx::Slice_2395" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_2394, onnx::Slice_2395"/>
-			</rt_info>
+		<layer id="1087" name="/down_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Mul_1" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="I64">
 					<dim>1</dim>
@@ -19068,15 +17503,12 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Slice_2395">
+				<port id="2" precision="I64" names="/down_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Mul_1_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1141" name="ScatterUpdate_126342" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_7040, onnx::Div_2396, onnx::Gather_2384"/>
-			</rt_info>
+		<layer id="1088" name="ScatterUpdate_76883" type="ScatterUpdate" version="opset3">
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
@@ -19097,22 +17529,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1142" name="Constant_126345" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941504" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_7040, onnx::Div_2396, onnx::Gather_2384"/>
-			</rt_info>
+		<layer id="1089" name="Constant_76886" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970950" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1143" name="onnx::Div_2396" type="StridedSlice" version="opset1">
-			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask=""/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_7040, onnx::Div_2396, onnx::Gather_2384"/>
-			</rt_info>
+		<layer id="1090" name="/down_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Slice_1" type="StridedSlice" version="opset1">
+			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask="" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -19130,18 +17556,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="FP32" names="onnx::Div_2396">
+				<port id="4" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Slice_1_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1144" name="onnx::Mul_2404" type="Gelu" version="opset7">
-			<data approximation_mode="ERF"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2399, onnx::Erf_2398, onnx::Mul_2401, onnx::Mul_2402, onnx::Mul_2404"/>
-			</rt_info>
+		<layer id="1091" name="/down_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Mul_3" type="Gelu" version="opset7">
+			<data approximation_mode="ERF" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -19150,18 +17573,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="onnx::Mul_2404">
+				<port id="1" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Mul_3_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1145" name="input.244" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.244"/>
-			</rt_info>
+		<layer id="1092" name="/down_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Mul_4" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -19175,30 +17595,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.244">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Mul_4_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1146" name="Constant_147711" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 2560" offset="175144964" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2407, onnx::MatMul_8584"/>
-			</rt_info>
+		<layer id="1093" name="Constant_85248_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 2560" offset="87572840" size="3276800" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>640</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1147" name="onnx::Add_2407" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="1094" name="Constant_85248" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2407, onnx::MatMul_8584"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>2560</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>2560</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1095" name="/down_blocks.1/attentions.1/transformer_blocks.0/ff/net.2/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -19211,18 +17643,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_2407">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/ff/net.2/MatMul_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1148" name="onnx::Add_2408" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2408"/>
-			</rt_info>
+		<layer id="1096" name="/down_blocks.1/attentions.1/transformer_blocks.0/ff/net.2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>1</dim>
@@ -19236,18 +17665,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_2408">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/ff/net.2/Add_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1149" name="onnx::Reshape_2409" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2409"/>
-			</rt_info>
+		<layer id="1097" name="/down_blocks.1/attentions.1/transformer_blocks.0/Add_2" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -19261,18 +17687,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_2409">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/transformer_blocks.0/Add_2_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1150" name="onnx::Transpose_2419" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_2419"/>
-			</rt_info>
+		<layer id="1098" name="/down_blocks.1/attentions.1/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="56093768" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.1/attentions.1/Constant_1_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1099" name="/down_blocks.1/attentions.1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -19284,7 +17715,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_2419">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>32</dim>
@@ -19292,21 +17723,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1151" name="Constant_7121" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="27579960" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_7121"/>
-			</rt_info>
+		<layer id="1100" name="Constant_17297" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="13790206" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1152" name="input.248" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.248"/>
-			</rt_info>
+		<layer id="1101" name="/down_blocks.1/attentions.1/Transpose_1" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -19319,7 +17744,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.248">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/Transpose_1_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -19327,13 +17752,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1153" name="m.down_blocks.1.attentions.1.proj_out.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640, 1, 1" offset="181698564" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.1.attentions.1.proj_out.weight"/>
-			</rt_info>
+		<layer id="1102" name="down_blocks.1.attentions.1.proj_out.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640, 1, 1" offset="90849640" size="819200" />
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.1.attentions.1.proj_out.weight">
+				<port id="0" precision="FP16">
 					<dim>640</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -19341,11 +17763,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1154" name="Convolution_7123" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
+		<layer id="1103" name="down_blocks.1.attentions.1.proj_out.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_7123"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="down_blocks.1.attentions.1.proj_out.weight">
+					<dim>640</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1104" name="/down_blocks.1/attentions.1/proj_out/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -19369,10 +17810,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1155" name="Reshape_7143" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="183336964" size="2560"/>
+		<layer id="1105" name="Reshape_17319_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="91668840" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -19380,11 +17821,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1156" name="onnx::Add_2421" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1106" name="Reshape_17319" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_7142, Reshape_7143, onnx::Add_2421"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1107" name="/down_blocks.1/attentions.1/proj_out/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -19400,7 +17860,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_2421">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/proj_out/Conv_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -19408,11 +17868,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1157" name="input.252" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.252"/>
-			</rt_info>
+		<layer id="1108" name="/down_blocks.1/attentions.1/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -19428,7 +17885,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.252">
+				<port id="2" precision="FP32" names="/down_blocks.1/attentions.1/Add_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -19436,13 +17893,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1158" name="m.down_blocks.1.downsamplers.0.conv.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640, 3, 3" offset="183339524" size="14745600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.1.downsamplers.0.conv.weight"/>
-			</rt_info>
+		<layer id="1109" name="down_blocks.1.downsamplers.0.conv.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640, 3, 3" offset="91670120" size="7372800" />
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.1.downsamplers.0.conv.weight">
+				<port id="0" precision="FP16">
 					<dim>640</dim>
 					<dim>640</dim>
 					<dim>3</dim>
@@ -19450,11 +17904,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1159" name="Convolution_7172" type="Convolution" version="opset1">
-			<data strides="2, 2" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
+		<layer id="1110" name="down_blocks.1.downsamplers.0.conv.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_7172"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="down_blocks.1.downsamplers.0.conv.weight">
+					<dim>640</dim>
+					<dim>640</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1111" name="/down_blocks.1/downsamplers.0/conv/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="2, 2" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -19478,10 +17951,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1160" name="Reshape_7192" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="198085124" size="2560"/>
+		<layer id="1112" name="Reshape_17368_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="99042920" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -19489,11 +17962,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1161" name="onnx::Cast_2423" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1113" name="Reshape_17368" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_7191, Reshape_7192, input.256, onnx::Cast_2423"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1114" name="/down_blocks.1/downsamplers.0/conv/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -19509,7 +18001,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.256,onnx::Cast_2423">
+				<port id="2" precision="FP32" names="/down_blocks.1/downsamplers.0/conv/Conv_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>16</dim>
@@ -19517,13 +18009,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1162" name="m.down_blocks.2.resnets.0.conv_shortcut.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 640, 1, 1" offset="198087684" size="3276800"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.2.resnets.0.conv_shortcut.weight"/>
-			</rt_info>
+		<layer id="1115" name="down_blocks.2.resnets.0.conv_shortcut.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 640, 1, 1" offset="99044200" size="1638400" />
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.2.resnets.0.conv_shortcut.weight">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -19531,11 +18020,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1163" name="Convolution_7560" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
+		<layer id="1116" name="down_blocks.2.resnets.0.conv_shortcut.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_7560"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="down_blocks.2.resnets.0.conv_shortcut.weight">
+					<dim>1280</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1117" name="/down_blocks.2/resnets.0/conv_shortcut/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -19559,10 +18067,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1164" name="Reshape_7580" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="201364484" size="5120"/>
+		<layer id="1118" name="Reshape_17751_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="100682600" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -19570,11 +18078,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1165" name="onnx::Add_2468" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1119" name="Reshape_17751" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_7579, Reshape_7580, onnx::Add_2468"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1120" name="/down_blocks.2/resnets.0/conv_shortcut/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -19590,7 +18117,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_2468">
+				<port id="2" precision="FP32" names="/down_blocks.2/resnets.0/conv_shortcut/Conv_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -19598,22 +18125,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1166" name="onnx::Reshape_2425" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2425"/>
-			</rt_info>
+		<layer id="1121" name="/down_blocks.2/resnets.0/norm1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_2425">
+				<port id="0" precision="I64" names="/down_blocks.2/resnets.0/norm1/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1167" name="onnx::InstanceNormalization_2426" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_2426"/>
-			</rt_info>
+		<layer id="1122" name="/down_blocks.2/resnets.0/norm1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -19626,29 +18147,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_2426">
+				<port id="2" precision="FP32" names="/down_blocks.2/resnets.0/norm1/Reshape_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1168" name="Constant_7230" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_7230"/>
-			</rt_info>
+		<layer id="1123" name="Constant_17405" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1169" name="MVN_7231" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_7250, Concat_7295, MVN_7231, Multiply_7278, Reshape_7251, Reshape_7296, onnx::Reshape_2429"/>
-			</rt_info>
+		<layer id="1124" name="MVN_17406" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -19660,18 +18175,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_2429">
+				<port id="2" precision="FP32" names="/down_blocks.2/resnets.0/norm1/InstanceNormalization_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1170" name="onnx::Reshape_2430" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2430"/>
-			</rt_info>
+		<layer id="1125" name="/down_blocks.2/resnets.0/norm1/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -19681,16 +18193,13 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_2430">
+				<port id="1" precision="I64" names="/down_blocks.2/resnets.0/norm1/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1171" name="onnx::Mul_2431" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_2431"/>
-			</rt_info>
+		<layer id="1126" name="/down_blocks.2/resnets.0/norm1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -19702,7 +18211,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_2431">
+				<port id="2" precision="FP32" names="/down_blocks.2/resnets.0/norm1/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>16</dim>
@@ -19710,10 +18219,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1172" name="Constant_150361" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="201369604" size="2560"/>
+		<layer id="1127" name="Constant_86951_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="100685160" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -19721,11 +18230,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1173" name="onnx::Add_2434" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1128" name="Constant_86951" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2434"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1129" name="/down_blocks.2/resnets.0/norm1/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -19741,7 +18269,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_2434">
+				<port id="2" precision="FP32" names="/down_blocks.2/resnets.0/norm1/Mul_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>16</dim>
@@ -19749,10 +18277,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1174" name="Constant_150362" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="201372164" size="2560"/>
+		<layer id="1130" name="Constant_86952_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="100686440" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -19760,11 +18288,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1175" name="onnx::Cast_2437" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1131" name="Constant_86952" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.260, onnx::Cast_2437"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1132" name="/down_blocks.2/resnets.0/norm1/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -19780,7 +18327,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.260,onnx::Cast_2437">
+				<port id="2" precision="FP32" names="/down_blocks.2/resnets.0/norm1/Add_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>16</dim>
@@ -19788,10 +18335,7 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1176" name="input.264" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.264, onnx::Mul_2439"/>
-			</rt_info>
+		<layer id="1133" name="/down_blocks.2/resnets.0/nonlinearity/Mul" type="Swish" version="opset4">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -19801,7 +18345,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="input.264">
+				<port id="1" precision="FP32" names="/down_blocks.2/resnets.0/nonlinearity/Mul_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>16</dim>
@@ -19809,13 +18353,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1177" name="m.down_blocks.2.resnets.0.conv1.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 640, 3, 3" offset="201374724" size="29491200"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.2.resnets.0.conv1.weight"/>
-			</rt_info>
+		<layer id="1134" name="down_blocks.2.resnets.0.conv1.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 640, 3, 3" offset="100687720" size="14745600" />
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.2.resnets.0.conv1.weight">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>640</dim>
 					<dim>3</dim>
@@ -19823,11 +18364,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1178" name="Convolution_7336" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
+		<layer id="1135" name="down_blocks.2.resnets.0.conv1.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_7336"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>640</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="down_blocks.2.resnets.0.conv1.weight">
+					<dim>1280</dim>
+					<dim>640</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1136" name="/down_blocks.2/resnets.0/conv1/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -19851,10 +18411,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1179" name="Reshape_7356" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="230865924" size="5120"/>
+		<layer id="1137" name="Reshape_17530_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="115433320" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -19862,11 +18422,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1180" name="onnx::Add_2441" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1138" name="Reshape_17530" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_7355, Reshape_7356, onnx::Add_2441"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1139" name="/down_blocks.2/resnets.0/conv1/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -19882,7 +18461,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_2441">
+				<port id="2" precision="FP32" names="/down_blocks.2/resnets.0/conv1/Conv_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -19890,40 +18469,35 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1181" name="onnx::Gemm_2443" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gemm_2443, onnx::Mul_2442"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-				</port>
-			</input>
+		<layer id="1140" name="down_blocks.2.resnets.0.time_emb_proj.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="115435880" size="3276800" />
 			<output>
-				<port id="1" precision="FP32" names="onnx::Gemm_2443">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1182" name="m.down_blocks.2.resnets.0.time_emb_proj.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="230871044" size="6553600"/>
+		<layer id="1141" name="down_blocks.2.resnets.0.time_emb_proj.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.2.resnets.0.time_emb_proj.weight"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.2.resnets.0.time_emb_proj.weight">
+				<port id="1" precision="FP32" names="down_blocks.2.resnets.0.time_emb_proj.weight">
 					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1183" name="MatMul_7388" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="MatMul_7388"/>
-			</rt_info>
+		<layer id="1142" name="/down_blocks.2/resnets.0/time_emb_proj/Gemm/WithoutBiases" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -19941,20 +18515,35 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1184" name="Constant_150363" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280" offset="237424644" size="5120"/>
+		<layer id="1143" name="Constant_86953_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280" offset="118712680" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1185" name="onnx::Unsqueeze_2444" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1144" name="Constant_86953" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Multiply_7389, onnx::Unsqueeze_2444"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1145" name="/down_blocks.2/resnets.0/time_emb_proj/Gemm" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -19966,27 +18555,21 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_2444">
+				<port id="2" precision="FP32" names="/down_blocks.2/resnets.0/time_emb_proj/Gemm_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1186" name="onnx::Unsqueeze_2445" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_2445"/>
-			</rt_info>
+		<layer id="1146" name="/down_blocks.2/resnets.0/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_2445">
+				<port id="0" precision="I64" names="/down_blocks.2/resnets.0/Constant_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1187" name="onnx::Unsqueeze_2446" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_2446"/>
-			</rt_info>
+		<layer id="1147" name="/down_blocks.2/resnets.0/Unsqueeze" type="Unsqueeze" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -19997,28 +18580,22 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_2446">
+				<port id="2" precision="FP32" names="/down_blocks.2/resnets.0/Unsqueeze_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1188" name="onnx::Unsqueeze_2447" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="14129392" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_2447"/>
-			</rt_info>
+		<layer id="1148" name="/down_blocks.2/resnets.0/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="7064752" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_2447">
+				<port id="0" precision="I64" names="/down_blocks.2/resnets.0/Constant_1_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1189" name="onnx::Add_2448" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2448"/>
-			</rt_info>
+		<layer id="1149" name="/down_blocks.2/resnets.0/Unsqueeze_1" type="Unsqueeze" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -20030,7 +18607,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_2448">
+				<port id="2" precision="FP32" names="/down_blocks.2/resnets.0/Unsqueeze_1_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -20038,11 +18615,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1190" name="onnx::Cast_2449" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.268, onnx::Cast_2449"/>
-			</rt_info>
+		<layer id="1150" name="/down_blocks.2/resnets.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -20058,7 +18632,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.268,onnx::Cast_2449">
+				<port id="2" precision="FP32" names="/down_blocks.2/resnets.0/Add_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -20066,22 +18640,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1191" name="onnx::Reshape_2451" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2451"/>
-			</rt_info>
+		<layer id="1151" name="/down_blocks.2/resnets.0/norm2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_2451">
+				<port id="0" precision="I64" names="/down_blocks.2/resnets.0/norm2/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1192" name="onnx::InstanceNormalization_2452" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_2452"/>
-			</rt_info>
+		<layer id="1152" name="/down_blocks.2/resnets.0/norm2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -20094,29 +18662,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_2452">
+				<port id="2" precision="FP32" names="/down_blocks.2/resnets.0/norm2/Reshape_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1193" name="Constant_7406" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_7406"/>
-			</rt_info>
+		<layer id="1153" name="Constant_17578" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1194" name="MVN_7407" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_7426, Concat_7471, MVN_7407, Multiply_7454, Reshape_7427, Reshape_7472, onnx::Reshape_2455"/>
-			</rt_info>
+		<layer id="1154" name="MVN_17579" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -20128,18 +18690,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_2455">
+				<port id="2" precision="FP32" names="/down_blocks.2/resnets.0/norm2/InstanceNormalization_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1195" name="onnx::Reshape_2456" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2456"/>
-			</rt_info>
+		<layer id="1155" name="/down_blocks.2/resnets.0/norm2/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -20149,16 +18708,13 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_2456">
+				<port id="1" precision="I64" names="/down_blocks.2/resnets.0/norm2/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1196" name="onnx::Mul_2457" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_2457"/>
-			</rt_info>
+		<layer id="1156" name="/down_blocks.2/resnets.0/norm2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -20170,7 +18726,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_2457">
+				<port id="2" precision="FP32" names="/down_blocks.2/resnets.0/norm2/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -20178,10 +18734,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1197" name="Constant_150364" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="237429764" size="5120"/>
+		<layer id="1157" name="Constant_86954_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="118715240" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -20189,11 +18745,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1198" name="onnx::Add_2460" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1158" name="Constant_86954" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2460"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1159" name="/down_blocks.2/resnets.0/norm2/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -20209,7 +18784,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_2460">
+				<port id="2" precision="FP32" names="/down_blocks.2/resnets.0/norm2/Mul_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -20217,10 +18792,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1199" name="Constant_150365" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="237434884" size="5120"/>
+		<layer id="1160" name="Constant_86955_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="118717800" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -20228,11 +18803,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1200" name="onnx::Cast_2463" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1161" name="Constant_86955" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.272, onnx::Cast_2463"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1162" name="/down_blocks.2/resnets.0/norm2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -20248,7 +18842,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.272,onnx::Cast_2463">
+				<port id="2" precision="FP32" names="/down_blocks.2/resnets.0/norm2/Add_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -20256,10 +18850,7 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1201" name="input.276" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.276, onnx::Mul_2465"/>
-			</rt_info>
+		<layer id="1163" name="/down_blocks.2/resnets.0/nonlinearity_1/Mul" type="Swish" version="opset4">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -20269,7 +18860,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="input.276">
+				<port id="1" precision="FP32" names="/down_blocks.2/resnets.0/nonlinearity_1/Mul_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -20277,13 +18868,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1202" name="m.down_blocks.2.resnets.0.conv2.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280, 3, 3" offset="237440004" size="58982400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.2.resnets.0.conv2.weight"/>
-			</rt_info>
+		<layer id="1164" name="down_blocks.2.resnets.0.conv2.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280, 3, 3" offset="118720360" size="29491200" />
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.2.resnets.0.conv2.weight">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 					<dim>3</dim>
@@ -20291,11 +18879,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1203" name="Convolution_7512" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
+		<layer id="1165" name="down_blocks.2.resnets.0.conv2.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_7512"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="down_blocks.2.resnets.0.conv2.weight">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1166" name="/down_blocks.2/resnets.0/conv2/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -20319,10 +18926,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1204" name="Reshape_7532" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="296422404" size="5120"/>
+		<layer id="1167" name="Reshape_17703_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="148211560" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -20330,11 +18937,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1205" name="onnx::Add_2467" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1168" name="Reshape_17703" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_7531, Reshape_7532, onnx::Add_2467"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1169" name="/down_blocks.2/resnets.0/conv2/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -20350,7 +18976,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_2467">
+				<port id="2" precision="FP32" names="/down_blocks.2/resnets.0/conv2/Conv_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -20358,11 +18984,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1206" name="onnx::Div_2469" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.280, onnx::Div_2469"/>
-			</rt_info>
+		<layer id="1170" name="/down_blocks.2/resnets.0/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -20378,7 +19001,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.280,onnx::Div_2469">
+				<port id="2" precision="FP32" names="/down_blocks.2/resnets.0/Add_1_output_0,/down_blocks.2/resnets.0/Div_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -20386,22 +19009,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1207" name="onnx::Reshape_2476" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2476"/>
-			</rt_info>
+		<layer id="1171" name="/down_blocks.2/attentions.0/norm/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_2476">
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.0/norm/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1208" name="onnx::InstanceNormalization_2477" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_2477"/>
-			</rt_info>
+		<layer id="1172" name="/down_blocks.2/attentions.0/norm/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -20414,29 +19031,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_2477">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/norm/Reshape_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1209" name="Constant_7620" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_7620"/>
-			</rt_info>
+		<layer id="1173" name="Constant_17791" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1210" name="MVN_7621" type="MVN" version="opset6">
-			<data eps="9.9999999747524271e-07" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_7640, Concat_7685, MVN_7621, Multiply_7668, Reshape_7641, Reshape_7686, onnx::Reshape_2480"/>
-			</rt_info>
+		<layer id="1174" name="MVN_17792" type="MVN" version="opset6">
+			<data eps="9.9999999747524271e-07" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -20448,18 +19059,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_2480">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/norm/InstanceNormalization_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1211" name="onnx::Reshape_2481" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2481"/>
-			</rt_info>
+		<layer id="1175" name="/down_blocks.2/attentions.0/norm/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -20469,16 +19077,13 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_2481">
+				<port id="1" precision="I64" names="/down_blocks.2/attentions.0/norm/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1212" name="onnx::Mul_2482" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_2482"/>
-			</rt_info>
+		<layer id="1176" name="/down_blocks.2/attentions.0/norm/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -20490,7 +19095,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_2482">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/norm/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -20498,10 +19103,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1213" name="Constant_150366" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="296427524" size="5120"/>
+		<layer id="1177" name="Constant_86956_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="148214120" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -20509,11 +19114,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1214" name="onnx::Add_2485" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1178" name="Constant_86956" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2485"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1179" name="/down_blocks.2/attentions.0/norm/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -20529,7 +19153,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_2485">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/norm/Mul_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -20537,10 +19161,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1215" name="Constant_150367" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="296432644" size="5120"/>
+		<layer id="1180" name="Constant_86957_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="148216680" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -20548,11 +19172,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1216" name="input.284" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1181" name="Constant_86957" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.284"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1182" name="/down_blocks.2/attentions.0/norm/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -20568,7 +19211,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.284">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/norm/Add_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -20576,13 +19219,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1217" name="m.down_blocks.2.attentions.0.proj_in.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280, 1, 1" offset="296437764" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.2.attentions.0.proj_in.weight"/>
-			</rt_info>
+		<layer id="1183" name="down_blocks.2.attentions.0.proj_in.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280, 1, 1" offset="148219240" size="3276800" />
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.2.attentions.0.proj_in.weight">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -20590,11 +19230,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1218" name="Convolution_7723" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
+		<layer id="1184" name="down_blocks.2.attentions.0.proj_in.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_7723"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="down_blocks.2.attentions.0.proj_in.weight">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1185" name="/down_blocks.2/attentions.0/proj_in/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -20618,10 +19277,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1219" name="Reshape_7743" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="302991364" size="5120"/>
+		<layer id="1186" name="Reshape_17914_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="151496040" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -20629,11 +19288,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1220" name="onnx::Transpose_2489" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1187" name="Reshape_17914" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_7742, Reshape_7743, onnx::Transpose_2489"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1188" name="/down_blocks.2/attentions.0/proj_in/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -20649,7 +19327,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_2489">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/proj_in/Conv_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -20657,21 +19335,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1221" name="Constant_7771" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18233080" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_7771"/>
-			</rt_info>
+		<layer id="1189" name="Constant_17942" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9116600" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1222" name="onnx::Reshape_2490" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2490"/>
-			</rt_info>
+		<layer id="1190" name="/down_blocks.2/attentions.0/Transpose" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -20684,7 +19356,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_2490">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/Transpose_output_0">
 					<dim>2</dim>
 					<dim>16</dim>
 					<dim>16</dim>
@@ -20692,22 +19364,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1223" name="onnx::Reshape_8672" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="302996484" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_8672"/>
-			</rt_info>
+		<layer id="1191" name="/down_blocks.2/attentions.0/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="151498600" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_8599,onnx::Reshape_8672">
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.0/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1224" name="input.288" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.288"/>
-			</rt_info>
+		<layer id="1192" name="/down_blocks.2/attentions.0/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -20720,29 +19386,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.288">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/Reshape_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1225" name="Constant_7779" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_7779"/>
-			</rt_info>
+		<layer id="1193" name="Constant_17951" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1226" name="onnx::Mul_2508" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2504, onnx::Div_2507, onnx::Mul_2508, onnx::Pow_2501, onnx::ReduceMean_2503, onnx::Sqrt_2506, onnx::Sub_2500"/>
-			</rt_info>
+		<layer id="1194" name="/down_blocks.2/attentions.0/transformer_blocks.0/norm1/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -20754,28 +19414,45 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_2508">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/norm1/Div_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1227" name="Constant_150368" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="302996508" size="5120"/>
+		<layer id="1195" name="Constant_86958_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="151498624" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1228" name="onnx::Add_2509" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1196" name="Constant_86958" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2509"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1197" name="/down_blocks.2/attentions.0/transformer_blocks.0/norm1/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -20789,28 +19466,45 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_2509">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/norm1/Mul_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1229" name="Constant_150369" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="303001628" size="5120"/>
+		<layer id="1198" name="Constant_86959_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="151501184" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1230" name="onnx::MatMul_2510" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1199" name="Constant_86959" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_2510"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1200" name="/down_blocks.2/attentions.0/transformer_blocks.0/norm1/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -20824,30 +19518,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_2510">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/norm1/Add_1_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1231" name="Constant_147722" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="303006748" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8600, q.63"/>
-			</rt_info>
+		<layer id="1201" name="Constant_85258_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="151503744" size="3276800" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1232" name="q.63" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="1202" name="Constant_85258" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8600, q.63"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1203" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/to_q/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -20860,29 +19566,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.63">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/to_q/MatMul_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1233" name="onnx::Reshape_8616" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="309560348" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_8616"/>
-			</rt_info>
+		<layer id="1204" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="154780544" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_8607,onnx::Reshape_8616,onnx::Reshape_8625,onnx::Reshape_8680,onnx::Reshape_8689,onnx::Reshape_8698">
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Constant_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1234" name="onnx::Transpose_2530" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_2530"/>
-			</rt_info>
+		<layer id="1205" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -20894,7 +19594,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_2530">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Reshape_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>8</dim>
@@ -20902,21 +19602,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1235" name="Constant_7801" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_7801"/>
-			</rt_info>
+		<layer id="1206" name="Constant_17974" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1236" name="onnx::Reshape_2531" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2531"/>
-			</rt_info>
+		<layer id="1207" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Transpose" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -20929,7 +19623,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_2531">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Transpose_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>256</dim>
@@ -20937,22 +19631,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1237" name="onnx::Reshape_8620" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="309560380" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_8620"/>
-			</rt_info>
+		<layer id="1208" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="154780576" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_8611,onnx::Reshape_8620,onnx::Reshape_8629,onnx::Reshape_8684,onnx::Reshape_8693,onnx::Reshape_8702">
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Constant_1_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1238" name="q.67" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="q.67"/>
-			</rt_info>
+		<layer id="1209" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -20965,30 +19653,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.67">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Reshape_1_output_0">
 					<dim>16</dim>
 					<dim>256</dim>
 					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1239" name="Constant_147729" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="309560404" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.63, onnx::MatMul_8601"/>
-			</rt_info>
+		<layer id="1210" name="Constant_85265_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="154780600" size="3276800" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1240" name="k.63" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="1211" name="Constant_85265" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="k.63, onnx::MatMul_8601"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1212" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/to_k/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -21001,18 +19701,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.63">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/to_k/MatMul_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1241" name="onnx::Transpose_2555" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_2555"/>
-			</rt_info>
+		<layer id="1213" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="154780544" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Constant_2_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1214" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Reshape_2" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -21024,7 +19729,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_2555">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Reshape_2_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>8</dim>
@@ -21032,21 +19737,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1242" name="Constant_7812" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_7812"/>
-			</rt_info>
+		<layer id="1215" name="Constant_17990" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1243" name="onnx::Reshape_2556" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2556"/>
-			</rt_info>
+		<layer id="1216" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Transpose_1" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -21059,7 +19758,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_2556">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Transpose_1_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>256</dim>
@@ -21067,11 +19766,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1244" name="k.67" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.67"/>
-			</rt_info>
+		<layer id="1217" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="154780576" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Constant_3_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1218" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Reshape_3" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -21084,18 +19788,45 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.67">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Reshape_3_output_0">
 					<dim>16</dim>
 					<dim>256</dim>
 					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1245" name="onnx::Mul_2592" type="Einsum" version="opset7">
-			<data equation="bid,bjd-&gt;bij"/>
+		<layer id="1219" name="Constant_86960_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1" offset="158057400" size="2" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1220" name="Constant_86960" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_2592"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1221" name="Multiply_86191" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
@@ -21103,59 +19834,43 @@
 					<dim>160</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_2592">
+				<port id="2" precision="FP32">
 					<dim>16</dim>
 					<dim>256</dim>
-					<dim>256</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1246" name="Constant_150370" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1" offset="316114004" size="4"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1247" name="onnx::Softmax_2594" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Softmax_2594"/>
-			</rt_info>
+		<layer id="1222" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Mul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
 					<dim>256</dim>
-					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Softmax_2594">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Add_output_0,/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Mul_output_0">
 					<dim>16</dim>
 					<dim>256</dim>
 					<dim>256</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1248" name="attn.31" type="SoftMax" version="opset8">
-			<data axis="-1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="attn.31"/>
-			</rt_info>
+		<layer id="1223" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Softmax" type="SoftMax" version="opset8">
+			<data axis="-1" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
@@ -21164,30 +19879,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="attn.31">
+				<port id="1" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Cast_output_0,/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Softmax_output_0">
 					<dim>16</dim>
 					<dim>256</dim>
 					<dim>256</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1249" name="Constant_147736" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="316114008" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8602, v.63"/>
-			</rt_info>
+		<layer id="1224" name="Constant_85272_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="158057402" size="3276800" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1250" name="v.63" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="1225" name="Constant_85272" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8602, v.63"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1226" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/to_v/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -21200,18 +19927,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.63">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/to_v/MatMul_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1251" name="onnx::Transpose_2580" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_2580"/>
-			</rt_info>
+		<layer id="1227" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Constant_4" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="154780544" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Constant_4_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1228" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Reshape_4" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -21223,7 +19955,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_2580">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Reshape_4_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>8</dim>
@@ -21231,21 +19963,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1252" name="Constant_7820" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_7820"/>
-			</rt_info>
+		<layer id="1229" name="Constant_18006" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1253" name="onnx::Reshape_2581" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2581"/>
-			</rt_info>
+		<layer id="1230" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Transpose_2" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -21258,7 +19984,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_2581">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Transpose_2_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>256</dim>
@@ -21266,11 +19992,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1254" name="v.67" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="v.67"/>
-			</rt_info>
+		<layer id="1231" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="154780576" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Constant_5_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1232" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Reshape_5" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -21283,18 +20014,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.67">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Reshape_5_output_0">
 					<dim>16</dim>
 					<dim>256</dim>
 					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1255" name="out.31" type="Einsum" version="opset7">
-			<data equation="bij,bjd-&gt;bid"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="out.31"/>
-			</rt_info>
+		<layer id="1233" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/MatMul_1" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="false" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
@@ -21308,178 +20036,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="out.31">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1256" name="onnx::Gather_2597" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_2597, onnx::Gather_2600, onnx::Gather_2603"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/MatMul_1_output_0">
 					<dim>16</dim>
 					<dim>256</dim>
 					<dim>160</dim>
 				</port>
-			</input>
-			<output>
-				<port id="1" precision="I64" names="onnx::Gather_2597,onnx::Gather_2600,onnx::Gather_2603">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1257" name="onnx::Gather_2598" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_2598"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_2598"/>
-			</output>
-		</layer>
-		<layer id="1258" name="Constant_7832" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_7832"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="1259" name="onnx::Div_2599" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_7832, onnx::Div_2599, onnx::Gather_2598"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Div_2599"/>
-			</output>
-		</layer>
-		<layer id="1260" name="onnx::Div_2606" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_2606"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_2606"/>
-			</output>
-		</layer>
-		<layer id="1261" name="onnx::Cast_2607" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_2607, onnx::Cast_2608, onnx::Div_2606, onnx::Unsqueeze_2609"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_2607,onnx::Cast_2608,onnx::Unsqueeze_2609"/>
-			</output>
-		</layer>
-		<layer id="1262" name="onnx::Unsqueeze_2611" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_2611"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_2611">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1263" name="onnx::Concat_2612" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_2612, onnx::Unsqueeze_2611"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_2612">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1264" name="Constant_90642" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="19464604" size="16"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_2619"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1265" name="Constant_90643" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="1266" name="Gather_90644" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_2619"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>2</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64">
-					<dim>2</dim>
-				</port>
 			</output>
 		</layer>
-		<layer id="1267" name="onnx::Reshape_2619" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_2619"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>2</dim>
-				</port>
-			</input>
+		<layer id="1234" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Constant_8" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="161334202" size="32" />
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_2619">
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Constant_8_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1268" name="onnx::Transpose_2620" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_2620"/>
-			</rt_info>
+		<layer id="1235" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Reshape_6" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
@@ -21491,7 +20064,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_2620">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Reshape_6_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>256</dim>
@@ -21499,21 +20072,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1269" name="Constant_7953" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_7953"/>
-			</rt_info>
+		<layer id="1236" name="Constant_18032" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1270" name="onnx::Reshape_2621" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2621"/>
-			</rt_info>
+		<layer id="1237" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Transpose_4" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -21526,7 +20093,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_2621">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Transpose_4_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>8</dim>
@@ -21534,205 +20101,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1271" name="onnx::Div_2622" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_2622"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_2622"/>
-			</output>
-		</layer>
-		<layer id="1272" name="onnx::Cast_2623" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_2623, onnx::Cast_2624, onnx::Div_2622, onnx::Unsqueeze_2625"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_2623,onnx::Cast_2624,onnx::Unsqueeze_2625"/>
-			</output>
-		</layer>
-		<layer id="1273" name="onnx::Unsqueeze_2628" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_2628"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_2628">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1274" name="onnx::Concat_2629" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_2629, onnx::Unsqueeze_2628"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_2629">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1275" name="Constant_88419" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_7836, onnx::Concat_2631, onnx::Gather_2601, onnx::Unsqueeze_2602, onnx::Unsqueeze_2630"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1276" name="Constant_7836" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_7836"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="1277" name="onnx::Unsqueeze_2602" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_7836, onnx::Concat_2631, onnx::Gather_2601, onnx::Unsqueeze_2602, onnx::Unsqueeze_2630"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_2631">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1278" name="onnx::Gather_2604" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_2604"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_2604"/>
-			</output>
-		</layer>
-		<layer id="1279" name="Constant_7840" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_7840"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="1280" name="onnx::Unsqueeze_2605" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_7840, onnx::Gather_2604, onnx::Unsqueeze_2605"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_2605"/>
-			</output>
-		</layer>
-		<layer id="1281" name="onnx::Mul_2626" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_2626"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_2626"/>
-			</output>
-		</layer>
-		<layer id="1282" name="onnx::Unsqueeze_2627" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_2626, onnx::Unsqueeze_2627"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_2627"/>
-			</output>
-		</layer>
-		<layer id="1283" name="onnx::Unsqueeze_2632" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_2632"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_2632">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1284" name="onnx::Concat_2633" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_2633, onnx::Unsqueeze_2632"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_2633">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1285" name="onnx::Reshape_2634" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2634"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
+		<layer id="1238" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Constant_9" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="151498600" size="24" />
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_2634">
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Constant_9_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1286" name="onnx::MatMul_2635" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_2635"/>
-			</rt_info>
+		<layer id="1239" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Reshape_7" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -21745,30 +20123,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_2635">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/Reshape_7_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1287" name="Constant_147743" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="322667608" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2637, onnx::MatMul_8631"/>
-			</rt_info>
+		<layer id="1240" name="Constant_85279_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="161334234" size="3276800" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1288" name="onnx::Add_2637" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="1241" name="Constant_85279" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2637, onnx::MatMul_8631"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1242" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/to_out.0/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -21781,18 +20171,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_2637">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/to_out.0/MatMul_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1289" name="input.292" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.292"/>
-			</rt_info>
+		<layer id="1243" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/to_out.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>1</dim>
@@ -21806,18 +20193,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.292">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn1/to_out.0/Add_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1290" name="input.296" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.296"/>
-			</rt_info>
+		<layer id="1244" name="/down_blocks.2/attentions.0/transformer_blocks.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -21831,29 +20215,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.296">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/Add_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1291" name="Constant_8074" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_8074"/>
-			</rt_info>
+		<layer id="1245" name="Constant_18044" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1292" name="onnx::Mul_2648" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2644, onnx::Div_2647, onnx::Mul_2648, onnx::Pow_2641, onnx::ReduceMean_2643, onnx::Sqrt_2646, onnx::Sub_2640"/>
-			</rt_info>
+		<layer id="1246" name="/down_blocks.2/attentions.0/transformer_blocks.0/norm2/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -21865,28 +20243,45 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_2648">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/norm2/Div_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1293" name="Constant_150372" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="329221208" size="5120"/>
+		<layer id="1247" name="Constant_86962_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="164611034" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1294" name="onnx::Add_2649" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1248" name="Constant_86962" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2649"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1249" name="/down_blocks.2/attentions.0/transformer_blocks.0/norm2/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -21900,28 +20295,45 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_2649">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/norm2/Mul_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1295" name="Constant_150373" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="329226328" size="5120"/>
+		<layer id="1250" name="Constant_86963_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="164613594" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1296" name="onnx::MatMul_2650" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1251" name="Constant_86963" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_2650"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1252" name="/down_blocks.2/attentions.0/transformer_blocks.0/norm2/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -21935,30 +20347,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_2650">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/norm2/Add_1_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1297" name="Constant_147751" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="329231448" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8632, q.71"/>
-			</rt_info>
+		<layer id="1253" name="Constant_85287_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="164616154" size="3276800" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1298" name="q.71" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="1254" name="Constant_85287" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8632, q.71"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1255" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/to_q/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -21971,183 +20395,51 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.71">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/to_q/MatMul_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1299" name="Constant_107337" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
+		<layer id="1256" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="154780544" size="32" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Constant_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1300" name="onnx::Gather_2657" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_2657, onnx::Gather_2660, onnx::Gather_2663"/>
-			</rt_info>
+		<layer id="1257" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_2657,onnx::Gather_2660,onnx::Gather_2663">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Reshape_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>8</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1301" name="onnx::Gather_2664" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_2664"/>
-			</rt_info>
+		<layer id="1258" name="Constant_18067" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Gather_2664"/>
+				<port id="0" precision="I64">
+					<dim>4</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="1302" name="Constant_8100" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_8100"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="1303" name="onnx::Div_2665" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_8100, onnx::Div_2665, onnx::Gather_2664"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Div_2665"/>
-			</output>
-		</layer>
-		<layer id="1304" name="onnx::Div_2666" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_2666"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_2666"/>
-			</output>
-		</layer>
-		<layer id="1305" name="onnx::Cast_2667" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_2667, onnx::Cast_2668, onnx::Div_2666, onnx::Unsqueeze_2669"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_2667,onnx::Cast_2668,onnx::Unsqueeze_2669"/>
-			</output>
-		</layer>
-		<layer id="1306" name="onnx::Unsqueeze_2677" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_2677"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_2677">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1307" name="onnx::Concat_2678" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_2678, onnx::Unsqueeze_2677"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_2678">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1308" name="onnx::Reshape_2679" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_2679"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1309" name="onnx::Transpose_2680" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_2680"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_2680">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>8</dim>
-					<dim>160</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1310" name="Constant_8213" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_8213"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1311" name="onnx::Reshape_2681" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2681"/>
-			</rt_info>
+		<layer id="1259" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Transpose" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -22160,7 +20452,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_2681">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Transpose_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>256</dim>
@@ -22168,205 +20460,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1312" name="onnx::Gather_2658" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_2658"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_2658"/>
-			</output>
-		</layer>
-		<layer id="1313" name="Constant_8092" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_8092"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="1314" name="onnx::Unsqueeze_2659" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_8092, onnx::Gather_2658, onnx::Unsqueeze_2659"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_2659"/>
-			</output>
-		</layer>
-		<layer id="1315" name="onnx::Mul_2682" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_2682"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_2682"/>
-			</output>
-		</layer>
-		<layer id="1316" name="onnx::Unsqueeze_2683" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_2682, onnx::Unsqueeze_2683"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_2683"/>
-			</output>
-		</layer>
-		<layer id="1317" name="onnx::Unsqueeze_2688" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_2688"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_2688">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1318" name="onnx::Concat_2689" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_2689, onnx::Unsqueeze_2688"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_2689">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1319" name="Constant_88446" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_8096, onnx::Concat_2691, onnx::Gather_2661, onnx::Unsqueeze_2662, onnx::Unsqueeze_2690"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1320" name="Constant_8096" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_8096"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="1321" name="onnx::Unsqueeze_2662" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_8096, onnx::Concat_2691, onnx::Gather_2661, onnx::Unsqueeze_2662, onnx::Unsqueeze_2690"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_2691">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1322" name="onnx::Div_2684" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_2684"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_2684"/>
-			</output>
-		</layer>
-		<layer id="1323" name="onnx::Cast_2685" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_2685, onnx::Cast_2686, onnx::Div_2684, onnx::Unsqueeze_2687"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_2685,onnx::Cast_2686,onnx::Unsqueeze_2687"/>
-			</output>
-		</layer>
-		<layer id="1324" name="onnx::Unsqueeze_2692" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_2692"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_2692">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1325" name="onnx::Concat_2693" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_2693, onnx::Unsqueeze_2692"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_2693">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1326" name="onnx::Reshape_2694" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2694"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
+		<layer id="1260" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="154780576" size="24" />
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_2694">
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Constant_1_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1327" name="q.75" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="q.75"/>
-			</rt_info>
+		<layer id="1261" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -22379,30 +20482,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.75">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Reshape_1_output_0">
 					<dim>16</dim>
 					<dim>256</dim>
 					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1328" name="Constant_147758" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 768" offset="335785048" size="3932160"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.71, onnx::MatMul_8633"/>
-			</rt_info>
+		<layer id="1262" name="Constant_85294_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 768" offset="167892954" size="1966080" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1329" name="k.71" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="1263" name="Constant_85294" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="k.71, onnx::MatMul_8633"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>768</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>768</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1264" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/to_k/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -22415,29 +20530,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.71">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/to_k/MatMul_output_0">
 					<dim>2</dim>
 					<dim>77</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1330" name="onnx::Reshape_8649" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="339717208" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_8649"/>
-			</rt_info>
+		<layer id="1265" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="169859034" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_8640,onnx::Reshape_8649,onnx::Reshape_8713,onnx::Reshape_8722,onnx::Reshape_8794,onnx::Reshape_8803,onnx::Reshape_8856,onnx::Reshape_8865,onnx::Reshape_8896,onnx::Reshape_8905,onnx::Reshape_8936,onnx::Reshape_8945">
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Constant_2_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1331" name="onnx::Transpose_2709" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_2709"/>
-			</rt_info>
+		<layer id="1266" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Reshape_2" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -22449,7 +20558,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_2709">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Reshape_2_output_0">
 					<dim>2</dim>
 					<dim>77</dim>
 					<dim>8</dim>
@@ -22457,21 +20566,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1332" name="Constant_8337" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_8337"/>
-			</rt_info>
+		<layer id="1267" name="Constant_18083" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1333" name="onnx::Reshape_2710" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2710"/>
-			</rt_info>
+		<layer id="1268" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Transpose_1" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -22484,7 +20587,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_2710">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Transpose_1_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>77</dim>
@@ -22492,22 +20595,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1334" name="onnx::Reshape_8653" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="339717240" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_8653"/>
-			</rt_info>
+		<layer id="1269" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="169859066" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_8644,onnx::Reshape_8653,onnx::Reshape_8717,onnx::Reshape_8726,onnx::Reshape_8798,onnx::Reshape_8807,onnx::Reshape_8860,onnx::Reshape_8869,onnx::Reshape_8900,onnx::Reshape_8909,onnx::Reshape_8940,onnx::Reshape_8949">
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Constant_3_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1335" name="k.75" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.75"/>
-			</rt_info>
+		<layer id="1270" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Reshape_3" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -22520,78 +20617,89 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.75">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Reshape_3_output_0">
 					<dim>16</dim>
 					<dim>77</dim>
 					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1336" name="onnx::Mul_2746" type="Einsum" version="opset7">
-			<data equation="bid,bjd-&gt;bij"/>
+		<layer id="1271" name="Constant_86964_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1" offset="158057400" size="2" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1272" name="Constant_86964" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_2746"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>160</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_2746">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>77</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1337" name="Constant_150374" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1" offset="316114004" size="4"/>
-			<output>
+		<layer id="1273" name="Multiply_86193" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
 				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>160</dim>
+				</port>
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
+			</input>
+			<output>
+				<port id="2" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>160</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="1338" name="onnx::Softmax_2748" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Softmax_2748"/>
-			</rt_info>
+		<layer id="1274" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Mul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
 					<dim>256</dim>
-					<dim>77</dim>
+					<dim>160</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>160</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Softmax_2748">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Add_output_0,/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Mul_output_0">
 					<dim>16</dim>
 					<dim>256</dim>
 					<dim>77</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1339" name="attn.35" type="SoftMax" version="opset8">
-			<data axis="-1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="attn.35"/>
-			</rt_info>
+		<layer id="1275" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Softmax" type="SoftMax" version="opset8">
+			<data axis="-1" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
@@ -22600,30 +20708,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="attn.35">
+				<port id="1" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Cast_output_0,/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Softmax_output_0">
 					<dim>16</dim>
 					<dim>256</dim>
 					<dim>77</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1340" name="Constant_147765" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 768" offset="339717264" size="3932160"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8634, v.71"/>
-			</rt_info>
+		<layer id="1276" name="Constant_85301_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 768" offset="169859090" size="1966080" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1341" name="v.71" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="1277" name="Constant_85301" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8634, v.71"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>768</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>768</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1278" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/to_v/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -22636,18 +20756,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.71">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/to_v/MatMul_output_0">
 					<dim>2</dim>
 					<dim>77</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1342" name="onnx::Transpose_2734" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_2734"/>
-			</rt_info>
+		<layer id="1279" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Constant_4" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="169859034" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Constant_4_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1280" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Reshape_4" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -22659,7 +20784,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_2734">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Reshape_4_output_0">
 					<dim>2</dim>
 					<dim>77</dim>
 					<dim>8</dim>
@@ -22667,21 +20792,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1343" name="Constant_8348" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_8348"/>
-			</rt_info>
+		<layer id="1281" name="Constant_18099" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1344" name="onnx::Reshape_2735" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2735"/>
-			</rt_info>
+		<layer id="1282" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Transpose_2" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -22694,7 +20813,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_2735">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Transpose_2_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>77</dim>
@@ -22702,11 +20821,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1345" name="v.75" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="v.75"/>
-			</rt_info>
+		<layer id="1283" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="169859066" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Constant_5_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1284" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Reshape_5" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -22719,18 +20843,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.75">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Reshape_5_output_0">
 					<dim>16</dim>
 					<dim>77</dim>
 					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1346" name="out.35" type="Einsum" version="opset7">
-			<data equation="bij,bjd-&gt;bid"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="out.35"/>
-			</rt_info>
+		<layer id="1285" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/MatMul_1" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="false" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
@@ -22744,585 +20865,304 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="out.35">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/MatMul_1_output_0">
 					<dim>16</dim>
 					<dim>256</dim>
 					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1347" name="onnx::Gather_2751" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_2751, onnx::Gather_2754, onnx::Gather_2757"/>
-			</rt_info>
+		<layer id="1286" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Constant_8" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="161334202" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Constant_8_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1287" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Reshape_6" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
 					<dim>256</dim>
 					<dim>160</dim>
 				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_2751,onnx::Gather_2754,onnx::Gather_2757">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Reshape_6_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1348" name="onnx::Gather_2752" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_2752"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_2752"/>
-			</output>
-		</layer>
-		<layer id="1349" name="Constant_8360" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_8360"/>
-			</rt_info>
+		<layer id="1288" name="Constant_18125" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64">
+					<dim>4</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="1350" name="onnx::Div_2753" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_8360, onnx::Div_2753, onnx::Gather_2752"/>
-			</rt_info>
+		<layer id="1289" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Transpose_4" type="Transpose" version="opset1">
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>256</dim>
+					<dim>160</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_2753"/>
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Transpose_4_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>8</dim>
+					<dim>160</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="1351" name="onnx::Div_2760" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_2760"/>
-			</rt_info>
+		<layer id="1290" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Constant_9" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="151498600" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_2760"/>
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Constant_9_output_0">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="1352" name="onnx::Cast_2761" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_2761, onnx::Cast_2762, onnx::Div_2760, onnx::Unsqueeze_2763"/>
-			</rt_info>
+		<layer id="1291" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Reshape_7" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>8</dim>
+					<dim>160</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>3</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_2761,onnx::Cast_2762,onnx::Unsqueeze_2763"/>
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/Reshape_7_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="1353" name="onnx::Unsqueeze_2765" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_2765"/>
-			</rt_info>
+		<layer id="1292" name="Constant_85308_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="171825170" size="3276800" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_2765">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1354" name="onnx::Concat_2766" type="Unsqueeze" version="opset1">
+		<layer id="1293" name="Constant_85308" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_2766, onnx::Unsqueeze_2765"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_2766">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1355" name="Constant_90652" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="19464604" size="16"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_2773"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1356" name="Constant_90653" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="1357" name="Gather_90654" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_2773"/>
-			</rt_info>
+		<layer id="1294" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/to_out.0/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/to_out.0/MatMul_output_0">
 					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1358" name="onnx::Reshape_2773" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_2773"/>
-			</rt_info>
+		<layer id="1295" name="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/to_out.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="2" precision="I64">
+				<port id="1" precision="FP32">
 					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_2773">
-					<dim>4</dim>
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/attn2/to_out.0/Add_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1359" name="onnx::Transpose_2774" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_2774"/>
-			</rt_info>
+		<layer id="1296" name="/down_blocks.2/attentions.0/transformer_blocks.0/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
+					<dim>2</dim>
 					<dim>256</dim>
-					<dim>160</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_2774">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/Add_1_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>256</dim>
-					<dim>160</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1360" name="Constant_8481" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_8481"/>
-			</rt_info>
+		<layer id="1297" name="Constant_18137" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1361" name="onnx::Reshape_2775" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2775"/>
-			</rt_info>
+		<layer id="1298" name="/down_blocks.2/attentions.0/transformer_blocks.0/norm3/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>256</dim>
-					<dim>160</dim>
+					<dim>1280</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_2775">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/norm3/Div_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
-					<dim>8</dim>
-					<dim>160</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1362" name="onnx::Div_2776" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_2776"/>
-			</rt_info>
+		<layer id="1299" name="Constant_86966_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="175101970" size="2560" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_2776"/>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="1363" name="onnx::Cast_2777" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
+		<layer id="1300" name="Constant_86966" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_2777, onnx::Cast_2778, onnx::Div_2776, onnx::Unsqueeze_2779"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_2777,onnx::Cast_2778,onnx::Unsqueeze_2779"/>
-			</output>
-		</layer>
-		<layer id="1364" name="onnx::Unsqueeze_2782" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_2782"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_2782">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1365" name="onnx::Concat_2783" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_2783, onnx::Unsqueeze_2782"/>
-			</rt_info>
+		<layer id="1301" name="/down_blocks.2/attentions.0/transformer_blocks.0/norm3/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_2783">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/norm3/Mul_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1366" name="Constant_88473" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_8364, onnx::Concat_2785, onnx::Gather_2755, onnx::Unsqueeze_2756, onnx::Unsqueeze_2784"/>
-			</rt_info>
+		<layer id="1302" name="Constant_86967_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="175104530" size="2560" />
 			<output>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1367" name="Constant_8364" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_8364"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="1368" name="onnx::Unsqueeze_2756" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_8364, onnx::Concat_2785, onnx::Gather_2755, onnx::Unsqueeze_2756, onnx::Unsqueeze_2784"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_2785">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1369" name="onnx::Gather_2758" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_2758"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_2758"/>
-			</output>
-		</layer>
-		<layer id="1370" name="Constant_8368" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_8368"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="1371" name="onnx::Unsqueeze_2759" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_8368, onnx::Gather_2758, onnx::Unsqueeze_2759"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_2759"/>
-			</output>
-		</layer>
-		<layer id="1372" name="onnx::Mul_2780" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_2780"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_2780"/>
-			</output>
-		</layer>
-		<layer id="1373" name="onnx::Unsqueeze_2781" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_2780, onnx::Unsqueeze_2781"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_2781"/>
-			</output>
-		</layer>
-		<layer id="1374" name="onnx::Unsqueeze_2786" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_2786"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_2786">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1375" name="onnx::Concat_2787" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_2787, onnx::Unsqueeze_2786"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_2787">
 					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1376" name="onnx::Reshape_2788" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2788"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_2788">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1377" name="onnx::MatMul_2789" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_2789"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>8</dim>
-					<dim>160</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_2789">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1378" name="Constant_147772" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="343649424" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2791, onnx::MatMul_8655"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1379" name="onnx::Add_2791" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="1303" name="Constant_86967" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2791, onnx::MatMul_8655"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1280</dim>
-					<dim>1280</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_2791">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1380" name="input.300" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.300"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
-				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.300">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1381" name="input.304" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.304"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
-				</port>
 				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="input.304">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1382" name="Constant_8602" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_8602"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1383" name="onnx::Mul_2802" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2798, onnx::Div_2801, onnx::Mul_2802, onnx::Pow_2795, onnx::ReduceMean_2797, onnx::Sqrt_2800, onnx::Sub_2794"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_2802">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1384" name="Constant_150376" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="350203024" size="5120"/>
-			<output>
-				<port id="0" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1385" name="onnx::Add_2803" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2803"/>
-			</rt_info>
+		<layer id="1304" name="/down_blocks.2/attentions.0/transformer_blocks.0/norm3/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -23336,65 +21176,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_2803">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/norm3/Add_1_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1386" name="Constant_150377" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="350208144" size="5120"/>
+		<layer id="1305" name="Constant_85316_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="10240, 1280" offset="175107090" size="26214400" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>10240</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1387" name="onnx::MatMul_2804" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1306" name="Constant_85316" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_2804"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>10240</dim>
 					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_2804">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1388" name="Constant_147780" type="Const" version="opset1">
-			<data element_type="f32" shape="10240, 1280" offset="350213264" size="52428800"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2806, onnx::MatMul_8656"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>10240</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1389" name="onnx::Add_2806" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2806, onnx::MatMul_8656"/>
-			</rt_info>
+		<layer id="1307" name="/down_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/proj/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -23407,18 +21224,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_2806">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/proj/MatMul_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1390" name="onnx::Shape_2807" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Shape_2807"/>
-			</rt_info>
+		<layer id="1308" name="/down_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/proj/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>1</dim>
@@ -23432,51 +21246,39 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Shape_2807">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/proj/Add_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1391" name="Constant_126471" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_8630, onnx::Gather_2809, onnx::Mul_2818"/>
-			</rt_info>
+		<layer id="1309" name="Constant_77012" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1392" name="Constant_126472" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_8630, onnx::Gather_2809, onnx::Mul_2818"/>
-			</rt_info>
+		<layer id="1310" name="Constant_77013" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1393" name="Constant_126468" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_8630, onnx::Gather_2809, onnx::Mul_2818"/>
-			</rt_info>
+		<layer id="1311" name="Constant_77009" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1394" name="onnx::Gather_2808" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_2808"/>
-			</rt_info>
+		<layer id="1312" name="/down_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -23485,36 +21287,27 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_2808">
+				<port id="1" precision="I64" names="/down_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Shape_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1395" name="onnx::Gather_2809" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_2809"/>
-			</rt_info>
+		<layer id="1313" name="/down_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Gather_2809">
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Constant_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1396" name="Constant_8619" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_8619"/>
-			</rt_info>
+		<layer id="1314" name="Constant_18154" type="Const" version="opset1">
+			<data element_type="i64" shape="" offset="2143392" size="8" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64" />
 			</output>
 		</layer>
-		<layer id="1397" name="onnx::Add_2810" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_8619, onnx::Add_2810, onnx::Gather_2809"/>
-			</rt_info>
+		<layer id="1315" name="/down_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Gather" type="Gather" version="opset8">
+			<data batch_dims="0" />
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
@@ -23522,30 +21315,24 @@
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
+				<port id="2" precision="I64" />
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Add_2810">
+				<port id="3" precision="I64" names="/down_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Gather_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1398" name="onnx::Add_2812" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2812"/>
-			</rt_info>
+		<layer id="1316" name="/down_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="2143400" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Add_2812">
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Constant_2_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1399" name="onnx::Div_2813" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2812, onnx::Div_2813"/>
-			</rt_info>
+		<layer id="1317" name="/down_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="I64">
 					<dim>1</dim>
@@ -23555,27 +21342,21 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Div_2813">
+				<port id="2" precision="I64" names="/down_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Add_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1400" name="onnx::Div_2814" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_2814"/>
-			</rt_info>
+		<layer id="1318" name="/down_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_2814">
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Constant_3_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1401" name="onnx::Mul_2815" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_2814, onnx::Mul_2815, onnx::Mul_2816, onnx::Slice_2817"/>
-			</rt_info>
+		<layer id="1319" name="/down_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Div" type="Divide" version="opset1">
+			<data auto_broadcast="numpy" m_pythondiv="true" />
 			<input>
 				<port id="0" precision="I64">
 					<dim>1</dim>
@@ -23585,26 +21366,20 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Mul_2815,onnx::Slice_2817">
+				<port id="2" precision="I64" names="/down_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Div_output_0,/down_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Mul_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1402" name="Constant_126467" type="Const" version="opset1">
-			<data element_type="i32" shape="1" offset="25941500" size="4"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_8630, onnx::Gather_2809, onnx::Mul_2818"/>
-			</rt_info>
+		<layer id="1320" name="Constant_77008" type="Const" version="opset1">
+			<data element_type="i32" shape="1" offset="12970946" size="4" />
 			<output>
 				<port id="0" precision="I32">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1403" name="ScatterUpdate_126473" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_8630, onnx::Gather_2809, onnx::Mul_2818"/>
-			</rt_info>
+		<layer id="1321" name="ScatterUpdate_77014" type="ScatterUpdate" version="opset3">
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
@@ -23625,22 +21400,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1404" name="Constant_126476" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941504" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_8630, onnx::Gather_2809, onnx::Mul_2818"/>
-			</rt_info>
+		<layer id="1322" name="Constant_77017" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970950" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1405" name="onnx::Mul_2818" type="StridedSlice" version="opset1">
-			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask=""/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_8630, onnx::Gather_2809, onnx::Mul_2818"/>
-			</rt_info>
+		<layer id="1323" name="/down_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Slice" type="StridedSlice" version="opset1">
+			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask="" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -23658,50 +21427,38 @@
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="FP32" names="onnx::Mul_2818">
+				<port id="4" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Slice_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1406" name="Constant_126540" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_8699, onnx::Div_2821, onnx::Gather_2809"/>
-			</rt_info>
+		<layer id="1324" name="Constant_77081" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1407" name="Constant_126539" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_8699, onnx::Div_2821, onnx::Gather_2809"/>
-			</rt_info>
+		<layer id="1325" name="Constant_77080" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1408" name="Constant_126538" type="Const" version="opset1">
-			<data element_type="i32" shape="1" offset="25941500" size="4"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_8699, onnx::Div_2821, onnx::Gather_2809"/>
-			</rt_info>
+		<layer id="1326" name="Constant_77079" type="Const" version="opset1">
+			<data element_type="i32" shape="1" offset="12970946" size="4" />
 			<output>
 				<port id="0" precision="I32">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1409" name="ScatterUpdate_126541" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_8699, onnx::Div_2821, onnx::Gather_2809"/>
-			</rt_info>
+		<layer id="1327" name="ScatterUpdate_77082" type="ScatterUpdate" version="opset3">
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
@@ -23722,33 +21479,24 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1410" name="Constant_126542" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_8699, onnx::Div_2821, onnx::Gather_2809"/>
-			</rt_info>
+		<layer id="1328" name="Constant_77083" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1411" name="onnx::Mul_2819" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_2819"/>
-			</rt_info>
+		<layer id="1329" name="/down_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Mul_2819">
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Constant_5_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1412" name="onnx::Slice_2820" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_2819, onnx::Slice_2820"/>
-			</rt_info>
+		<layer id="1330" name="/down_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Mul_1" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="I64">
 					<dim>1</dim>
@@ -23758,15 +21506,12 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Slice_2820">
+				<port id="2" precision="I64" names="/down_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Mul_1_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1413" name="ScatterUpdate_126543" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_8699, onnx::Div_2821, onnx::Gather_2809"/>
-			</rt_info>
+		<layer id="1331" name="ScatterUpdate_77084" type="ScatterUpdate" version="opset3">
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
@@ -23787,22 +21532,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1414" name="Constant_126546" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941504" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_8699, onnx::Div_2821, onnx::Gather_2809"/>
-			</rt_info>
+		<layer id="1332" name="Constant_77087" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970950" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1415" name="onnx::Div_2821" type="StridedSlice" version="opset1">
-			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask=""/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_8699, onnx::Div_2821, onnx::Gather_2809"/>
-			</rt_info>
+		<layer id="1333" name="/down_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Slice_1" type="StridedSlice" version="opset1">
+			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask="" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -23820,18 +21559,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="FP32" names="onnx::Div_2821">
+				<port id="4" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Slice_1_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1416" name="onnx::Mul_2829" type="Gelu" version="opset7">
-			<data approximation_mode="ERF"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2824, onnx::Erf_2823, onnx::Mul_2826, onnx::Mul_2827, onnx::Mul_2829"/>
-			</rt_info>
+		<layer id="1334" name="/down_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Mul_3" type="Gelu" version="opset7">
+			<data approximation_mode="ERF" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -23840,18 +21576,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="onnx::Mul_2829">
+				<port id="1" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Mul_3_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1417" name="input.308" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.308"/>
-			</rt_info>
+		<layer id="1335" name="/down_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Mul_4" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -23865,30 +21598,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.308">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Mul_4_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1418" name="Constant_147788" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 5120" offset="402642064" size="26214400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2832, onnx::MatMul_8657"/>
-			</rt_info>
+		<layer id="1336" name="Constant_85324_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 5120" offset="201321490" size="13107200" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1419" name="onnx::Add_2832" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="1337" name="Constant_85324" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2832, onnx::MatMul_8657"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>5120</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>5120</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1338" name="/down_blocks.2/attentions.0/transformer_blocks.0/ff/net.2/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -23901,18 +21646,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_2832">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/ff/net.2/MatMul_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1420" name="onnx::Add_2833" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2833"/>
-			</rt_info>
+		<layer id="1339" name="/down_blocks.2/attentions.0/transformer_blocks.0/ff/net.2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>1</dim>
@@ -23926,18 +21668,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_2833">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/ff/net.2/Add_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1421" name="onnx::Reshape_2834" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2834"/>
-			</rt_info>
+		<layer id="1340" name="/down_blocks.2/attentions.0/transformer_blocks.0/Add_2" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -23951,29 +21690,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_2834">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/transformer_blocks.0/Add_2_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1422" name="onnx::Reshape_8735" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="428856464" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_8735"/>
-			</rt_info>
+		<layer id="1341" name="/down_blocks.2/attentions.0/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="214428690" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_8662,onnx::Reshape_8735">
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.0/Constant_1_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1423" name="onnx::Transpose_2844" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_2844"/>
-			</rt_info>
+		<layer id="1342" name="/down_blocks.2/attentions.0/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -23985,7 +21718,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_2844">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>16</dim>
 					<dim>16</dim>
@@ -23993,21 +21726,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1424" name="Constant_8783" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="27579960" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_8783"/>
-			</rt_info>
+		<layer id="1343" name="Constant_18319" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="13790206" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1425" name="input.312" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.312"/>
-			</rt_info>
+		<layer id="1344" name="/down_blocks.2/attentions.0/Transpose_1" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -24020,7 +21747,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.312">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/Transpose_1_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -24028,13 +21755,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1426" name="m.down_blocks.2.attentions.0.proj_out.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280, 1, 1" offset="428856496" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.2.attentions.0.proj_out.weight"/>
-			</rt_info>
+		<layer id="1345" name="down_blocks.2.attentions.0.proj_out.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280, 1, 1" offset="214428722" size="3276800" />
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.2.attentions.0.proj_out.weight">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -24042,11 +21766,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1427" name="Convolution_8785" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
+		<layer id="1346" name="down_blocks.2.attentions.0.proj_out.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_8785"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="down_blocks.2.attentions.0.proj_out.weight">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1347" name="/down_blocks.2/attentions.0/proj_out/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -24070,10 +21813,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1428" name="Reshape_8805" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="435410096" size="5120"/>
+		<layer id="1348" name="Reshape_18341_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="217705522" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -24081,11 +21824,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1429" name="onnx::Add_2846" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1349" name="Reshape_18341" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_8804, Reshape_8805, onnx::Add_2846"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1350" name="/down_blocks.2/attentions.0/proj_out/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -24101,7 +21863,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_2846">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/proj_out/Conv_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -24109,11 +21871,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1430" name="onnx::Cast_2847" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.316, onnx::Cast_2847"/>
-			</rt_info>
+		<layer id="1351" name="/down_blocks.2/attentions.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -24129,7 +21888,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.316,onnx::Cast_2847">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.0/Add_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -24137,22 +21896,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1431" name="onnx::Reshape_2849" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2849"/>
-			</rt_info>
+		<layer id="1352" name="/down_blocks.2/resnets.1/norm1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_2849">
+				<port id="0" precision="I64" names="/down_blocks.2/resnets.1/norm1/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1432" name="onnx::InstanceNormalization_2850" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_2850"/>
-			</rt_info>
+		<layer id="1353" name="/down_blocks.2/resnets.1/norm1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -24165,29 +21918,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_2850">
+				<port id="2" precision="FP32" names="/down_blocks.2/resnets.1/norm1/Reshape_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1433" name="Constant_8844" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_8844"/>
-			</rt_info>
+		<layer id="1354" name="Constant_18379" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1434" name="MVN_8845" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_8864, Concat_8909, MVN_8845, Multiply_8892, Reshape_8865, Reshape_8910, onnx::Reshape_2853"/>
-			</rt_info>
+		<layer id="1355" name="MVN_18380" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -24199,18 +21946,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_2853">
+				<port id="2" precision="FP32" names="/down_blocks.2/resnets.1/norm1/InstanceNormalization_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1435" name="onnx::Reshape_2854" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2854"/>
-			</rt_info>
+		<layer id="1356" name="/down_blocks.2/resnets.1/norm1/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -24220,16 +21964,13 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_2854">
+				<port id="1" precision="I64" names="/down_blocks.2/resnets.1/norm1/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1436" name="onnx::Mul_2855" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_2855"/>
-			</rt_info>
+		<layer id="1357" name="/down_blocks.2/resnets.1/norm1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -24241,7 +21982,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_2855">
+				<port id="2" precision="FP32" names="/down_blocks.2/resnets.1/norm1/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -24249,10 +21990,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1437" name="Constant_150380" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="435415216" size="5120"/>
+		<layer id="1358" name="Constant_86970_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="217708082" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -24260,11 +22001,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1438" name="onnx::Add_2858" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1359" name="Constant_86970" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2858"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1360" name="/down_blocks.2/resnets.1/norm1/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -24280,7 +22040,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_2858">
+				<port id="2" precision="FP32" names="/down_blocks.2/resnets.1/norm1/Mul_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -24288,10 +22048,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1439" name="Constant_150381" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="435420336" size="5120"/>
+		<layer id="1361" name="Constant_86971_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="217710642" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -24299,11 +22059,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1440" name="onnx::Cast_2861" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1362" name="Constant_86971" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.320, onnx::Cast_2861"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1363" name="/down_blocks.2/resnets.1/norm1/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -24319,7 +22098,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.320,onnx::Cast_2861">
+				<port id="2" precision="FP32" names="/down_blocks.2/resnets.1/norm1/Add_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -24327,10 +22106,7 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1441" name="input.324" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.324, onnx::Mul_2863"/>
-			</rt_info>
+		<layer id="1364" name="/down_blocks.2/resnets.1/nonlinearity/Mul" type="Swish" version="opset4">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -24340,7 +22116,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="input.324">
+				<port id="1" precision="FP32" names="/down_blocks.2/resnets.1/nonlinearity/Mul_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -24348,13 +22124,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1442" name="m.down_blocks.2.resnets.1.conv1.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280, 3, 3" offset="435425456" size="58982400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.2.resnets.1.conv1.weight"/>
-			</rt_info>
+		<layer id="1365" name="down_blocks.2.resnets.1.conv1.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280, 3, 3" offset="217713202" size="29491200" />
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.2.resnets.1.conv1.weight">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 					<dim>3</dim>
@@ -24362,11 +22135,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1443" name="Convolution_8950" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
+		<layer id="1366" name="down_blocks.2.resnets.1.conv1.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_8950"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="down_blocks.2.resnets.1.conv1.weight">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1367" name="/down_blocks.2/resnets.1/conv1/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -24390,10 +22182,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1444" name="Reshape_8970" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="494407856" size="5120"/>
+		<layer id="1368" name="Reshape_18504_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="247204402" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -24401,11 +22193,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1445" name="onnx::Add_2865" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1369" name="Reshape_18504" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_8969, Reshape_8970, onnx::Add_2865"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1370" name="/down_blocks.2/resnets.1/conv1/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -24421,7 +22232,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_2865">
+				<port id="2" precision="FP32" names="/down_blocks.2/resnets.1/conv1/Conv_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -24429,40 +22240,35 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1446" name="onnx::Gemm_2867" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gemm_2867, onnx::Mul_2866"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-				</port>
-			</input>
+		<layer id="1371" name="down_blocks.2.resnets.1.time_emb_proj.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="247206962" size="3276800" />
 			<output>
-				<port id="1" precision="FP32" names="onnx::Gemm_2867">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1447" name="m.down_blocks.2.resnets.1.time_emb_proj.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="494412976" size="6553600"/>
+		<layer id="1372" name="down_blocks.2.resnets.1.time_emb_proj.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.2.resnets.1.time_emb_proj.weight"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.2.resnets.1.time_emb_proj.weight">
+				<port id="1" precision="FP32" names="down_blocks.2.resnets.1.time_emb_proj.weight">
 					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1448" name="MatMul_9002" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="MatMul_9002"/>
-			</rt_info>
+		<layer id="1373" name="/down_blocks.2/resnets.1/time_emb_proj/Gemm/WithoutBiases" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -24480,20 +22286,35 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1449" name="Constant_150382" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280" offset="500966576" size="5120"/>
+		<layer id="1374" name="Constant_86972_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280" offset="250483762" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1450" name="onnx::Unsqueeze_2868" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1375" name="Constant_86972" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Multiply_9003, onnx::Unsqueeze_2868"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1376" name="/down_blocks.2/resnets.1/time_emb_proj/Gemm" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -24505,27 +22326,21 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_2868">
+				<port id="2" precision="FP32" names="/down_blocks.2/resnets.1/time_emb_proj/Gemm_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1451" name="onnx::Unsqueeze_2869" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_2869"/>
-			</rt_info>
+		<layer id="1377" name="/down_blocks.2/resnets.1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_2869">
+				<port id="0" precision="I64" names="/down_blocks.2/resnets.1/Constant_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1452" name="onnx::Unsqueeze_2870" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_2870"/>
-			</rt_info>
+		<layer id="1378" name="/down_blocks.2/resnets.1/Unsqueeze" type="Unsqueeze" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -24536,28 +22351,22 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_2870">
+				<port id="2" precision="FP32" names="/down_blocks.2/resnets.1/Unsqueeze_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1453" name="onnx::Unsqueeze_2871" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="14129392" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_2871"/>
-			</rt_info>
+		<layer id="1379" name="/down_blocks.2/resnets.1/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="7064752" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_2871">
+				<port id="0" precision="I64" names="/down_blocks.2/resnets.1/Constant_1_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1454" name="onnx::Add_2872" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2872"/>
-			</rt_info>
+		<layer id="1380" name="/down_blocks.2/resnets.1/Unsqueeze_1" type="Unsqueeze" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -24569,7 +22378,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_2872">
+				<port id="2" precision="FP32" names="/down_blocks.2/resnets.1/Unsqueeze_1_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -24577,11 +22386,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1455" name="onnx::Cast_2873" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.328, onnx::Cast_2873"/>
-			</rt_info>
+		<layer id="1381" name="/down_blocks.2/resnets.1/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -24597,7 +22403,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.328,onnx::Cast_2873">
+				<port id="2" precision="FP32" names="/down_blocks.2/resnets.1/Add_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -24605,22 +22411,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1456" name="onnx::Reshape_2875" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2875"/>
-			</rt_info>
+		<layer id="1382" name="/down_blocks.2/resnets.1/norm2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_2875">
+				<port id="0" precision="I64" names="/down_blocks.2/resnets.1/norm2/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1457" name="onnx::InstanceNormalization_2876" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_2876"/>
-			</rt_info>
+		<layer id="1383" name="/down_blocks.2/resnets.1/norm2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -24633,29 +22433,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_2876">
+				<port id="2" precision="FP32" names="/down_blocks.2/resnets.1/norm2/Reshape_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1458" name="Constant_9020" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_9020"/>
-			</rt_info>
+		<layer id="1384" name="Constant_18552" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1459" name="MVN_9021" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_9040, Concat_9085, MVN_9021, Multiply_9068, Reshape_9041, Reshape_9086, onnx::Reshape_2879"/>
-			</rt_info>
+		<layer id="1385" name="MVN_18553" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -24667,18 +22461,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_2879">
+				<port id="2" precision="FP32" names="/down_blocks.2/resnets.1/norm2/InstanceNormalization_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1460" name="onnx::Reshape_2880" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2880"/>
-			</rt_info>
+		<layer id="1386" name="/down_blocks.2/resnets.1/norm2/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -24688,16 +22479,13 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_2880">
+				<port id="1" precision="I64" names="/down_blocks.2/resnets.1/norm2/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1461" name="onnx::Mul_2881" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_2881"/>
-			</rt_info>
+		<layer id="1387" name="/down_blocks.2/resnets.1/norm2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -24709,7 +22497,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_2881">
+				<port id="2" precision="FP32" names="/down_blocks.2/resnets.1/norm2/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -24717,10 +22505,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1462" name="Constant_150383" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="500971696" size="5120"/>
+		<layer id="1388" name="Constant_86973_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="250486322" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -24728,11 +22516,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1463" name="onnx::Add_2884" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1389" name="Constant_86973" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2884"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1390" name="/down_blocks.2/resnets.1/norm2/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -24748,7 +22555,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_2884">
+				<port id="2" precision="FP32" names="/down_blocks.2/resnets.1/norm2/Mul_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -24756,10 +22563,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1464" name="Constant_150384" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="500976816" size="5120"/>
+		<layer id="1391" name="Constant_86974_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="250488882" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -24767,11 +22574,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1465" name="onnx::Cast_2887" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1392" name="Constant_86974" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.332, onnx::Cast_2887"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1393" name="/down_blocks.2/resnets.1/norm2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -24787,7 +22613,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.332,onnx::Cast_2887">
+				<port id="2" precision="FP32" names="/down_blocks.2/resnets.1/norm2/Add_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -24795,10 +22621,7 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1466" name="input.336" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.336, onnx::Mul_2889"/>
-			</rt_info>
+		<layer id="1394" name="/down_blocks.2/resnets.1/nonlinearity_1/Mul" type="Swish" version="opset4">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -24808,7 +22631,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="input.336">
+				<port id="1" precision="FP32" names="/down_blocks.2/resnets.1/nonlinearity_1/Mul_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -24816,13 +22639,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1467" name="m.down_blocks.2.resnets.1.conv2.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280, 3, 3" offset="500981936" size="58982400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.2.resnets.1.conv2.weight"/>
-			</rt_info>
+		<layer id="1395" name="down_blocks.2.resnets.1.conv2.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280, 3, 3" offset="250491442" size="29491200" />
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.2.resnets.1.conv2.weight">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 					<dim>3</dim>
@@ -24830,11 +22650,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1468" name="Convolution_9126" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
+		<layer id="1396" name="down_blocks.2.resnets.1.conv2.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_9126"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="down_blocks.2.resnets.1.conv2.weight">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1397" name="/down_blocks.2/resnets.1/conv2/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -24858,10 +22697,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1469" name="Reshape_9146" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="559964336" size="5120"/>
+		<layer id="1398" name="Reshape_18677_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="279982642" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -24869,11 +22708,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1470" name="onnx::Add_2891" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1399" name="Reshape_18677" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_9145, Reshape_9146, onnx::Add_2891"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1400" name="/down_blocks.2/resnets.1/conv2/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -24889,7 +22747,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_2891">
+				<port id="2" precision="FP32" names="/down_blocks.2/resnets.1/conv2/Conv_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -24897,11 +22755,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1471" name="onnx::Div_2892" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.340, onnx::Div_2892"/>
-			</rt_info>
+		<layer id="1401" name="/down_blocks.2/resnets.1/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -24917,7 +22772,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.340,onnx::Div_2892">
+				<port id="2" precision="FP32" names="/down_blocks.2/resnets.1/Add_1_output_0,/down_blocks.2/resnets.1/Div_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -24925,22 +22780,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1472" name="onnx::Reshape_2899" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2899"/>
-			</rt_info>
+		<layer id="1402" name="/down_blocks.2/attentions.1/norm/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_2899">
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.1/norm/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1473" name="onnx::InstanceNormalization_2900" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_2900"/>
-			</rt_info>
+		<layer id="1403" name="/down_blocks.2/attentions.1/norm/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -24953,29 +22802,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_2900">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/norm/Reshape_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1474" name="Constant_9186" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_9186"/>
-			</rt_info>
+		<layer id="1404" name="Constant_18717" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1475" name="MVN_9187" type="MVN" version="opset6">
-			<data eps="9.9999999747524271e-07" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_9206, Concat_9251, MVN_9187, Multiply_9234, Reshape_9207, Reshape_9252, onnx::Reshape_2903"/>
-			</rt_info>
+		<layer id="1405" name="MVN_18718" type="MVN" version="opset6">
+			<data eps="9.9999999747524271e-07" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -24987,18 +22830,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_2903">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/norm/InstanceNormalization_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1476" name="onnx::Reshape_2904" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2904"/>
-			</rt_info>
+		<layer id="1406" name="/down_blocks.2/attentions.1/norm/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -25008,16 +22848,13 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_2904">
+				<port id="1" precision="I64" names="/down_blocks.2/attentions.1/norm/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1477" name="onnx::Mul_2905" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_2905"/>
-			</rt_info>
+		<layer id="1407" name="/down_blocks.2/attentions.1/norm/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -25029,7 +22866,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_2905">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/norm/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -25037,10 +22874,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1478" name="Constant_150385" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="559969456" size="5120"/>
+		<layer id="1408" name="Constant_86975_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="279985202" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -25048,11 +22885,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1479" name="onnx::Add_2908" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1409" name="Constant_86975" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2908"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1410" name="/down_blocks.2/attentions.1/norm/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -25068,7 +22924,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_2908">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/norm/Mul_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -25076,10 +22932,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1480" name="Constant_150386" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="559974576" size="5120"/>
+		<layer id="1411" name="Constant_86976_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="279987762" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -25087,11 +22943,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1481" name="input.344" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1412" name="Constant_86976" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.344"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1413" name="/down_blocks.2/attentions.1/norm/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -25107,7 +22982,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.344">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/norm/Add_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -25115,13 +22990,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1482" name="m.down_blocks.2.attentions.1.proj_in.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280, 1, 1" offset="559979696" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.2.attentions.1.proj_in.weight"/>
-			</rt_info>
+		<layer id="1414" name="down_blocks.2.attentions.1.proj_in.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280, 1, 1" offset="279990322" size="3276800" />
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.2.attentions.1.proj_in.weight">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -25129,11 +23001,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1483" name="Convolution_9289" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
+		<layer id="1415" name="down_blocks.2.attentions.1.proj_in.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_9289"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="down_blocks.2.attentions.1.proj_in.weight">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1416" name="/down_blocks.2/attentions.1/proj_in/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -25157,10 +23048,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1484" name="Reshape_9309" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="566533296" size="5120"/>
+		<layer id="1417" name="Reshape_18840_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="283267122" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -25168,11 +23059,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1485" name="onnx::Transpose_2912" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1418" name="Reshape_18840" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_9308, Reshape_9309, onnx::Transpose_2912"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1419" name="/down_blocks.2/attentions.1/proj_in/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -25188,7 +23098,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_2912">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/proj_in/Conv_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -25196,21 +23106,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1486" name="Constant_9337" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18233080" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_9337"/>
-			</rt_info>
+		<layer id="1420" name="Constant_18868" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9116600" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1487" name="onnx::Reshape_2913" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2913"/>
-			</rt_info>
+		<layer id="1421" name="/down_blocks.2/attentions.1/Transpose" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -25223,7 +23127,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_2913">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/Transpose_output_0">
 					<dim>2</dim>
 					<dim>16</dim>
 					<dim>16</dim>
@@ -25231,11 +23135,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1488" name="input.348" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.348"/>
-			</rt_info>
+		<layer id="1422" name="/down_blocks.2/attentions.1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="151498600" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.1/Constant_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1423" name="/down_blocks.2/attentions.1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -25248,29 +23157,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.348">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/Reshape_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1489" name="Constant_9342" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_9342"/>
-			</rt_info>
+		<layer id="1424" name="Constant_18877" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1490" name="onnx::Mul_2931" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2927, onnx::Div_2930, onnx::Mul_2931, onnx::Pow_2924, onnx::ReduceMean_2926, onnx::Sqrt_2929, onnx::Sub_2923"/>
-			</rt_info>
+		<layer id="1425" name="/down_blocks.2/attentions.1/transformer_blocks.0/norm1/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -25282,28 +23185,45 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_2931">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/norm1/Div_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1491" name="Constant_150387" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="566538416" size="5120"/>
+		<layer id="1426" name="Constant_86977_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="283269682" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1492" name="onnx::Add_2932" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1427" name="Constant_86977" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_2932"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1428" name="/down_blocks.2/attentions.1/transformer_blocks.0/norm1/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -25317,28 +23237,45 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_2932">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/norm1/Mul_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1493" name="Constant_150388" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="566543536" size="5120"/>
+		<layer id="1429" name="Constant_86978_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="283272242" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1494" name="onnx::MatMul_2933" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1430" name="Constant_86978" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_2933"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1431" name="/down_blocks.2/attentions.1/transformer_blocks.0/norm1/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -25352,30 +23289,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_2933">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/norm1/Add_1_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1495" name="Constant_147799" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="566548656" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8673, q.79"/>
-			</rt_info>
+		<layer id="1432" name="Constant_85334_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="283274802" size="3276800" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1496" name="q.79" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="1433" name="Constant_85334" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8673, q.79"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1434" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/to_q/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -25388,18 +23337,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.79">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/to_q/MatMul_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1497" name="onnx::Transpose_2953" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_2953"/>
-			</rt_info>
+		<layer id="1435" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="154780544" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Constant_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1436" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -25411,7 +23365,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_2953">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Reshape_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>8</dim>
@@ -25419,21 +23373,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1498" name="Constant_9361" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_9361"/>
-			</rt_info>
+		<layer id="1437" name="Constant_18900" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1499" name="onnx::Reshape_2954" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2954"/>
-			</rt_info>
+		<layer id="1438" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Transpose" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -25446,7 +23394,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_2954">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Transpose_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>256</dim>
@@ -25454,11 +23402,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1500" name="q.83" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="q.83"/>
-			</rt_info>
+		<layer id="1439" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="154780576" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Constant_1_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1440" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -25471,30 +23424,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.83">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Reshape_1_output_0">
 					<dim>16</dim>
 					<dim>256</dim>
 					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1501" name="Constant_147806" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="573102256" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.79, onnx::MatMul_8674"/>
-			</rt_info>
+		<layer id="1441" name="Constant_85341_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="286551602" size="3276800" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1502" name="k.79" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="1442" name="Constant_85341" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="k.79, onnx::MatMul_8674"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1443" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/to_k/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -25507,18 +23472,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.79">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/to_k/MatMul_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1503" name="onnx::Transpose_2978" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_2978"/>
-			</rt_info>
+		<layer id="1444" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="154780544" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Constant_2_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1445" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Reshape_2" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -25530,7 +23500,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_2978">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Reshape_2_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>8</dim>
@@ -25538,21 +23508,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1504" name="Constant_9369" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_9369"/>
-			</rt_info>
+		<layer id="1446" name="Constant_18916" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1505" name="onnx::Reshape_2979" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_2979"/>
-			</rt_info>
+		<layer id="1447" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Transpose_1" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -25565,7 +23529,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_2979">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Transpose_1_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>256</dim>
@@ -25573,11 +23537,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1506" name="k.83" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.83"/>
-			</rt_info>
+		<layer id="1448" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="154780576" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Constant_3_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1449" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Reshape_3" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -25590,18 +23559,45 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.83">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Reshape_3_output_0">
 					<dim>16</dim>
 					<dim>256</dim>
 					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1507" name="onnx::Mul_3015" type="Einsum" version="opset7">
-			<data equation="bid,bjd-&gt;bij"/>
+		<layer id="1450" name="Constant_86979_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1" offset="158057400" size="2" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1451" name="Constant_86979" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_3015"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1452" name="Multiply_86195" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
@@ -25609,59 +23605,43 @@
 					<dim>160</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_3015">
+				<port id="2" precision="FP32">
 					<dim>16</dim>
 					<dim>256</dim>
-					<dim>256</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1508" name="Constant_150389" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1" offset="316114004" size="4"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1509" name="onnx::Softmax_3017" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Softmax_3017"/>
-			</rt_info>
+		<layer id="1453" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Mul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
 					<dim>256</dim>
-					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Softmax_3017">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Add_output_0,/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Mul_output_0">
 					<dim>16</dim>
 					<dim>256</dim>
 					<dim>256</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1510" name="attn.39" type="SoftMax" version="opset8">
-			<data axis="-1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="attn.39"/>
-			</rt_info>
+		<layer id="1454" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Softmax" type="SoftMax" version="opset8">
+			<data axis="-1" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
@@ -25670,30 +23650,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="attn.39">
+				<port id="1" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Cast_output_0,/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Softmax_output_0">
 					<dim>16</dim>
 					<dim>256</dim>
 					<dim>256</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1511" name="Constant_147813" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="579655856" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8675, v.79"/>
-			</rt_info>
+		<layer id="1455" name="Constant_85348_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="289828402" size="3276800" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1512" name="v.79" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="1456" name="Constant_85348" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8675, v.79"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1457" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/to_v/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -25706,18 +23698,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.79">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/to_v/MatMul_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1513" name="onnx::Transpose_3003" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_3003"/>
-			</rt_info>
+		<layer id="1458" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Constant_4" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="154780544" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Constant_4_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1459" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Reshape_4" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -25729,7 +23726,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_3003">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Reshape_4_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>8</dim>
@@ -25737,21 +23734,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1514" name="Constant_9377" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_9377"/>
-			</rt_info>
+		<layer id="1460" name="Constant_18932" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1515" name="onnx::Reshape_3004" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3004"/>
-			</rt_info>
+		<layer id="1461" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Transpose_2" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -25764,7 +23755,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_3004">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Transpose_2_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>256</dim>
@@ -25772,11 +23763,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1516" name="v.83" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="v.83"/>
-			</rt_info>
+		<layer id="1462" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="154780576" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Constant_5_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1463" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Reshape_5" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -25789,18 +23785,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.83">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Reshape_5_output_0">
 					<dim>16</dim>
 					<dim>256</dim>
 					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1517" name="out.39" type="Einsum" version="opset7">
-			<data equation="bij,bjd-&gt;bid"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="out.39"/>
-			</rt_info>
+		<layer id="1464" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/MatMul_1" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="false" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
@@ -25814,178 +23807,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="out.39">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1518" name="onnx::Gather_3020" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_3020, onnx::Gather_3023, onnx::Gather_3026"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/MatMul_1_output_0">
 					<dim>16</dim>
 					<dim>256</dim>
 					<dim>160</dim>
 				</port>
-			</input>
-			<output>
-				<port id="1" precision="I64" names="onnx::Gather_3020,onnx::Gather_3023,onnx::Gather_3026">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1519" name="onnx::Gather_3021" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_3021"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_3021"/>
-			</output>
-		</layer>
-		<layer id="1520" name="Constant_9389" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_9389"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="1521" name="onnx::Div_3022" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_9389, onnx::Div_3022, onnx::Gather_3021"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Div_3022"/>
-			</output>
-		</layer>
-		<layer id="1522" name="onnx::Div_3029" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_3029"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_3029"/>
-			</output>
-		</layer>
-		<layer id="1523" name="onnx::Cast_3030" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_3030, onnx::Cast_3031, onnx::Div_3029, onnx::Unsqueeze_3032"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_3030,onnx::Cast_3031,onnx::Unsqueeze_3032"/>
-			</output>
-		</layer>
-		<layer id="1524" name="onnx::Unsqueeze_3034" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3034"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_3034">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1525" name="onnx::Concat_3035" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_3035, onnx::Unsqueeze_3034"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_3035">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1526" name="Constant_90657" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="19464604" size="16"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_3042"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1527" name="Constant_90658" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="1528" name="Gather_90659" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_3042"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>2</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64">
-					<dim>2</dim>
-				</port>
 			</output>
 		</layer>
-		<layer id="1529" name="onnx::Reshape_3042" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_3042"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>2</dim>
-				</port>
-			</input>
+		<layer id="1465" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Constant_8" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="161334202" size="32" />
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_3042">
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Constant_8_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1530" name="onnx::Transpose_3043" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_3043"/>
-			</rt_info>
+		<layer id="1466" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Reshape_6" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
@@ -25997,7 +23835,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_3043">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Reshape_6_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>256</dim>
@@ -26005,21 +23843,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1531" name="Constant_9510" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_9510"/>
-			</rt_info>
+		<layer id="1467" name="Constant_18958" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1532" name="onnx::Reshape_3044" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3044"/>
-			</rt_info>
+		<layer id="1468" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Transpose_4" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -26032,7 +23864,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_3044">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Transpose_4_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>8</dim>
@@ -26040,205 +23872,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1533" name="onnx::Div_3045" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_3045"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_3045"/>
-			</output>
-		</layer>
-		<layer id="1534" name="onnx::Cast_3046" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_3046, onnx::Cast_3047, onnx::Div_3045, onnx::Unsqueeze_3048"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_3046,onnx::Cast_3047,onnx::Unsqueeze_3048"/>
-			</output>
-		</layer>
-		<layer id="1535" name="onnx::Unsqueeze_3051" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3051"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_3051">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1536" name="onnx::Concat_3052" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_3052, onnx::Unsqueeze_3051"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_3052">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1537" name="Constant_88500" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_9393, onnx::Concat_3054, onnx::Gather_3024, onnx::Unsqueeze_3025, onnx::Unsqueeze_3053"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1538" name="Constant_9393" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_9393"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="1539" name="onnx::Unsqueeze_3025" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_9393, onnx::Concat_3054, onnx::Gather_3024, onnx::Unsqueeze_3025, onnx::Unsqueeze_3053"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_3054">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1540" name="onnx::Gather_3027" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_3027"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_3027"/>
-			</output>
-		</layer>
-		<layer id="1541" name="Constant_9397" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_9397"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="1542" name="onnx::Unsqueeze_3028" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_9397, onnx::Gather_3027, onnx::Unsqueeze_3028"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_3028"/>
-			</output>
-		</layer>
-		<layer id="1543" name="onnx::Mul_3049" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_3049"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_3049"/>
-			</output>
-		</layer>
-		<layer id="1544" name="onnx::Unsqueeze_3050" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_3049, onnx::Unsqueeze_3050"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_3050"/>
-			</output>
-		</layer>
-		<layer id="1545" name="onnx::Unsqueeze_3055" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3055"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_3055">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1546" name="onnx::Concat_3056" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_3056, onnx::Unsqueeze_3055"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_3056">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1547" name="onnx::Reshape_3057" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3057"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
+		<layer id="1469" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Constant_9" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="151498600" size="24" />
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_3057">
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Constant_9_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1548" name="onnx::MatMul_3058" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_3058"/>
-			</rt_info>
+		<layer id="1470" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Reshape_7" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -26251,30 +23894,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_3058">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/Reshape_7_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1549" name="Constant_147820" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="586209456" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3060, onnx::MatMul_8704"/>
-			</rt_info>
+		<layer id="1471" name="Constant_85355_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="293105202" size="3276800" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1550" name="onnx::Add_3060" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="1472" name="Constant_85355" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3060, onnx::MatMul_8704"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1473" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/to_out.0/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -26287,18 +23942,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3060">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/to_out.0/MatMul_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1551" name="input.352" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.352"/>
-			</rt_info>
+		<layer id="1474" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/to_out.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>1</dim>
@@ -26312,18 +23964,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.352">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn1/to_out.0/Add_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1552" name="input.356" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.356"/>
-			</rt_info>
+		<layer id="1475" name="/down_blocks.2/attentions.1/transformer_blocks.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -26337,29 +23986,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.356">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/Add_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1553" name="Constant_9631" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_9631"/>
-			</rt_info>
+		<layer id="1476" name="Constant_18970" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1554" name="onnx::Mul_3071" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3067, onnx::Div_3070, onnx::Mul_3071, onnx::Pow_3064, onnx::ReduceMean_3066, onnx::Sqrt_3069, onnx::Sub_3063"/>
-			</rt_info>
+		<layer id="1477" name="/down_blocks.2/attentions.1/transformer_blocks.0/norm2/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -26371,28 +24014,45 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_3071">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/norm2/Div_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1555" name="Constant_150391" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="592763056" size="5120"/>
+		<layer id="1478" name="Constant_86981_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="296382002" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1556" name="onnx::Add_3072" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1479" name="Constant_86981" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3072"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1480" name="/down_blocks.2/attentions.1/transformer_blocks.0/norm2/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -26406,28 +24066,45 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3072">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/norm2/Mul_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1557" name="Constant_150392" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="592768176" size="5120"/>
+		<layer id="1481" name="Constant_86982_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="296384562" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1558" name="onnx::MatMul_3073" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1482" name="Constant_86982" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_3073"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1483" name="/down_blocks.2/attentions.1/transformer_blocks.0/norm2/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -26441,30 +24118,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_3073">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/norm2/Add_1_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1559" name="Constant_147828" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="592773296" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8705, q.87"/>
-			</rt_info>
+		<layer id="1484" name="Constant_85363_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="296387122" size="3276800" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1560" name="q.87" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="1485" name="Constant_85363" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8705, q.87"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1486" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/to_q/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -26477,161 +24166,35 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.87">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/to_q/MatMul_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1561" name="Constant_107406" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
+		<layer id="1487" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="154780544" size="32" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Constant_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1562" name="onnx::Gather_3080" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_3080, onnx::Gather_3083, onnx::Gather_3086"/>
-			</rt_info>
+		<layer id="1488" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_3080,onnx::Gather_3083,onnx::Gather_3086">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1563" name="onnx::Gather_3087" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_3087"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_3087"/>
-			</output>
-		</layer>
-		<layer id="1564" name="Constant_9657" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_9657"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="1565" name="onnx::Div_3088" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_9657, onnx::Div_3088, onnx::Gather_3087"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Div_3088"/>
-			</output>
-		</layer>
-		<layer id="1566" name="onnx::Div_3089" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_3089"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_3089"/>
-			</output>
-		</layer>
-		<layer id="1567" name="onnx::Cast_3090" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_3090, onnx::Cast_3091, onnx::Div_3089, onnx::Unsqueeze_3092"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_3090,onnx::Cast_3091,onnx::Unsqueeze_3092"/>
-			</output>
-		</layer>
-		<layer id="1568" name="onnx::Unsqueeze_3100" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3100"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_3100">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1569" name="onnx::Concat_3101" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_3101, onnx::Unsqueeze_3100"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_3101">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1570" name="onnx::Reshape_3102" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_3102"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1571" name="onnx::Transpose_3103" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_3103"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_3103">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Reshape_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>8</dim>
@@ -26639,21 +24202,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1572" name="Constant_9770" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_9770"/>
-			</rt_info>
+		<layer id="1489" name="Constant_18993" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1573" name="onnx::Reshape_3104" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3104"/>
-			</rt_info>
+		<layer id="1490" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Transpose" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -26666,7 +24223,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_3104">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Transpose_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>256</dim>
@@ -26674,205 +24231,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1574" name="onnx::Gather_3081" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_3081"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_3081"/>
-			</output>
-		</layer>
-		<layer id="1575" name="Constant_9649" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_9649"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="1576" name="onnx::Unsqueeze_3082" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_9649, onnx::Gather_3081, onnx::Unsqueeze_3082"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_3082"/>
-			</output>
-		</layer>
-		<layer id="1577" name="onnx::Mul_3105" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_3105"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_3105"/>
-			</output>
-		</layer>
-		<layer id="1578" name="onnx::Unsqueeze_3106" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_3105, onnx::Unsqueeze_3106"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_3106"/>
-			</output>
-		</layer>
-		<layer id="1579" name="onnx::Unsqueeze_3111" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3111"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_3111">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1580" name="onnx::Concat_3112" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_3112, onnx::Unsqueeze_3111"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_3112">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1581" name="Constant_88527" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_9653, onnx::Concat_3114, onnx::Gather_3084, onnx::Unsqueeze_3085, onnx::Unsqueeze_3113"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1582" name="Constant_9653" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_9653"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="1583" name="onnx::Unsqueeze_3085" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_9653, onnx::Concat_3114, onnx::Gather_3084, onnx::Unsqueeze_3085, onnx::Unsqueeze_3113"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_3114">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1584" name="onnx::Div_3107" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_3107"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_3107"/>
-			</output>
-		</layer>
-		<layer id="1585" name="onnx::Cast_3108" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_3108, onnx::Cast_3109, onnx::Div_3107, onnx::Unsqueeze_3110"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_3108,onnx::Cast_3109,onnx::Unsqueeze_3110"/>
-			</output>
-		</layer>
-		<layer id="1586" name="onnx::Unsqueeze_3115" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3115"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_3115">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1587" name="onnx::Concat_3116" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_3116, onnx::Unsqueeze_3115"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_3116">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1588" name="onnx::Reshape_3117" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3117"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
+		<layer id="1491" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="154780576" size="24" />
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_3117">
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Constant_1_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1589" name="q.91" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="q.91"/>
-			</rt_info>
+		<layer id="1492" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -26885,30 +24253,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.91">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Reshape_1_output_0">
 					<dim>16</dim>
 					<dim>256</dim>
 					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1590" name="Constant_147835" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 768" offset="599326896" size="3932160"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.87, onnx::MatMul_8706"/>
-			</rt_info>
+		<layer id="1493" name="Constant_85370_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 768" offset="299663922" size="1966080" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1591" name="k.87" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="1494" name="Constant_85370" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="k.87, onnx::MatMul_8706"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>768</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>768</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1495" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/to_k/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -26921,18 +24301,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.87">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/to_k/MatMul_output_0">
 					<dim>2</dim>
 					<dim>77</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1592" name="onnx::Transpose_3132" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_3132"/>
-			</rt_info>
+		<layer id="1496" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="169859034" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Constant_2_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1497" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Reshape_2" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -26944,7 +24329,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_3132">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Reshape_2_output_0">
 					<dim>2</dim>
 					<dim>77</dim>
 					<dim>8</dim>
@@ -26952,21 +24337,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1593" name="Constant_9891" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_9891"/>
-			</rt_info>
+		<layer id="1498" name="Constant_19009" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1594" name="onnx::Reshape_3133" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3133"/>
-			</rt_info>
+		<layer id="1499" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Transpose_1" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -26979,7 +24358,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_3133">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Transpose_1_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>77</dim>
@@ -26987,11 +24366,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1595" name="k.91" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.91"/>
-			</rt_info>
+		<layer id="1500" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="169859066" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Constant_3_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1501" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Reshape_3" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -27004,78 +24388,89 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.91">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Reshape_3_output_0">
 					<dim>16</dim>
 					<dim>77</dim>
 					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1596" name="onnx::Mul_3169" type="Einsum" version="opset7">
-			<data equation="bid,bjd-&gt;bij"/>
+		<layer id="1502" name="Constant_86983_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1" offset="158057400" size="2" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1503" name="Constant_86983" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_3169"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>160</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_3169">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>77</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1597" name="Constant_150393" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1" offset="316114004" size="4"/>
-			<output>
+		<layer id="1504" name="Multiply_86197" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
 				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>160</dim>
+				</port>
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
+			</input>
+			<output>
+				<port id="2" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>160</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="1598" name="onnx::Softmax_3171" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Softmax_3171"/>
-			</rt_info>
+		<layer id="1505" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Mul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
 					<dim>256</dim>
-					<dim>77</dim>
+					<dim>160</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>160</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Softmax_3171">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Add_output_0,/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Mul_output_0">
 					<dim>16</dim>
 					<dim>256</dim>
 					<dim>77</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1599" name="attn.43" type="SoftMax" version="opset8">
-			<data axis="-1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="attn.43"/>
-			</rt_info>
+		<layer id="1506" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Softmax" type="SoftMax" version="opset8">
+			<data axis="-1" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
@@ -27084,30 +24479,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="attn.43">
+				<port id="1" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Cast_output_0,/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Softmax_output_0">
 					<dim>16</dim>
 					<dim>256</dim>
 					<dim>77</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1600" name="Constant_147842" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 768" offset="603259056" size="3932160"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8707, v.87"/>
-			</rt_info>
+		<layer id="1507" name="Constant_85377_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 768" offset="301630002" size="1966080" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1601" name="v.87" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="1508" name="Constant_85377" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8707, v.87"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>768</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>768</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1509" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/to_v/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -27120,18 +24527,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.87">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/to_v/MatMul_output_0">
 					<dim>2</dim>
 					<dim>77</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1602" name="onnx::Transpose_3157" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_3157"/>
-			</rt_info>
+		<layer id="1510" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Constant_4" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="169859034" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Constant_4_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1511" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Reshape_4" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -27143,7 +24555,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_3157">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Reshape_4_output_0">
 					<dim>2</dim>
 					<dim>77</dim>
 					<dim>8</dim>
@@ -27151,21 +24563,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1603" name="Constant_9899" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_9899"/>
-			</rt_info>
+		<layer id="1512" name="Constant_19025" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1604" name="onnx::Reshape_3158" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3158"/>
-			</rt_info>
+		<layer id="1513" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Transpose_2" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -27178,7 +24584,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_3158">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Transpose_2_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>77</dim>
@@ -27186,11 +24592,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1605" name="v.91" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="v.91"/>
-			</rt_info>
+		<layer id="1514" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="169859066" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Constant_5_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1515" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Reshape_5" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -27203,18 +24614,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.91">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Reshape_5_output_0">
 					<dim>16</dim>
 					<dim>77</dim>
 					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1606" name="out.43" type="Einsum" version="opset7">
-			<data equation="bij,bjd-&gt;bid"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="out.43"/>
-			</rt_info>
+		<layer id="1516" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/MatMul_1" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="false" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
@@ -27228,585 +24636,304 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="out.43">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/MatMul_1_output_0">
 					<dim>16</dim>
 					<dim>256</dim>
 					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1607" name="onnx::Gather_3174" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_3174, onnx::Gather_3177, onnx::Gather_3180"/>
-			</rt_info>
+		<layer id="1517" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Constant_8" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="161334202" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Constant_8_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1518" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Reshape_6" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
 					<dim>256</dim>
 					<dim>160</dim>
 				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_3174,onnx::Gather_3177,onnx::Gather_3180">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Reshape_6_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1608" name="onnx::Gather_3175" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_3175"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_3175"/>
-			</output>
-		</layer>
-		<layer id="1609" name="Constant_9911" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_9911"/>
-			</rt_info>
+		<layer id="1519" name="Constant_19051" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64">
+					<dim>4</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="1610" name="onnx::Div_3176" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_9911, onnx::Div_3176, onnx::Gather_3175"/>
-			</rt_info>
+		<layer id="1520" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Transpose_4" type="Transpose" version="opset1">
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>256</dim>
+					<dim>160</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_3176"/>
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Transpose_4_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>8</dim>
+					<dim>160</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="1611" name="onnx::Div_3183" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_3183"/>
-			</rt_info>
+		<layer id="1521" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Constant_9" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="151498600" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_3183"/>
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Constant_9_output_0">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="1612" name="onnx::Cast_3184" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_3184, onnx::Cast_3185, onnx::Div_3183, onnx::Unsqueeze_3186"/>
-			</rt_info>
+		<layer id="1522" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Reshape_7" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>8</dim>
+					<dim>160</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>3</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_3184,onnx::Cast_3185,onnx::Unsqueeze_3186"/>
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/Reshape_7_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="1613" name="onnx::Unsqueeze_3188" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3188"/>
-			</rt_info>
+		<layer id="1523" name="Constant_85384_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="303596082" size="3276800" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_3188">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1614" name="onnx::Concat_3189" type="Unsqueeze" version="opset1">
+		<layer id="1524" name="Constant_85384" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_3189, onnx::Unsqueeze_3188"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_3189">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1615" name="Constant_90667" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="19464604" size="16"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_3196"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1616" name="Constant_90668" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="1617" name="Gather_90669" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_3196"/>
-			</rt_info>
+		<layer id="1525" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/to_out.0/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/to_out.0/MatMul_output_0">
 					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1618" name="onnx::Reshape_3196" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_3196"/>
-			</rt_info>
+		<layer id="1526" name="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/to_out.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="2" precision="I64">
+				<port id="1" precision="FP32">
 					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_3196">
-					<dim>4</dim>
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/attn2/to_out.0/Add_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1619" name="onnx::Transpose_3197" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_3197"/>
-			</rt_info>
+		<layer id="1527" name="/down_blocks.2/attentions.1/transformer_blocks.0/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
+					<dim>2</dim>
 					<dim>256</dim>
-					<dim>160</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_3197">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/Add_1_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>256</dim>
-					<dim>160</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1620" name="Constant_10032" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_10032"/>
-			</rt_info>
+		<layer id="1528" name="Constant_19063" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1621" name="onnx::Reshape_3198" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3198"/>
-			</rt_info>
+		<layer id="1529" name="/down_blocks.2/attentions.1/transformer_blocks.0/norm3/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>256</dim>
-					<dim>160</dim>
+					<dim>1280</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_3198">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/norm3/Div_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
-					<dim>8</dim>
-					<dim>160</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1622" name="onnx::Div_3199" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_3199"/>
-			</rt_info>
+		<layer id="1530" name="Constant_86985_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="306872882" size="2560" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_3199"/>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="1623" name="onnx::Cast_3200" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
+		<layer id="1531" name="Constant_86985" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_3200, onnx::Cast_3201, onnx::Div_3199, onnx::Unsqueeze_3202"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_3200,onnx::Cast_3201,onnx::Unsqueeze_3202"/>
-			</output>
-		</layer>
-		<layer id="1624" name="onnx::Unsqueeze_3205" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3205"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_3205">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1625" name="onnx::Concat_3206" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_3206, onnx::Unsqueeze_3205"/>
-			</rt_info>
+		<layer id="1532" name="/down_blocks.2/attentions.1/transformer_blocks.0/norm3/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
+				</port>
+				<port id="1" precision="FP32">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_3206">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/norm3/Mul_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1626" name="Constant_88554" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_9915, onnx::Concat_3208, onnx::Gather_3178, onnx::Unsqueeze_3179, onnx::Unsqueeze_3207"/>
-			</rt_info>
+		<layer id="1533" name="Constant_86986_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="306875442" size="2560" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1627" name="Constant_9915" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_9915"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="1628" name="onnx::Unsqueeze_3179" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_9915, onnx::Concat_3208, onnx::Gather_3178, onnx::Unsqueeze_3179, onnx::Unsqueeze_3207"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_3208">
 					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1629" name="onnx::Gather_3181" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_3181"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_3181"/>
-			</output>
-		</layer>
-		<layer id="1630" name="Constant_9919" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_9919"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="1631" name="onnx::Unsqueeze_3182" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_9919, onnx::Gather_3181, onnx::Unsqueeze_3182"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_3182"/>
-			</output>
-		</layer>
-		<layer id="1632" name="onnx::Mul_3203" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_3203"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_3203"/>
-			</output>
-		</layer>
-		<layer id="1633" name="onnx::Unsqueeze_3204" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_3203, onnx::Unsqueeze_3204"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_3204"/>
-			</output>
-		</layer>
-		<layer id="1634" name="onnx::Unsqueeze_3209" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3209"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_3209">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1635" name="onnx::Concat_3210" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_3210, onnx::Unsqueeze_3209"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_3210">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1636" name="onnx::Reshape_3211" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3211"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_3211">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1637" name="onnx::MatMul_3212" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_3212"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>8</dim>
-					<dim>160</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_3212">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1638" name="Constant_147849" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="607191216" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3214, onnx::MatMul_8728"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1280</dim>
-					<dim>1280</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1639" name="onnx::Add_3214" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3214, onnx::MatMul_8728"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1280</dim>
-					<dim>1280</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3214">
-					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1640" name="input.360" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1534" name="Constant_86986" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.360"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
-				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.360">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1641" name="input.364" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.364"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
-				</port>
 				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="input.364">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1642" name="Constant_10153" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_10153"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
 					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1643" name="onnx::Mul_3225" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3221, onnx::Div_3224, onnx::Mul_3225, onnx::Pow_3218, onnx::ReduceMean_3220, onnx::Sqrt_3223, onnx::Sub_3217"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
-				</port>
-				<port id="1" precision="I64">
 					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_3225">
-					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1644" name="Constant_150395" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="613744816" size="5120"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1280</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1645" name="onnx::Add_3226" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3226"/>
-			</rt_info>
+		<layer id="1535" name="/down_blocks.2/attentions.1/transformer_blocks.0/norm3/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -27820,65 +24947,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3226">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/norm3/Add_1_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1646" name="Constant_150396" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="613749936" size="5120"/>
+		<layer id="1536" name="Constant_85392_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="10240, 1280" offset="306878002" size="26214400" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>10240</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1647" name="onnx::MatMul_3227" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1537" name="Constant_85392" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_3227"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>10240</dim>
 					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_3227">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1648" name="Constant_147857" type="Const" version="opset1">
-			<data element_type="f32" shape="10240, 1280" offset="613755056" size="52428800"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3229, onnx::MatMul_8729"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>10240</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1649" name="onnx::Add_3229" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3229, onnx::MatMul_8729"/>
-			</rt_info>
+		<layer id="1538" name="/down_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/proj/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -27891,18 +24995,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3229">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/proj/MatMul_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1650" name="onnx::Shape_3230" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Shape_3230"/>
-			</rt_info>
+		<layer id="1539" name="/down_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/proj/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>1</dim>
@@ -27916,51 +25017,39 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Shape_3230">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/proj/Add_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1651" name="Constant_126672" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_10181, onnx::Gather_3232, onnx::Mul_3241"/>
-			</rt_info>
+		<layer id="1540" name="Constant_77213" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1652" name="Constant_126673" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_10181, onnx::Gather_3232, onnx::Mul_3241"/>
-			</rt_info>
+		<layer id="1541" name="Constant_77214" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1653" name="Constant_126669" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_10181, onnx::Gather_3232, onnx::Mul_3241"/>
-			</rt_info>
+		<layer id="1542" name="Constant_77210" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1654" name="onnx::Gather_3231" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_3231"/>
-			</rt_info>
+		<layer id="1543" name="/down_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -27969,36 +25058,27 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_3231">
+				<port id="1" precision="I64" names="/down_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Shape_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1655" name="onnx::Gather_3232" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_3232"/>
-			</rt_info>
+		<layer id="1544" name="/down_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Gather_3232">
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Constant_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1656" name="Constant_10170" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_10170"/>
-			</rt_info>
+		<layer id="1545" name="Constant_19080" type="Const" version="opset1">
+			<data element_type="i64" shape="" offset="2143392" size="8" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64" />
 			</output>
 		</layer>
-		<layer id="1657" name="onnx::Add_3233" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_10170, onnx::Add_3233, onnx::Gather_3232"/>
-			</rt_info>
+		<layer id="1546" name="/down_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Gather" type="Gather" version="opset8">
+			<data batch_dims="0" />
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
@@ -28006,30 +25086,24 @@
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
+				<port id="2" precision="I64" />
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Add_3233">
+				<port id="3" precision="I64" names="/down_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Gather_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1658" name="onnx::Add_3235" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3235"/>
-			</rt_info>
+		<layer id="1547" name="/down_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="2143400" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Add_3235">
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Constant_2_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1659" name="onnx::Div_3236" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3235, onnx::Div_3236"/>
-			</rt_info>
+		<layer id="1548" name="/down_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="I64">
 					<dim>1</dim>
@@ -28039,27 +25113,21 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Div_3236">
+				<port id="2" precision="I64" names="/down_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Add_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1660" name="onnx::Div_3237" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_3237"/>
-			</rt_info>
+		<layer id="1549" name="/down_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_3237">
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Constant_3_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1661" name="onnx::Mul_3238" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_3237, onnx::Mul_3238, onnx::Mul_3239, onnx::Slice_3240"/>
-			</rt_info>
+		<layer id="1550" name="/down_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Div" type="Divide" version="opset1">
+			<data auto_broadcast="numpy" m_pythondiv="true" />
 			<input>
 				<port id="0" precision="I64">
 					<dim>1</dim>
@@ -28069,26 +25137,20 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Mul_3238,onnx::Slice_3240">
+				<port id="2" precision="I64" names="/down_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Div_output_0,/down_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Mul_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1662" name="Constant_126668" type="Const" version="opset1">
-			<data element_type="i32" shape="1" offset="25941500" size="4"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_10181, onnx::Gather_3232, onnx::Mul_3241"/>
-			</rt_info>
+		<layer id="1551" name="Constant_77209" type="Const" version="opset1">
+			<data element_type="i32" shape="1" offset="12970946" size="4" />
 			<output>
 				<port id="0" precision="I32">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1663" name="ScatterUpdate_126674" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_10181, onnx::Gather_3232, onnx::Mul_3241"/>
-			</rt_info>
+		<layer id="1552" name="ScatterUpdate_77215" type="ScatterUpdate" version="opset3">
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
@@ -28109,22 +25171,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1664" name="Constant_126677" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941504" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_10181, onnx::Gather_3232, onnx::Mul_3241"/>
-			</rt_info>
+		<layer id="1553" name="Constant_77218" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970950" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1665" name="onnx::Mul_3241" type="StridedSlice" version="opset1">
-			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask=""/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_10181, onnx::Gather_3232, onnx::Mul_3241"/>
-			</rt_info>
+		<layer id="1554" name="/down_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Slice" type="StridedSlice" version="opset1">
+			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask="" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -28142,50 +25198,38 @@
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="FP32" names="onnx::Mul_3241">
+				<port id="4" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Slice_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1666" name="Constant_126741" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_10250, onnx::Div_3244, onnx::Gather_3232"/>
-			</rt_info>
+		<layer id="1555" name="Constant_77282" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1667" name="Constant_126740" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_10250, onnx::Div_3244, onnx::Gather_3232"/>
-			</rt_info>
+		<layer id="1556" name="Constant_77281" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1668" name="Constant_126739" type="Const" version="opset1">
-			<data element_type="i32" shape="1" offset="25941500" size="4"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_10250, onnx::Div_3244, onnx::Gather_3232"/>
-			</rt_info>
+		<layer id="1557" name="Constant_77280" type="Const" version="opset1">
+			<data element_type="i32" shape="1" offset="12970946" size="4" />
 			<output>
 				<port id="0" precision="I32">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1669" name="ScatterUpdate_126742" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_10250, onnx::Div_3244, onnx::Gather_3232"/>
-			</rt_info>
+		<layer id="1558" name="ScatterUpdate_77283" type="ScatterUpdate" version="opset3">
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
@@ -28206,33 +25250,24 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1670" name="Constant_126743" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_10250, onnx::Div_3244, onnx::Gather_3232"/>
-			</rt_info>
+		<layer id="1559" name="Constant_77284" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1671" name="onnx::Mul_3242" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_3242"/>
-			</rt_info>
+		<layer id="1560" name="/down_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Mul_3242">
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Constant_5_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1672" name="onnx::Slice_3243" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_3242, onnx::Slice_3243"/>
-			</rt_info>
+		<layer id="1561" name="/down_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Mul_1" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="I64">
 					<dim>1</dim>
@@ -28242,15 +25277,12 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Slice_3243">
+				<port id="2" precision="I64" names="/down_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Mul_1_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1673" name="ScatterUpdate_126744" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_10250, onnx::Div_3244, onnx::Gather_3232"/>
-			</rt_info>
+		<layer id="1562" name="ScatterUpdate_77285" type="ScatterUpdate" version="opset3">
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
@@ -28271,22 +25303,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1674" name="Constant_126747" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941504" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_10250, onnx::Div_3244, onnx::Gather_3232"/>
-			</rt_info>
+		<layer id="1563" name="Constant_77288" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970950" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1675" name="onnx::Div_3244" type="StridedSlice" version="opset1">
-			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask=""/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_10250, onnx::Div_3244, onnx::Gather_3232"/>
-			</rt_info>
+		<layer id="1564" name="/down_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Slice_1" type="StridedSlice" version="opset1">
+			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask="" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -28304,18 +25330,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="FP32" names="onnx::Div_3244">
+				<port id="4" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Slice_1_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1676" name="onnx::Mul_3252" type="Gelu" version="opset7">
-			<data approximation_mode="ERF"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3247, onnx::Erf_3246, onnx::Mul_3249, onnx::Mul_3250, onnx::Mul_3252"/>
-			</rt_info>
+		<layer id="1565" name="/down_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Mul_3" type="Gelu" version="opset7">
+			<data approximation_mode="ERF" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -28324,18 +25347,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="onnx::Mul_3252">
+				<port id="1" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Mul_3_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1677" name="input.368" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.368"/>
-			</rt_info>
+		<layer id="1566" name="/down_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Mul_4" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -28349,30 +25369,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.368">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Mul_4_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1678" name="Constant_147865" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 5120" offset="666183856" size="26214400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3255, onnx::MatMul_8730"/>
-			</rt_info>
+		<layer id="1567" name="Constant_85400_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 5120" offset="333092402" size="13107200" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1679" name="onnx::Add_3255" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="1568" name="Constant_85400" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3255, onnx::MatMul_8730"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>5120</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>5120</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1569" name="/down_blocks.2/attentions.1/transformer_blocks.0/ff/net.2/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -28385,18 +25417,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3255">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/ff/net.2/MatMul_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1680" name="onnx::Add_3256" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3256"/>
-			</rt_info>
+		<layer id="1570" name="/down_blocks.2/attentions.1/transformer_blocks.0/ff/net.2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>1</dim>
@@ -28410,18 +25439,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3256">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/ff/net.2/Add_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1681" name="onnx::Reshape_3257" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3257"/>
-			</rt_info>
+		<layer id="1571" name="/down_blocks.2/attentions.1/transformer_blocks.0/Add_2" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -28435,18 +25461,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_3257">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/transformer_blocks.0/Add_2_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1682" name="onnx::Transpose_3267" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_3267"/>
-			</rt_info>
+		<layer id="1572" name="/down_blocks.2/attentions.1/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="214428690" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/down_blocks.2/attentions.1/Constant_1_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1573" name="/down_blocks.2/attentions.1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -28458,7 +25489,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_3267">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>16</dim>
 					<dim>16</dim>
@@ -28466,21 +25497,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1683" name="Constant_10331" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="27579960" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_10331"/>
-			</rt_info>
+		<layer id="1574" name="Constant_19245" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="13790206" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1684" name="input.372" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.372"/>
-			</rt_info>
+		<layer id="1575" name="/down_blocks.2/attentions.1/Transpose_1" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -28493,7 +25518,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.372">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/Transpose_1_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -28501,13 +25526,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1685" name="m.down_blocks.2.attentions.1.proj_out.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280, 1, 1" offset="692398256" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.2.attentions.1.proj_out.weight"/>
-			</rt_info>
+		<layer id="1576" name="down_blocks.2.attentions.1.proj_out.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280, 1, 1" offset="346199602" size="3276800" />
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.2.attentions.1.proj_out.weight">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -28515,11 +25537,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1686" name="Convolution_10333" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
+		<layer id="1577" name="down_blocks.2.attentions.1.proj_out.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_10333"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="down_blocks.2.attentions.1.proj_out.weight">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1578" name="/down_blocks.2/attentions.1/proj_out/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -28543,10 +25584,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1687" name="Reshape_10353" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="698951856" size="5120"/>
+		<layer id="1579" name="Reshape_19267_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="349476402" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -28554,11 +25595,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1688" name="onnx::Add_3269" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1580" name="Reshape_19267" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_10352, Reshape_10353, onnx::Add_3269"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1581" name="/down_blocks.2/attentions.1/proj_out/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -28574,7 +25634,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3269">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/proj_out/Conv_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -28582,11 +25642,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1689" name="input.376" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.376"/>
-			</rt_info>
+		<layer id="1582" name="/down_blocks.2/attentions.1/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -28602,7 +25659,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.376">
+				<port id="2" precision="FP32" names="/down_blocks.2/attentions.1/Add_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -28610,13 +25667,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1690" name="m.down_blocks.2.downsamplers.0.conv.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280, 3, 3" offset="698956976" size="58982400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.2.downsamplers.0.conv.weight"/>
-			</rt_info>
+		<layer id="1583" name="down_blocks.2.downsamplers.0.conv.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280, 3, 3" offset="349478962" size="29491200" />
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.2.downsamplers.0.conv.weight">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 					<dim>3</dim>
@@ -28624,11 +25678,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1691" name="Convolution_10382" type="Convolution" version="opset1">
-			<data strides="2, 2" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
+		<layer id="1584" name="down_blocks.2.downsamplers.0.conv.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_10382"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="down_blocks.2.downsamplers.0.conv.weight">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1585" name="/down_blocks.2/downsamplers.0/conv/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="2, 2" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -28652,10 +25725,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1692" name="Reshape_10402" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="757939376" size="5120"/>
+		<layer id="1586" name="Reshape_19316_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="378970162" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -28663,11 +25736,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1693" name="onnx::Cast_3271" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1587" name="Reshape_19316" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_10401, Reshape_10402, input.380, onnx::Cast_3271"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1588" name="/down_blocks.2/downsamplers.0/conv/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -28683,7 +25775,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.380,onnx::Cast_3271">
+				<port id="2" precision="FP32" names="/down_blocks.2/downsamplers.0/conv/Conv_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -28691,22 +25783,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1694" name="onnx::Reshape_3273" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3273"/>
-			</rt_info>
+		<layer id="1589" name="/down_blocks.3/resnets.0/norm1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_3273">
+				<port id="0" precision="I64" names="/down_blocks.3/resnets.0/norm1/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1695" name="onnx::InstanceNormalization_3274" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_3274"/>
-			</rt_info>
+		<layer id="1590" name="/down_blocks.3/resnets.0/norm1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -28719,29 +25805,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_3274">
+				<port id="2" precision="FP32" names="/down_blocks.3/resnets.0/norm1/Reshape_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1696" name="Constant_10440" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_10440"/>
-			</rt_info>
+		<layer id="1591" name="Constant_19353" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1697" name="MVN_10441" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_10460, Concat_10505, MVN_10441, Multiply_10488, Reshape_10461, Reshape_10506, onnx::Reshape_3277"/>
-			</rt_info>
+		<layer id="1592" name="MVN_19354" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -28753,18 +25833,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_3277">
+				<port id="2" precision="FP32" names="/down_blocks.3/resnets.0/norm1/InstanceNormalization_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1698" name="onnx::Reshape_3278" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3278"/>
-			</rt_info>
+		<layer id="1593" name="/down_blocks.3/resnets.0/norm1/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -28774,16 +25851,13 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_3278">
+				<port id="1" precision="I64" names="/down_blocks.3/resnets.0/norm1/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1699" name="onnx::Mul_3279" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_3279"/>
-			</rt_info>
+		<layer id="1594" name="/down_blocks.3/resnets.0/norm1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -28795,7 +25869,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_3279">
+				<port id="2" precision="FP32" names="/down_blocks.3/resnets.0/norm1/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -28803,10 +25877,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1700" name="Constant_150399" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="757944496" size="5120"/>
+		<layer id="1595" name="Constant_86989_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="378972722" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -28814,11 +25888,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1701" name="onnx::Add_3282" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1596" name="Constant_86989" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3282"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1597" name="/down_blocks.3/resnets.0/norm1/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -28834,7 +25927,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3282">
+				<port id="2" precision="FP32" names="/down_blocks.3/resnets.0/norm1/Mul_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -28842,10 +25935,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1702" name="Constant_150400" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="757949616" size="5120"/>
+		<layer id="1598" name="Constant_86990_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="378975282" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -28853,11 +25946,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1703" name="onnx::Cast_3285" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1599" name="Constant_86990" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.384, onnx::Cast_3285"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1600" name="/down_blocks.3/resnets.0/norm1/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -28873,7 +25985,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.384,onnx::Cast_3285">
+				<port id="2" precision="FP32" names="/down_blocks.3/resnets.0/norm1/Add_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -28881,10 +25993,7 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1704" name="input.388" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.388, onnx::Mul_3287"/>
-			</rt_info>
+		<layer id="1601" name="/down_blocks.3/resnets.0/nonlinearity/Mul" type="Swish" version="opset4">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -28894,7 +26003,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="input.388">
+				<port id="1" precision="FP32" names="/down_blocks.3/resnets.0/nonlinearity/Mul_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -28902,13 +26011,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1705" name="m.down_blocks.3.resnets.0.conv1.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280, 3, 3" offset="757954736" size="58982400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.3.resnets.0.conv1.weight"/>
-			</rt_info>
+		<layer id="1602" name="down_blocks.3.resnets.0.conv1.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280, 3, 3" offset="378977842" size="29491200" />
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.3.resnets.0.conv1.weight">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 					<dim>3</dim>
@@ -28916,11 +26022,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1706" name="Convolution_10546" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
+		<layer id="1603" name="down_blocks.3.resnets.0.conv1.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_10546"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="down_blocks.3.resnets.0.conv1.weight">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1604" name="/down_blocks.3/resnets.0/conv1/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -28944,10 +26069,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1707" name="Reshape_10566" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="816937136" size="5120"/>
+		<layer id="1605" name="Reshape_19478_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="408469042" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -28955,11 +26080,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1708" name="onnx::Add_3289" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1606" name="Reshape_19478" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_10565, Reshape_10566, onnx::Add_3289"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1607" name="/down_blocks.3/resnets.0/conv1/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -28975,7 +26119,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3289">
+				<port id="2" precision="FP32" names="/down_blocks.3/resnets.0/conv1/Conv_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -28983,40 +26127,35 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1709" name="onnx::Gemm_3291" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gemm_3291, onnx::Mul_3290"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-				</port>
-			</input>
+		<layer id="1608" name="down_blocks.3.resnets.0.time_emb_proj.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="408471602" size="3276800" />
 			<output>
-				<port id="1" precision="FP32" names="onnx::Gemm_3291">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1710" name="m.down_blocks.3.resnets.0.time_emb_proj.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="816942256" size="6553600"/>
+		<layer id="1609" name="down_blocks.3.resnets.0.time_emb_proj.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.3.resnets.0.time_emb_proj.weight"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.3.resnets.0.time_emb_proj.weight">
+				<port id="1" precision="FP32" names="down_blocks.3.resnets.0.time_emb_proj.weight">
 					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1711" name="MatMul_10598" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="MatMul_10598"/>
-			</rt_info>
+		<layer id="1610" name="/down_blocks.3/resnets.0/time_emb_proj/Gemm/WithoutBiases" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -29034,20 +26173,35 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1712" name="Constant_150401" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280" offset="823495856" size="5120"/>
+		<layer id="1611" name="Constant_86991_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280" offset="411748402" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1713" name="onnx::Unsqueeze_3292" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1612" name="Constant_86991" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Multiply_10599, onnx::Unsqueeze_3292"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1613" name="/down_blocks.3/resnets.0/time_emb_proj/Gemm" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -29059,27 +26213,21 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_3292">
+				<port id="2" precision="FP32" names="/down_blocks.3/resnets.0/time_emb_proj/Gemm_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1714" name="onnx::Unsqueeze_3293" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3293"/>
-			</rt_info>
+		<layer id="1614" name="/down_blocks.3/resnets.0/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_3293">
+				<port id="0" precision="I64" names="/down_blocks.3/resnets.0/Constant_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1715" name="onnx::Unsqueeze_3294" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3294"/>
-			</rt_info>
+		<layer id="1615" name="/down_blocks.3/resnets.0/Unsqueeze" type="Unsqueeze" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -29090,28 +26238,22 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_3294">
+				<port id="2" precision="FP32" names="/down_blocks.3/resnets.0/Unsqueeze_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1716" name="onnx::Unsqueeze_3295" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="14129392" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3295"/>
-			</rt_info>
+		<layer id="1616" name="/down_blocks.3/resnets.0/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="7064752" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_3295">
+				<port id="0" precision="I64" names="/down_blocks.3/resnets.0/Constant_1_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1717" name="onnx::Add_3296" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3296"/>
-			</rt_info>
+		<layer id="1617" name="/down_blocks.3/resnets.0/Unsqueeze_1" type="Unsqueeze" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -29123,7 +26265,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3296">
+				<port id="2" precision="FP32" names="/down_blocks.3/resnets.0/Unsqueeze_1_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -29131,11 +26273,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1718" name="onnx::Cast_3297" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.392, onnx::Cast_3297"/>
-			</rt_info>
+		<layer id="1618" name="/down_blocks.3/resnets.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -29151,7 +26290,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.392,onnx::Cast_3297">
+				<port id="2" precision="FP32" names="/down_blocks.3/resnets.0/Add_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -29159,22 +26298,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1719" name="onnx::Reshape_3299" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3299"/>
-			</rt_info>
+		<layer id="1619" name="/down_blocks.3/resnets.0/norm2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_3299">
+				<port id="0" precision="I64" names="/down_blocks.3/resnets.0/norm2/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1720" name="onnx::InstanceNormalization_3300" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_3300"/>
-			</rt_info>
+		<layer id="1620" name="/down_blocks.3/resnets.0/norm2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -29187,29 +26320,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_3300">
+				<port id="2" precision="FP32" names="/down_blocks.3/resnets.0/norm2/Reshape_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1721" name="Constant_10616" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_10616"/>
-			</rt_info>
+		<layer id="1621" name="Constant_19526" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1722" name="MVN_10617" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_10636, Concat_10681, MVN_10617, Multiply_10664, Reshape_10637, Reshape_10682, onnx::Reshape_3303"/>
-			</rt_info>
+		<layer id="1622" name="MVN_19527" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -29221,18 +26348,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_3303">
+				<port id="2" precision="FP32" names="/down_blocks.3/resnets.0/norm2/InstanceNormalization_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1723" name="onnx::Reshape_3304" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3304"/>
-			</rt_info>
+		<layer id="1623" name="/down_blocks.3/resnets.0/norm2/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -29242,16 +26366,13 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_3304">
+				<port id="1" precision="I64" names="/down_blocks.3/resnets.0/norm2/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1724" name="onnx::Mul_3305" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_3305"/>
-			</rt_info>
+		<layer id="1624" name="/down_blocks.3/resnets.0/norm2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -29263,7 +26384,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_3305">
+				<port id="2" precision="FP32" names="/down_blocks.3/resnets.0/norm2/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -29271,10 +26392,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1725" name="Constant_150402" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="823500976" size="5120"/>
+		<layer id="1625" name="Constant_86992_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="411750962" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -29282,11 +26403,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1726" name="onnx::Add_3308" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1626" name="Constant_86992" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3308"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1627" name="/down_blocks.3/resnets.0/norm2/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -29302,7 +26442,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3308">
+				<port id="2" precision="FP32" names="/down_blocks.3/resnets.0/norm2/Mul_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -29310,10 +26450,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1727" name="Constant_150403" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="823506096" size="5120"/>
+		<layer id="1628" name="Constant_86993_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="411753522" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -29321,11 +26461,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1728" name="onnx::Cast_3311" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1629" name="Constant_86993" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.396, onnx::Cast_3311"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1630" name="/down_blocks.3/resnets.0/norm2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -29341,7 +26500,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.396,onnx::Cast_3311">
+				<port id="2" precision="FP32" names="/down_blocks.3/resnets.0/norm2/Add_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -29349,10 +26508,7 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1729" name="input.400" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.400, onnx::Mul_3313"/>
-			</rt_info>
+		<layer id="1631" name="/down_blocks.3/resnets.0/nonlinearity_1/Mul" type="Swish" version="opset4">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -29362,7 +26518,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="input.400">
+				<port id="1" precision="FP32" names="/down_blocks.3/resnets.0/nonlinearity_1/Mul_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -29370,13 +26526,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1730" name="m.down_blocks.3.resnets.0.conv2.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280, 3, 3" offset="823511216" size="58982400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.3.resnets.0.conv2.weight"/>
-			</rt_info>
+		<layer id="1632" name="down_blocks.3.resnets.0.conv2.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280, 3, 3" offset="411756082" size="29491200" />
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.3.resnets.0.conv2.weight">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 					<dim>3</dim>
@@ -29384,11 +26537,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1731" name="Convolution_10722" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
+		<layer id="1633" name="down_blocks.3.resnets.0.conv2.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_10722"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="down_blocks.3.resnets.0.conv2.weight">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1634" name="/down_blocks.3/resnets.0/conv2/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -29412,10 +26584,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1732" name="Reshape_10742" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="882493616" size="5120"/>
+		<layer id="1635" name="Reshape_19651_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="441247282" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -29423,11 +26595,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1733" name="onnx::Add_3315" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1636" name="Reshape_19651" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_10741, Reshape_10742, onnx::Add_3315"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1637" name="/down_blocks.3/resnets.0/conv2/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -29443,7 +26634,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3315">
+				<port id="2" precision="FP32" names="/down_blocks.3/resnets.0/conv2/Conv_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -29451,11 +26642,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1734" name="onnx::Div_3316" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.404, onnx::Cast_3318, onnx::Div_3316"/>
-			</rt_info>
+		<layer id="1638" name="/down_blocks.3/resnets.0/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -29471,7 +26659,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.404,onnx::Cast_3318,onnx::Div_3316">
+				<port id="2" precision="FP32" names="/down_blocks.3/resnets.0/Add_1_output_0,/down_blocks.3/resnets.0/Div_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -29479,22 +26667,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1735" name="onnx::Reshape_3320" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3320"/>
-			</rt_info>
+		<layer id="1639" name="/down_blocks.3/resnets.1/norm1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_3320">
+				<port id="0" precision="I64" names="/down_blocks.3/resnets.1/norm1/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1736" name="onnx::InstanceNormalization_3321" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_3321"/>
-			</rt_info>
+		<layer id="1640" name="/down_blocks.3/resnets.1/norm1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -29507,29 +26689,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_3321">
+				<port id="2" precision="FP32" names="/down_blocks.3/resnets.1/norm1/Reshape_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1737" name="Constant_10783" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_10783"/>
-			</rt_info>
+		<layer id="1641" name="Constant_19691" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1738" name="MVN_10784" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_10803, Concat_10848, MVN_10784, Multiply_10831, Reshape_10804, Reshape_10849, onnx::Reshape_3324"/>
-			</rt_info>
+		<layer id="1642" name="MVN_19692" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -29541,18 +26717,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_3324">
+				<port id="2" precision="FP32" names="/down_blocks.3/resnets.1/norm1/InstanceNormalization_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1739" name="onnx::Reshape_3325" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3325"/>
-			</rt_info>
+		<layer id="1643" name="/down_blocks.3/resnets.1/norm1/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -29562,16 +26735,13 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_3325">
+				<port id="1" precision="I64" names="/down_blocks.3/resnets.1/norm1/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1740" name="onnx::Mul_3326" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_3326"/>
-			</rt_info>
+		<layer id="1644" name="/down_blocks.3/resnets.1/norm1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -29583,7 +26753,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_3326">
+				<port id="2" precision="FP32" names="/down_blocks.3/resnets.1/norm1/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -29591,10 +26761,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1741" name="Constant_150404" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="882498736" size="5120"/>
+		<layer id="1645" name="Constant_86994_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="441249842" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -29602,11 +26772,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1742" name="onnx::Add_3329" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1646" name="Constant_86994" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3329"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1647" name="/down_blocks.3/resnets.1/norm1/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -29622,7 +26811,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3329">
+				<port id="2" precision="FP32" names="/down_blocks.3/resnets.1/norm1/Mul_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -29630,10 +26819,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1743" name="Constant_150405" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="882503856" size="5120"/>
+		<layer id="1648" name="Constant_86995_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="441252402" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -29641,11 +26830,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1744" name="onnx::Cast_3332" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1649" name="Constant_86995" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.408, onnx::Cast_3332"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1650" name="/down_blocks.3/resnets.1/norm1/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -29661,7 +26869,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.408,onnx::Cast_3332">
+				<port id="2" precision="FP32" names="/down_blocks.3/resnets.1/norm1/Add_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -29669,10 +26877,7 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1745" name="input.412" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.412, onnx::Mul_3334"/>
-			</rt_info>
+		<layer id="1651" name="/down_blocks.3/resnets.1/nonlinearity/Mul" type="Swish" version="opset4">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -29682,7 +26887,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="input.412">
+				<port id="1" precision="FP32" names="/down_blocks.3/resnets.1/nonlinearity/Mul_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -29690,13 +26895,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1746" name="m.down_blocks.3.resnets.1.conv1.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280, 3, 3" offset="882508976" size="58982400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.3.resnets.1.conv1.weight"/>
-			</rt_info>
+		<layer id="1652" name="down_blocks.3.resnets.1.conv1.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280, 3, 3" offset="441254962" size="29491200" />
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.3.resnets.1.conv1.weight">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 					<dim>3</dim>
@@ -29704,11 +26906,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1747" name="Convolution_10889" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
+		<layer id="1653" name="down_blocks.3.resnets.1.conv1.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_10889"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="down_blocks.3.resnets.1.conv1.weight">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1654" name="/down_blocks.3/resnets.1/conv1/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -29732,10 +26953,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1748" name="Reshape_10909" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="941491376" size="5120"/>
+		<layer id="1655" name="Reshape_19816_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="470746162" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -29743,11 +26964,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1749" name="onnx::Add_3336" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1656" name="Reshape_19816" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_10908, Reshape_10909, onnx::Add_3336"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1657" name="/down_blocks.3/resnets.1/conv1/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -29763,7 +27003,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3336">
+				<port id="2" precision="FP32" names="/down_blocks.3/resnets.1/conv1/Conv_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -29771,40 +27011,35 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1750" name="onnx::Gemm_3338" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gemm_3338, onnx::Mul_3337"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-				</port>
-			</input>
+		<layer id="1658" name="down_blocks.3.resnets.1.time_emb_proj.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="470748722" size="3276800" />
 			<output>
-				<port id="1" precision="FP32" names="onnx::Gemm_3338">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1751" name="m.down_blocks.3.resnets.1.time_emb_proj.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="941496496" size="6553600"/>
+		<layer id="1659" name="down_blocks.3.resnets.1.time_emb_proj.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.3.resnets.1.time_emb_proj.weight"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.3.resnets.1.time_emb_proj.weight">
+				<port id="1" precision="FP32" names="down_blocks.3.resnets.1.time_emb_proj.weight">
 					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1752" name="MatMul_10941" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="MatMul_10941"/>
-			</rt_info>
+		<layer id="1660" name="/down_blocks.3/resnets.1/time_emb_proj/Gemm/WithoutBiases" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -29822,20 +27057,35 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1753" name="Constant_150406" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280" offset="948050096" size="5120"/>
+		<layer id="1661" name="Constant_86996_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280" offset="474025522" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1754" name="onnx::Unsqueeze_3339" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1662" name="Constant_86996" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Multiply_10942, onnx::Unsqueeze_3339"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1663" name="/down_blocks.3/resnets.1/time_emb_proj/Gemm" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -29847,27 +27097,21 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_3339">
+				<port id="2" precision="FP32" names="/down_blocks.3/resnets.1/time_emb_proj/Gemm_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1755" name="onnx::Unsqueeze_3340" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3340"/>
-			</rt_info>
+		<layer id="1664" name="/down_blocks.3/resnets.1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_3340">
+				<port id="0" precision="I64" names="/down_blocks.3/resnets.1/Constant_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1756" name="onnx::Unsqueeze_3341" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3341"/>
-			</rt_info>
+		<layer id="1665" name="/down_blocks.3/resnets.1/Unsqueeze" type="Unsqueeze" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -29878,28 +27122,22 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_3341">
+				<port id="2" precision="FP32" names="/down_blocks.3/resnets.1/Unsqueeze_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1757" name="onnx::Unsqueeze_3342" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="14129392" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3342"/>
-			</rt_info>
+		<layer id="1666" name="/down_blocks.3/resnets.1/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="7064752" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_3342">
+				<port id="0" precision="I64" names="/down_blocks.3/resnets.1/Constant_1_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1758" name="onnx::Add_3343" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3343"/>
-			</rt_info>
+		<layer id="1667" name="/down_blocks.3/resnets.1/Unsqueeze_1" type="Unsqueeze" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -29911,7 +27149,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3343">
+				<port id="2" precision="FP32" names="/down_blocks.3/resnets.1/Unsqueeze_1_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -29919,11 +27157,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1759" name="onnx::Cast_3344" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.416, onnx::Cast_3344"/>
-			</rt_info>
+		<layer id="1668" name="/down_blocks.3/resnets.1/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -29939,7 +27174,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.416,onnx::Cast_3344">
+				<port id="2" precision="FP32" names="/down_blocks.3/resnets.1/Add_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -29947,22 +27182,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1760" name="onnx::Reshape_3346" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3346"/>
-			</rt_info>
+		<layer id="1669" name="/down_blocks.3/resnets.1/norm2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_3346">
+				<port id="0" precision="I64" names="/down_blocks.3/resnets.1/norm2/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1761" name="onnx::InstanceNormalization_3347" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_3347"/>
-			</rt_info>
+		<layer id="1670" name="/down_blocks.3/resnets.1/norm2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -29975,29 +27204,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_3347">
+				<port id="2" precision="FP32" names="/down_blocks.3/resnets.1/norm2/Reshape_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1762" name="Constant_10959" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_10959"/>
-			</rt_info>
+		<layer id="1671" name="Constant_19864" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1763" name="MVN_10960" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_10979, Concat_11024, MVN_10960, Multiply_11007, Reshape_10980, Reshape_11025, onnx::Reshape_3350"/>
-			</rt_info>
+		<layer id="1672" name="MVN_19865" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -30009,18 +27232,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_3350">
+				<port id="2" precision="FP32" names="/down_blocks.3/resnets.1/norm2/InstanceNormalization_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1764" name="onnx::Reshape_3351" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3351"/>
-			</rt_info>
+		<layer id="1673" name="/down_blocks.3/resnets.1/norm2/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -30030,16 +27250,13 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_3351">
+				<port id="1" precision="I64" names="/down_blocks.3/resnets.1/norm2/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1765" name="onnx::Mul_3352" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_3352"/>
-			</rt_info>
+		<layer id="1674" name="/down_blocks.3/resnets.1/norm2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -30051,7 +27268,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_3352">
+				<port id="2" precision="FP32" names="/down_blocks.3/resnets.1/norm2/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -30059,10 +27276,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1766" name="Constant_150407" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="948055216" size="5120"/>
+		<layer id="1675" name="Constant_86997_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="474028082" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -30070,11 +27287,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1767" name="onnx::Add_3355" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1676" name="Constant_86997" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3355"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1677" name="/down_blocks.3/resnets.1/norm2/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -30090,7 +27326,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3355">
+				<port id="2" precision="FP32" names="/down_blocks.3/resnets.1/norm2/Mul_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -30098,10 +27334,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1768" name="Constant_150408" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="948060336" size="5120"/>
+		<layer id="1678" name="Constant_86998_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="474030642" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -30109,11 +27345,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1769" name="onnx::Cast_3358" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1679" name="Constant_86998" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.420, onnx::Cast_3358"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1680" name="/down_blocks.3/resnets.1/norm2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -30129,7 +27384,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.420,onnx::Cast_3358">
+				<port id="2" precision="FP32" names="/down_blocks.3/resnets.1/norm2/Add_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -30137,10 +27392,7 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1770" name="input.424" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.424, onnx::Mul_3360"/>
-			</rt_info>
+		<layer id="1681" name="/down_blocks.3/resnets.1/nonlinearity_1/Mul" type="Swish" version="opset4">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -30150,7 +27402,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="input.424">
+				<port id="1" precision="FP32" names="/down_blocks.3/resnets.1/nonlinearity_1/Mul_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -30158,13 +27410,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1771" name="m.down_blocks.3.resnets.1.conv2.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280, 3, 3" offset="948065456" size="58982400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.down_blocks.3.resnets.1.conv2.weight"/>
-			</rt_info>
+		<layer id="1682" name="down_blocks.3.resnets.1.conv2.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280, 3, 3" offset="474033202" size="29491200" />
 			<output>
-				<port id="0" precision="FP32" names="m.down_blocks.3.resnets.1.conv2.weight">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 					<dim>3</dim>
@@ -30172,11 +27421,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1772" name="Convolution_11065" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
+		<layer id="1683" name="down_blocks.3.resnets.1.conv2.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_11065"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="down_blocks.3.resnets.1.conv2.weight">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1684" name="/down_blocks.3/resnets.1/conv2/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -30200,10 +27468,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1773" name="Reshape_11085" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="1007047856" size="5120"/>
+		<layer id="1685" name="Reshape_19989_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="503524402" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -30211,11 +27479,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1774" name="onnx::Add_3362" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1686" name="Reshape_19989" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_11084, Reshape_11085, onnx::Add_3362"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1687" name="/down_blocks.3/resnets.1/conv2/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -30231,7 +27518,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3362">
+				<port id="2" precision="FP32" names="/down_blocks.3/resnets.1/conv2/Conv_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -30239,11 +27526,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1775" name="onnx::Div_3363" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.428, onnx::Cast_3365, onnx::Div_3363"/>
-			</rt_info>
+		<layer id="1688" name="/down_blocks.3/resnets.1/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -30259,7 +27543,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.428,onnx::Cast_3365,onnx::Div_3363">
+				<port id="2" precision="FP32" names="/down_blocks.3/resnets.1/Add_1_output_0,/down_blocks.3/resnets.1/Div_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -30267,22 +27551,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1776" name="onnx::Reshape_3367" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3367"/>
-			</rt_info>
+		<layer id="1689" name="/mid_block/resnets.0/norm1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_3367">
+				<port id="0" precision="I64" names="/mid_block/resnets.0/norm1/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1777" name="onnx::InstanceNormalization_3368" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_3368"/>
-			</rt_info>
+		<layer id="1690" name="/mid_block/resnets.0/norm1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -30295,29 +27573,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_3368">
+				<port id="2" precision="FP32" names="/mid_block/resnets.0/norm1/Reshape_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1778" name="Constant_11126" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_11126"/>
-			</rt_info>
+		<layer id="1691" name="Constant_20029" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1779" name="MVN_11127" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_11146, Concat_11191, MVN_11127, Multiply_11174, Reshape_11147, Reshape_11192, onnx::Reshape_3371"/>
-			</rt_info>
+		<layer id="1692" name="MVN_20030" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -30329,18 +27601,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_3371">
+				<port id="2" precision="FP32" names="/mid_block/resnets.0/norm1/InstanceNormalization_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1780" name="onnx::Reshape_3372" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3372"/>
-			</rt_info>
+		<layer id="1693" name="/mid_block/resnets.0/norm1/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -30350,16 +27619,13 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_3372">
+				<port id="1" precision="I64" names="/mid_block/resnets.0/norm1/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1781" name="onnx::Mul_3373" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_3373"/>
-			</rt_info>
+		<layer id="1694" name="/mid_block/resnets.0/norm1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -30371,7 +27637,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_3373">
+				<port id="2" precision="FP32" names="/mid_block/resnets.0/norm1/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -30379,10 +27645,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1782" name="Constant_150409" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="1007052976" size="5120"/>
+		<layer id="1695" name="Constant_86999_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="503526962" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -30390,11 +27656,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1783" name="onnx::Add_3376" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1696" name="Constant_86999" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3376"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1697" name="/mid_block/resnets.0/norm1/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -30410,7 +27695,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3376">
+				<port id="2" precision="FP32" names="/mid_block/resnets.0/norm1/Mul_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -30418,10 +27703,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1784" name="Constant_150410" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="1007058096" size="5120"/>
+		<layer id="1698" name="Constant_87000_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="503529522" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -30429,11 +27714,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1785" name="onnx::Cast_3379" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1699" name="Constant_87000" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.432, onnx::Cast_3379"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1700" name="/mid_block/resnets.0/norm1/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -30449,7 +27753,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.432,onnx::Cast_3379">
+				<port id="2" precision="FP32" names="/mid_block/resnets.0/norm1/Add_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -30457,10 +27761,7 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1786" name="input.436" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.436, onnx::Mul_3381"/>
-			</rt_info>
+		<layer id="1701" name="/mid_block/resnets.0/nonlinearity/Mul" type="Swish" version="opset4">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -30470,7 +27771,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="input.436">
+				<port id="1" precision="FP32" names="/mid_block/resnets.0/nonlinearity/Mul_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -30478,13 +27779,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1787" name="m.mid_block.resnets.0.conv1.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280, 3, 3" offset="1007063216" size="58982400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.mid_block.resnets.0.conv1.weight"/>
-			</rt_info>
+		<layer id="1702" name="mid_block.resnets.0.conv1.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280, 3, 3" offset="503532082" size="29491200" />
 			<output>
-				<port id="0" precision="FP32" names="m.mid_block.resnets.0.conv1.weight">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 					<dim>3</dim>
@@ -30492,11 +27790,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1788" name="Convolution_11232" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
+		<layer id="1703" name="mid_block.resnets.0.conv1.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_11232"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="mid_block.resnets.0.conv1.weight">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1704" name="/mid_block/resnets.0/conv1/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -30520,10 +27837,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1789" name="Reshape_11252" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="1066045616" size="5120"/>
+		<layer id="1705" name="Reshape_20154_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="533023282" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -30531,11 +27848,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1790" name="onnx::Add_3383" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1706" name="Reshape_20154" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_11251, Reshape_11252, onnx::Add_3383"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1707" name="/mid_block/resnets.0/conv1/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -30551,7 +27887,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3383">
+				<port id="2" precision="FP32" names="/mid_block/resnets.0/conv1/Conv_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -30559,40 +27895,35 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1791" name="onnx::Gemm_3385" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gemm_3385, onnx::Mul_3384"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-				</port>
-			</input>
+		<layer id="1708" name="mid_block.resnets.0.time_emb_proj.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="533025842" size="3276800" />
 			<output>
-				<port id="1" precision="FP32" names="onnx::Gemm_3385">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1792" name="m.mid_block.resnets.0.time_emb_proj.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="1066050736" size="6553600"/>
+		<layer id="1709" name="mid_block.resnets.0.time_emb_proj.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="m.mid_block.resnets.0.time_emb_proj.weight"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="FP32" names="m.mid_block.resnets.0.time_emb_proj.weight">
+				<port id="1" precision="FP32" names="mid_block.resnets.0.time_emb_proj.weight">
 					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1793" name="MatMul_11284" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="MatMul_11284"/>
-			</rt_info>
+		<layer id="1710" name="/mid_block/resnets.0/time_emb_proj/Gemm/WithoutBiases" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -30610,20 +27941,35 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1794" name="Constant_150411" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280" offset="1072604336" size="5120"/>
+		<layer id="1711" name="Constant_87001_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280" offset="536302642" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1795" name="onnx::Unsqueeze_3386" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1712" name="Constant_87001" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Multiply_11285, onnx::Unsqueeze_3386"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1713" name="/mid_block/resnets.0/time_emb_proj/Gemm" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -30635,27 +27981,21 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_3386">
+				<port id="2" precision="FP32" names="/mid_block/resnets.0/time_emb_proj/Gemm_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1796" name="onnx::Unsqueeze_3387" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3387"/>
-			</rt_info>
+		<layer id="1714" name="/mid_block/resnets.0/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_3387">
+				<port id="0" precision="I64" names="/mid_block/resnets.0/Constant_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1797" name="onnx::Unsqueeze_3388" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3388"/>
-			</rt_info>
+		<layer id="1715" name="/mid_block/resnets.0/Unsqueeze" type="Unsqueeze" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -30666,28 +28006,22 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_3388">
+				<port id="2" precision="FP32" names="/mid_block/resnets.0/Unsqueeze_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1798" name="onnx::Unsqueeze_3389" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="14129392" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3389"/>
-			</rt_info>
+		<layer id="1716" name="/mid_block/resnets.0/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="7064752" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_3389">
+				<port id="0" precision="I64" names="/mid_block/resnets.0/Constant_1_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1799" name="onnx::Add_3390" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3390"/>
-			</rt_info>
+		<layer id="1717" name="/mid_block/resnets.0/Unsqueeze_1" type="Unsqueeze" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -30699,7 +28033,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3390">
+				<port id="2" precision="FP32" names="/mid_block/resnets.0/Unsqueeze_1_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -30707,11 +28041,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1800" name="onnx::Cast_3391" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.440, onnx::Cast_3391"/>
-			</rt_info>
+		<layer id="1718" name="/mid_block/resnets.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -30727,7 +28058,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.440,onnx::Cast_3391">
+				<port id="2" precision="FP32" names="/mid_block/resnets.0/Add_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -30735,22 +28066,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1801" name="onnx::Reshape_3393" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3393"/>
-			</rt_info>
+		<layer id="1719" name="/mid_block/resnets.0/norm2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_3393">
+				<port id="0" precision="I64" names="/mid_block/resnets.0/norm2/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1802" name="onnx::InstanceNormalization_3394" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_3394"/>
-			</rt_info>
+		<layer id="1720" name="/mid_block/resnets.0/norm2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -30763,29 +28088,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_3394">
+				<port id="2" precision="FP32" names="/mid_block/resnets.0/norm2/Reshape_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1803" name="Constant_11302" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_11302"/>
-			</rt_info>
+		<layer id="1721" name="Constant_20202" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1804" name="MVN_11303" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_11322, Concat_11367, MVN_11303, Multiply_11350, Reshape_11323, Reshape_11368, onnx::Reshape_3397"/>
-			</rt_info>
+		<layer id="1722" name="MVN_20203" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -30797,18 +28116,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_3397">
+				<port id="2" precision="FP32" names="/mid_block/resnets.0/norm2/InstanceNormalization_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1805" name="onnx::Reshape_3398" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3398"/>
-			</rt_info>
+		<layer id="1723" name="/mid_block/resnets.0/norm2/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -30818,16 +28134,13 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_3398">
+				<port id="1" precision="I64" names="/mid_block/resnets.0/norm2/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1806" name="onnx::Mul_3399" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_3399"/>
-			</rt_info>
+		<layer id="1724" name="/mid_block/resnets.0/norm2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -30839,7 +28152,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_3399">
+				<port id="2" precision="FP32" names="/mid_block/resnets.0/norm2/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -30847,10 +28160,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1807" name="Constant_150412" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="1072609456" size="5120"/>
+		<layer id="1725" name="Constant_87002_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="536305202" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -30858,11 +28171,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1808" name="onnx::Add_3402" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1726" name="Constant_87002" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3402"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1727" name="/mid_block/resnets.0/norm2/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -30878,7 +28210,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3402">
+				<port id="2" precision="FP32" names="/mid_block/resnets.0/norm2/Mul_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -30886,10 +28218,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1809" name="Constant_150413" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="1072614576" size="5120"/>
+		<layer id="1728" name="Constant_87003_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="536307762" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -30897,11 +28229,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1810" name="onnx::Cast_3405" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1729" name="Constant_87003" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.444, onnx::Cast_3405"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1730" name="/mid_block/resnets.0/norm2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -30917,7 +28268,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.444,onnx::Cast_3405">
+				<port id="2" precision="FP32" names="/mid_block/resnets.0/norm2/Add_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -30925,10 +28276,7 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1811" name="input.448" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.448, onnx::Mul_3407"/>
-			</rt_info>
+		<layer id="1731" name="/mid_block/resnets.0/nonlinearity_1/Mul" type="Swish" version="opset4">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -30938,7 +28286,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="input.448">
+				<port id="1" precision="FP32" names="/mid_block/resnets.0/nonlinearity_1/Mul_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -30946,13 +28294,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1812" name="m.mid_block.resnets.0.conv2.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280, 3, 3" offset="1072619696" size="58982400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.mid_block.resnets.0.conv2.weight"/>
-			</rt_info>
+		<layer id="1732" name="mid_block.resnets.0.conv2.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280, 3, 3" offset="536310322" size="29491200" />
 			<output>
-				<port id="0" precision="FP32" names="m.mid_block.resnets.0.conv2.weight">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 					<dim>3</dim>
@@ -30960,11 +28305,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1813" name="Convolution_11408" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
+		<layer id="1733" name="mid_block.resnets.0.conv2.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_11408"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="mid_block.resnets.0.conv2.weight">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1734" name="/mid_block/resnets.0/conv2/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -30988,10 +28352,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1814" name="Reshape_11428" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="1131602096" size="5120"/>
+		<layer id="1735" name="Reshape_20327_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="565801522" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -30999,11 +28363,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1815" name="onnx::Add_3409" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1736" name="Reshape_20327" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_11427, Reshape_11428, onnx::Add_3409"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1737" name="/mid_block/resnets.0/conv2/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -31019,7 +28402,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3409">
+				<port id="2" precision="FP32" names="/mid_block/resnets.0/conv2/Conv_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -31027,11 +28410,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1816" name="onnx::Div_3410" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.452, onnx::Div_3410"/>
-			</rt_info>
+		<layer id="1738" name="/mid_block/resnets.0/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -31047,7 +28427,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.452,onnx::Div_3410">
+				<port id="2" precision="FP32" names="/mid_block/resnets.0/Add_1_output_0,/mid_block/resnets.0/Div_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -31055,22 +28435,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1817" name="onnx::Reshape_3417" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3417"/>
-			</rt_info>
+		<layer id="1739" name="/mid_block/attentions.0/norm/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_3417">
+				<port id="0" precision="I64" names="/mid_block/attentions.0/norm/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1818" name="onnx::InstanceNormalization_3418" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_3418"/>
-			</rt_info>
+		<layer id="1740" name="/mid_block/attentions.0/norm/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -31083,29 +28457,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_3418">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/norm/Reshape_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1819" name="Constant_11468" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_11468"/>
-			</rt_info>
+		<layer id="1741" name="Constant_20367" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1820" name="MVN_11469" type="MVN" version="opset6">
-			<data eps="9.9999999747524271e-07" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_11488, Concat_11533, MVN_11469, Multiply_11516, Reshape_11489, Reshape_11534, onnx::Reshape_3421"/>
-			</rt_info>
+		<layer id="1742" name="MVN_20368" type="MVN" version="opset6">
+			<data eps="9.9999999747524271e-07" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -31117,18 +28485,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_3421">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/norm/InstanceNormalization_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1821" name="onnx::Reshape_3422" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3422"/>
-			</rt_info>
+		<layer id="1743" name="/mid_block/attentions.0/norm/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -31138,16 +28503,13 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_3422">
+				<port id="1" precision="I64" names="/mid_block/attentions.0/norm/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1822" name="onnx::Mul_3423" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_3423"/>
-			</rt_info>
+		<layer id="1744" name="/mid_block/attentions.0/norm/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -31159,7 +28521,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_3423">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/norm/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -31167,10 +28529,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1823" name="Constant_150414" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="1131607216" size="5120"/>
+		<layer id="1745" name="Constant_87004_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="565804082" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -31178,11 +28540,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1824" name="onnx::Add_3426" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1746" name="Constant_87004" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3426"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1747" name="/mid_block/attentions.0/norm/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -31198,7 +28579,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3426">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/norm/Mul_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -31206,10 +28587,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1825" name="Constant_150415" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="1131612336" size="5120"/>
+		<layer id="1748" name="Constant_87005_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="565806642" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -31217,11 +28598,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1826" name="input.456" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1749" name="Constant_87005" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.456"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1750" name="/mid_block/attentions.0/norm/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -31237,7 +28637,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.456">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/norm/Add_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -31245,13 +28645,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1827" name="m.mid_block.attentions.0.proj_in.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280, 1, 1" offset="1131617456" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.mid_block.attentions.0.proj_in.weight"/>
-			</rt_info>
+		<layer id="1751" name="mid_block.attentions.0.proj_in.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280, 1, 1" offset="565809202" size="3276800" />
 			<output>
-				<port id="0" precision="FP32" names="m.mid_block.attentions.0.proj_in.weight">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -31259,11 +28656,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1828" name="Convolution_11571" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
+		<layer id="1752" name="mid_block.attentions.0.proj_in.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_11571"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="mid_block.attentions.0.proj_in.weight">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1753" name="/mid_block/attentions.0/proj_in/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -31287,10 +28703,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1829" name="Reshape_11591" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="1138171056" size="5120"/>
+		<layer id="1754" name="Reshape_20490_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="569086002" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -31298,11 +28714,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1830" name="onnx::Transpose_3430" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1755" name="Reshape_20490" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_11590, Reshape_11591, onnx::Transpose_3430"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1756" name="/mid_block/attentions.0/proj_in/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -31318,7 +28753,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_3430">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/proj_in/Conv_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -31326,21 +28761,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1831" name="Constant_11619" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18233080" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_11619"/>
-			</rt_info>
+		<layer id="1757" name="Constant_20518" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9116600" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1832" name="onnx::Reshape_3431" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3431"/>
-			</rt_info>
+		<layer id="1758" name="/mid_block/attentions.0/Transpose" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -31353,7 +28782,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_3431">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/Transpose_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>8</dim>
@@ -31361,22 +28790,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1833" name="onnx::Reshape_8753" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="1138176176" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_8753"/>
-			</rt_info>
+		<layer id="1759" name="/mid_block/attentions.0/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="569088562" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_8753">
+				<port id="0" precision="I64" names="/mid_block/attentions.0/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1834" name="input.460" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.460"/>
-			</rt_info>
+		<layer id="1760" name="/mid_block/attentions.0/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -31389,29 +28812,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.460">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/Reshape_output_0">
 					<dim>2</dim>
 					<dim>64</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1835" name="Constant_11627" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_11627"/>
-			</rt_info>
+		<layer id="1761" name="Constant_20527" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1836" name="onnx::Mul_3449" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3445, onnx::Div_3448, onnx::Mul_3449, onnx::Pow_3442, onnx::ReduceMean_3444, onnx::Sqrt_3447, onnx::Sub_3441"/>
-			</rt_info>
+		<layer id="1762" name="/mid_block/attentions.0/transformer_blocks.0/norm1/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -31423,28 +28840,45 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_3449">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/norm1/Div_output_0">
 					<dim>2</dim>
 					<dim>64</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1837" name="Constant_150416" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="1138176200" size="5120"/>
+		<layer id="1763" name="Constant_87006_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="569088586" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1838" name="onnx::Add_3450" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1764" name="Constant_87006" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3450"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1765" name="/mid_block/attentions.0/transformer_blocks.0/norm1/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -31458,28 +28892,45 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3450">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/norm1/Mul_output_0">
 					<dim>2</dim>
 					<dim>64</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1839" name="Constant_150417" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="1138181320" size="5120"/>
+		<layer id="1766" name="Constant_87007_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="569091146" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1840" name="onnx::MatMul_3451" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1767" name="Constant_87007" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_3451"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1768" name="/mid_block/attentions.0/transformer_blocks.0/norm1/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -31493,30 +28944,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_3451">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/norm1/Add_1_output_0">
 					<dim>2</dim>
 					<dim>64</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1841" name="Constant_147882" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="1138186440" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8754, q.95"/>
-			</rt_info>
+		<layer id="1769" name="Constant_85414_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="569093706" size="3276800" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1842" name="q.95" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="1770" name="Constant_85414" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8754, q.95"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1771" name="/mid_block/attentions.0/transformer_blocks.0/attn1/to_q/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -31529,29 +28992,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.95">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn1/to_q/MatMul_output_0">
 					<dim>2</dim>
 					<dim>64</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1843" name="onnx::Reshape_8770" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="1144740040" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_8770"/>
-			</rt_info>
+		<layer id="1772" name="/mid_block/attentions.0/transformer_blocks.0/attn1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="572370506" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_8761,onnx::Reshape_8770,onnx::Reshape_8779">
+				<port id="0" precision="I64" names="/mid_block/attentions.0/transformer_blocks.0/attn1/Constant_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1844" name="onnx::Transpose_3471" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_3471"/>
-			</rt_info>
+		<layer id="1773" name="/mid_block/attentions.0/transformer_blocks.0/attn1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -31563,7 +29020,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_3471">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn1/Reshape_output_0">
 					<dim>2</dim>
 					<dim>64</dim>
 					<dim>8</dim>
@@ -31571,21 +29028,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1845" name="Constant_11649" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_11649"/>
-			</rt_info>
+		<layer id="1774" name="Constant_20550" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1846" name="onnx::Reshape_3472" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3472"/>
-			</rt_info>
+		<layer id="1775" name="/mid_block/attentions.0/transformer_blocks.0/attn1/Transpose" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -31598,7 +29049,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_3472">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn1/Transpose_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>64</dim>
@@ -31606,22 +29057,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1847" name="onnx::Reshape_8774" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="1144740072" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_8774"/>
-			</rt_info>
+		<layer id="1776" name="/mid_block/attentions.0/transformer_blocks.0/attn1/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="572370538" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_8765,onnx::Reshape_8774,onnx::Reshape_8783">
+				<port id="0" precision="I64" names="/mid_block/attentions.0/transformer_blocks.0/attn1/Constant_1_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1848" name="q.99" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="q.99"/>
-			</rt_info>
+		<layer id="1777" name="/mid_block/attentions.0/transformer_blocks.0/attn1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -31634,30 +29079,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.99">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn1/Reshape_1_output_0">
 					<dim>16</dim>
 					<dim>64</dim>
 					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1849" name="Constant_147889" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="1144740096" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.95, onnx::MatMul_8755"/>
-			</rt_info>
+		<layer id="1778" name="Constant_85421_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="572370562" size="3276800" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1850" name="k.95" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="1779" name="Constant_85421" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="k.95, onnx::MatMul_8755"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1780" name="/mid_block/attentions.0/transformer_blocks.0/attn1/to_k/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -31670,18 +29127,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.95">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn1/to_k/MatMul_output_0">
 					<dim>2</dim>
 					<dim>64</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1851" name="onnx::Transpose_3496" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_3496"/>
-			</rt_info>
+		<layer id="1781" name="/mid_block/attentions.0/transformer_blocks.0/attn1/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="572370506" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/mid_block/attentions.0/transformer_blocks.0/attn1/Constant_2_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1782" name="/mid_block/attentions.0/transformer_blocks.0/attn1/Reshape_2" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -31693,7 +29155,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_3496">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn1/Reshape_2_output_0">
 					<dim>2</dim>
 					<dim>64</dim>
 					<dim>8</dim>
@@ -31701,21 +29163,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1852" name="Constant_11660" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_11660"/>
-			</rt_info>
+		<layer id="1783" name="Constant_20566" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1853" name="onnx::Reshape_3497" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3497"/>
-			</rt_info>
+		<layer id="1784" name="/mid_block/attentions.0/transformer_blocks.0/attn1/Transpose_1" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -31728,7 +29184,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_3497">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn1/Transpose_1_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>64</dim>
@@ -31736,11 +29192,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1854" name="k.99" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.99"/>
-			</rt_info>
+		<layer id="1785" name="/mid_block/attentions.0/transformer_blocks.0/attn1/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="572370538" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/mid_block/attentions.0/transformer_blocks.0/attn1/Constant_3_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1786" name="/mid_block/attentions.0/transformer_blocks.0/attn1/Reshape_3" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -31753,18 +29214,45 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.99">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn1/Reshape_3_output_0">
 					<dim>16</dim>
 					<dim>64</dim>
 					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1855" name="onnx::Mul_3533" type="Einsum" version="opset7">
-			<data equation="bid,bjd-&gt;bij"/>
+		<layer id="1787" name="Constant_87008_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1" offset="158057400" size="2" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1788" name="Constant_87008" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_3533"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1789" name="Multiply_86199" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
@@ -31772,59 +29260,43 @@
 					<dim>160</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>64</dim>
-					<dim>160</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_3533">
+				<port id="2" precision="FP32">
 					<dim>16</dim>
 					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1856" name="Constant_150418" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1" offset="316114004" size="4"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1857" name="onnx::Softmax_3535" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Softmax_3535"/>
-			</rt_info>
+		<layer id="1790" name="/mid_block/attentions.0/transformer_blocks.0/attn1/Mul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
 					<dim>64</dim>
-					<dim>64</dim>
+					<dim>160</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>16</dim>
+					<dim>64</dim>
+					<dim>160</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Softmax_3535">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn1/Add_output_0,/mid_block/attentions.0/transformer_blocks.0/attn1/Mul_output_0">
 					<dim>16</dim>
 					<dim>64</dim>
 					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1858" name="attn.47" type="SoftMax" version="opset8">
-			<data axis="-1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="attn.47"/>
-			</rt_info>
+		<layer id="1791" name="/mid_block/attentions.0/transformer_blocks.0/attn1/Softmax" type="SoftMax" version="opset8">
+			<data axis="-1" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
@@ -31833,30 +29305,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="attn.47">
+				<port id="1" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn1/Cast_output_0,/mid_block/attentions.0/transformer_blocks.0/attn1/Softmax_output_0">
 					<dim>16</dim>
 					<dim>64</dim>
 					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1859" name="Constant_147896" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="1151293696" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8756, v.95"/>
-			</rt_info>
+		<layer id="1792" name="Constant_85428_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="575647362" size="3276800" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1860" name="v.95" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="1793" name="Constant_85428" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8756, v.95"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1794" name="/mid_block/attentions.0/transformer_blocks.0/attn1/to_v/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -31869,18 +29353,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.95">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn1/to_v/MatMul_output_0">
 					<dim>2</dim>
 					<dim>64</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1861" name="onnx::Transpose_3521" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_3521"/>
-			</rt_info>
+		<layer id="1795" name="/mid_block/attentions.0/transformer_blocks.0/attn1/Constant_4" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="572370506" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/mid_block/attentions.0/transformer_blocks.0/attn1/Constant_4_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1796" name="/mid_block/attentions.0/transformer_blocks.0/attn1/Reshape_4" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -31892,7 +29381,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_3521">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn1/Reshape_4_output_0">
 					<dim>2</dim>
 					<dim>64</dim>
 					<dim>8</dim>
@@ -31900,21 +29389,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1862" name="Constant_11668" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_11668"/>
-			</rt_info>
+		<layer id="1797" name="Constant_20582" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1863" name="onnx::Reshape_3522" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3522"/>
-			</rt_info>
+		<layer id="1798" name="/mid_block/attentions.0/transformer_blocks.0/attn1/Transpose_2" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -31927,7 +29410,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_3522">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn1/Transpose_2_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>64</dim>
@@ -31935,11 +29418,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1864" name="v.99" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="v.99"/>
-			</rt_info>
+		<layer id="1799" name="/mid_block/attentions.0/transformer_blocks.0/attn1/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="572370538" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/mid_block/attentions.0/transformer_blocks.0/attn1/Constant_5_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1800" name="/mid_block/attentions.0/transformer_blocks.0/attn1/Reshape_5" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -31952,18 +29440,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.99">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn1/Reshape_5_output_0">
 					<dim>16</dim>
 					<dim>64</dim>
 					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1865" name="out.47" type="Einsum" version="opset7">
-			<data equation="bij,bjd-&gt;bid"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="out.47"/>
-			</rt_info>
+		<layer id="1801" name="/mid_block/attentions.0/transformer_blocks.0/attn1/MatMul_1" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="false" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
@@ -31977,516 +29462,353 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="out.47">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn1/MatMul_1_output_0">
 					<dim>16</dim>
 					<dim>64</dim>
 					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1866" name="onnx::Gather_3538" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_3538, onnx::Gather_3541, onnx::Gather_3544"/>
-			</rt_info>
+		<layer id="1802" name="/mid_block/attentions.0/transformer_blocks.0/attn1/Constant_8" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="578924162" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/mid_block/attentions.0/transformer_blocks.0/attn1/Constant_8_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1803" name="/mid_block/attentions.0/transformer_blocks.0/attn1/Reshape_6" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
 					<dim>64</dim>
 					<dim>160</dim>
 				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_3538,onnx::Gather_3541,onnx::Gather_3544">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn1/Reshape_6_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>64</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1867" name="onnx::Gather_3539" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_3539"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_3539"/>
-			</output>
-		</layer>
-		<layer id="1868" name="Constant_11680" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_11680"/>
-			</rt_info>
+		<layer id="1804" name="Constant_20608" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64">
+					<dim>4</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="1869" name="onnx::Div_3540" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_11680, onnx::Div_3540, onnx::Gather_3539"/>
-			</rt_info>
+		<layer id="1805" name="/mid_block/attentions.0/transformer_blocks.0/attn1/Transpose_4" type="Transpose" version="opset1">
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>64</dim>
+					<dim>160</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_3540"/>
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn1/Transpose_4_output_0">
+					<dim>2</dim>
+					<dim>64</dim>
+					<dim>8</dim>
+					<dim>160</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="1870" name="onnx::Div_3547" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_3547"/>
-			</rt_info>
+		<layer id="1806" name="/mid_block/attentions.0/transformer_blocks.0/attn1/Constant_9" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="569088562" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_3547"/>
+				<port id="0" precision="I64" names="/mid_block/attentions.0/transformer_blocks.0/attn1/Constant_9_output_0">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="1871" name="onnx::Cast_3548" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_3548, onnx::Cast_3549, onnx::Div_3547, onnx::Unsqueeze_3550"/>
-			</rt_info>
+		<layer id="1807" name="/mid_block/attentions.0/transformer_blocks.0/attn1/Reshape_7" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>64</dim>
+					<dim>8</dim>
+					<dim>160</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>3</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_3548,onnx::Cast_3549,onnx::Unsqueeze_3550"/>
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn1/Reshape_7_output_0">
+					<dim>2</dim>
+					<dim>64</dim>
+					<dim>1280</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="1872" name="onnx::Unsqueeze_3552" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3552"/>
-			</rt_info>
+		<layer id="1808" name="Constant_85435_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="578924194" size="3276800" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_3552">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1873" name="onnx::Concat_3553" type="Unsqueeze" version="opset1">
+		<layer id="1809" name="Constant_85435" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_3553, onnx::Unsqueeze_3552"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_3553">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1874" name="Constant_90672" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="19464604" size="16"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_3560"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1875" name="Constant_90673" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="1876" name="Gather_90674" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_3560"/>
-			</rt_info>
+		<layer id="1810" name="/mid_block/attentions.0/transformer_blocks.0/attn1/to_out.0/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>64</dim>
+					<dim>1280</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn1/to_out.0/MatMul_output_0">
 					<dim>2</dim>
+					<dim>64</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1877" name="onnx::Reshape_3560" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_3560"/>
-			</rt_info>
+		<layer id="1811" name="/mid_block/attentions.0/transformer_blocks.0/attn1/to_out.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="2" precision="I64">
+				<port id="1" precision="FP32">
 					<dim>2</dim>
+					<dim>64</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_3560">
-					<dim>4</dim>
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn1/to_out.0/Add_output_0">
+					<dim>2</dim>
+					<dim>64</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1878" name="onnx::Transpose_3561" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_3561"/>
-			</rt_info>
+		<layer id="1812" name="/mid_block/attentions.0/transformer_blocks.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
+					<dim>2</dim>
 					<dim>64</dim>
-					<dim>160</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>64</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_3561">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/Add_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>64</dim>
-					<dim>160</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1879" name="Constant_11801" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_11801"/>
-			</rt_info>
+		<layer id="1813" name="Constant_20620" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1880" name="onnx::Reshape_3562" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3562"/>
-			</rt_info>
+		<layer id="1814" name="/mid_block/attentions.0/transformer_blocks.0/norm2/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>64</dim>
-					<dim>160</dim>
+					<dim>1280</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_3562">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/norm2/Div_output_0">
 					<dim>2</dim>
 					<dim>64</dim>
-					<dim>8</dim>
-					<dim>160</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1881" name="onnx::Div_3563" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_3563"/>
-			</rt_info>
+		<layer id="1815" name="Constant_87010_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="582200994" size="2560" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_3563"/>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="1882" name="onnx::Cast_3564" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
+		<layer id="1816" name="Constant_87010" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_3564, onnx::Cast_3565, onnx::Div_3563, onnx::Unsqueeze_3566"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_3564,onnx::Cast_3565,onnx::Unsqueeze_3566"/>
-			</output>
-		</layer>
-		<layer id="1883" name="onnx::Unsqueeze_3569" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3569"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_3569">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1884" name="onnx::Concat_3570" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_3570, onnx::Unsqueeze_3569"/>
-			</rt_info>
+		<layer id="1817" name="/mid_block/attentions.0/transformer_blocks.0/norm2/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>64</dim>
+					<dim>1280</dim>
+				</port>
+				<port id="1" precision="FP32">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_3570">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/norm2/Mul_output_0">
+					<dim>2</dim>
+					<dim>64</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1885" name="Constant_88581" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_11684, onnx::Concat_3572, onnx::Gather_3542, onnx::Unsqueeze_3543, onnx::Unsqueeze_3571"/>
-			</rt_info>
+		<layer id="1818" name="Constant_87011_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="582203554" size="2560" />
 			<output>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1886" name="Constant_11684" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_11684"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="1887" name="onnx::Unsqueeze_3543" type="Gather" version="opset8">
-			<data batch_dims="0"/>
+		<layer id="1819" name="Constant_87011" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_11684, onnx::Concat_3572, onnx::Gather_3542, onnx::Unsqueeze_3543, onnx::Unsqueeze_3571"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_3572">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1888" name="onnx::Gather_3545" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_3545"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_3545"/>
-			</output>
-		</layer>
-		<layer id="1889" name="Constant_11688" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_11688"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="1890" name="onnx::Unsqueeze_3546" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_11688, onnx::Gather_3545, onnx::Unsqueeze_3546"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_3546"/>
-			</output>
-		</layer>
-		<layer id="1891" name="onnx::Mul_3567" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_3567"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_3567"/>
-			</output>
-		</layer>
-		<layer id="1892" name="onnx::Unsqueeze_3568" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_3567, onnx::Unsqueeze_3568"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_3568"/>
-			</output>
-		</layer>
-		<layer id="1893" name="onnx::Unsqueeze_3573" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3573"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_3573">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1894" name="onnx::Concat_3574" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_3574, onnx::Unsqueeze_3573"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_3574">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1895" name="onnx::Reshape_3575" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3575"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_3575">
-					<dim>3</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1896" name="onnx::MatMul_3576" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_3576"/>
-			</rt_info>
+		<layer id="1820" name="/mid_block/attentions.0/transformer_blocks.0/norm2/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>64</dim>
-					<dim>8</dim>
-					<dim>160</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_3576">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/norm2/Add_1_output_0">
 					<dim>2</dim>
 					<dim>64</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1897" name="Constant_147903" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="1157847296" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3578, onnx::MatMul_8785"/>
-			</rt_info>
+		<layer id="1821" name="Constant_85443_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="582206114" size="3276800" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1898" name="onnx::Add_3578" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="1822" name="Constant_85443" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3578, onnx::MatMul_8785"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>64</dim>
-					<dim>1280</dim>
-				</port>
-				<port id="1" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3578">
-					<dim>2</dim>
-					<dim>64</dim>
-					<dim>1280</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1899" name="input.464" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.464"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1280</dim>
-				</port>
 				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>64</dim>
 					<dim>1280</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="input.464">
-					<dim>2</dim>
-					<dim>64</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1900" name="input.468" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.468"/>
-			</rt_info>
+		<layer id="1823" name="/mid_block/attentions.0/transformer_blocks.0/attn2/to_q/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -32494,35 +29816,28 @@
 					<dim>1280</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>64</dim>
+					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.468">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn2/to_q/MatMul_output_0">
 					<dim>2</dim>
 					<dim>64</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1901" name="Constant_11922" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_11922"/>
-			</rt_info>
+		<layer id="1824" name="/mid_block/attentions.0/transformer_blocks.0/attn2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="572370506" size="32" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/mid_block/attentions.0/transformer_blocks.0/attn2/Constant_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1902" name="onnx::Mul_3589" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3585, onnx::Div_3588, onnx::Mul_3589, onnx::Pow_3582, onnx::ReduceMean_3584, onnx::Sqrt_3587, onnx::Sub_3581"/>
-			</rt_info>
+		<layer id="1825" name="/mid_block/attentions.0/transformer_blocks.0/attn2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -32530,263 +29845,138 @@
 					<dim>1280</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_3589">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn2/Reshape_output_0">
 					<dim>2</dim>
 					<dim>64</dim>
-					<dim>1280</dim>
+					<dim>8</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1903" name="Constant_150420" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="1164400896" size="5120"/>
+		<layer id="1826" name="Constant_20643" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1280</dim>
+				<port id="0" precision="I64">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1904" name="onnx::Add_3590" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3590"/>
-			</rt_info>
+		<layer id="1827" name="/mid_block/attentions.0/transformer_blocks.0/attn2/Transpose" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>64</dim>
-					<dim>1280</dim>
+					<dim>8</dim>
+					<dim>160</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1280</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3590">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn2/Transpose_output_0">
 					<dim>2</dim>
+					<dim>8</dim>
 					<dim>64</dim>
-					<dim>1280</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1905" name="Constant_150421" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="1164406016" size="5120"/>
+		<layer id="1828" name="/mid_block/attentions.0/transformer_blocks.0/attn2/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="572370538" size="24" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1280</dim>
+				<port id="0" precision="I64" names="/mid_block/attentions.0/transformer_blocks.0/attn2/Constant_1_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1906" name="onnx::MatMul_3591" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_3591"/>
-			</rt_info>
+		<layer id="1829" name="/mid_block/attentions.0/transformer_blocks.0/attn2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>8</dim>
 					<dim>64</dim>
-					<dim>1280</dim>
+					<dim>160</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1280</dim>
+				<port id="1" precision="I64">
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_3591">
-					<dim>2</dim>
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn2/Reshape_1_output_0">
+					<dim>16</dim>
 					<dim>64</dim>
-					<dim>1280</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1907" name="Constant_147911" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="1164411136" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8786, q.103"/>
-			</rt_info>
+		<layer id="1830" name="Constant_85450_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 768" offset="585482914" size="1966080" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1280</dim>
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
+					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1908" name="q.103" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="1831" name="Constant_85450" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8786, q.103"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>64</dim>
-					<dim>1280</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1280</dim>
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
+					<dim>768</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.103">
-					<dim>2</dim>
-					<dim>64</dim>
+				<port id="1" precision="FP32">
 					<dim>1280</dim>
+					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1909" name="Constant_107475" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
-			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1910" name="onnx::Gather_3598" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_3598, onnx::Gather_3601, onnx::Gather_3604"/>
-			</rt_info>
+		<layer id="1832" name="/mid_block/attentions.0/transformer_blocks.0/attn2/to_k/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>64</dim>
-					<dim>1280</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="I64" names="onnx::Gather_3598,onnx::Gather_3601,onnx::Gather_3604">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1911" name="onnx::Gather_3605" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_3605"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_3605"/>
-			</output>
-		</layer>
-		<layer id="1912" name="Constant_11948" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_11948"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="1913" name="onnx::Div_3606" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_11948, onnx::Div_3606, onnx::Gather_3605"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Div_3606"/>
-			</output>
-		</layer>
-		<layer id="1914" name="onnx::Div_3607" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_3607"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_3607"/>
-			</output>
-		</layer>
-		<layer id="1915" name="onnx::Cast_3608" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_3608, onnx::Cast_3609, onnx::Div_3607, onnx::Unsqueeze_3610"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_3608,onnx::Cast_3609,onnx::Unsqueeze_3610"/>
-			</output>
-		</layer>
-		<layer id="1916" name="onnx::Unsqueeze_3618" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3618"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_3618">
-					<dim>1</dim>
+					<dim>77</dim>
+					<dim>768</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="1917" name="onnx::Concat_3619" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_3619, onnx::Unsqueeze_3618"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>768</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_3619">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn2/to_k/MatMul_output_0">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1918" name="onnx::Reshape_3620" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_3620"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
+		<layer id="1833" name="/mid_block/attentions.0/transformer_blocks.0/attn2/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="169859034" size="32" />
 			<output>
-				<port id="3" precision="I64">
+				<port id="0" precision="I64" names="/mid_block/attentions.0/transformer_blocks.0/attn2/Constant_2_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1919" name="onnx::Transpose_3621" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_3621"/>
-			</rt_info>
+		<layer id="1834" name="/mid_block/attentions.0/transformer_blocks.0/attn2/Reshape_2" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>64</dim>
+					<dim>77</dim>
 					<dim>1280</dim>
 				</port>
 				<port id="1" precision="I64">
@@ -32794,33 +29984,27 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_3621">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn2/Reshape_2_output_0">
 					<dim>2</dim>
-					<dim>64</dim>
+					<dim>77</dim>
 					<dim>8</dim>
 					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1920" name="Constant_12061" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_12061"/>
-			</rt_info>
+		<layer id="1835" name="Constant_20659" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1921" name="onnx::Reshape_3622" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3622"/>
-			</rt_info>
+		<layer id="1836" name="/mid_block/attentions.0/transformer_blocks.0/attn2/Transpose_1" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>64</dim>
+					<dim>77</dim>
 					<dim>8</dim>
 					<dim>160</dim>
 				</port>
@@ -32829,249 +30013,163 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_3622">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn2/Transpose_1_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
-					<dim>64</dim>
+					<dim>77</dim>
 					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1922" name="onnx::Gather_3599" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_3599"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_3599"/>
-			</output>
-		</layer>
-		<layer id="1923" name="Constant_11940" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_11940"/>
-			</rt_info>
+		<layer id="1837" name="/mid_block/attentions.0/transformer_blocks.0/attn2/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="169859066" size="24" />
 			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="1924" name="onnx::Unsqueeze_3600" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_11940, onnx::Gather_3599, onnx::Unsqueeze_3600"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="I64" names="/mid_block/attentions.0/transformer_blocks.0/attn2/Constant_3_output_0">
 					<dim>3</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_3600"/>
-			</output>
-		</layer>
-		<layer id="1925" name="onnx::Mul_3623" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_3623"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_3623"/>
 			</output>
 		</layer>
-		<layer id="1926" name="onnx::Unsqueeze_3624" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_3623, onnx::Unsqueeze_3624"/>
-			</rt_info>
+		<layer id="1838" name="/mid_block/attentions.0/transformer_blocks.0/attn2/Reshape_3" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_3624"/>
-			</output>
-		</layer>
-		<layer id="1927" name="onnx::Unsqueeze_3629" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3629"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_3629">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>77</dim>
+					<dim>160</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="1928" name="onnx::Concat_3630" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_3630, onnx::Unsqueeze_3629"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_3630">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn2/Reshape_3_output_0">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1929" name="Constant_88608" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_11944, onnx::Concat_3632, onnx::Gather_3602, onnx::Unsqueeze_3603, onnx::Unsqueeze_3631"/>
-			</rt_info>
+		<layer id="1839" name="Constant_87012_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1" offset="158057400" size="2" />
 			<output>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1930" name="Constant_11944" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_11944"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="1931" name="onnx::Unsqueeze_3603" type="Gather" version="opset8">
-			<data batch_dims="0"/>
+		<layer id="1840" name="Constant_87012" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_11944, onnx::Concat_3632, onnx::Gather_3602, onnx::Unsqueeze_3603, onnx::Unsqueeze_3631"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_3632">
 					<dim>1</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="1932" name="onnx::Div_3625" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_3625"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_3625"/>
-			</output>
-		</layer>
-		<layer id="1933" name="onnx::Cast_3626" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_3626, onnx::Cast_3627, onnx::Div_3625, onnx::Unsqueeze_3628"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_3626,onnx::Cast_3627,onnx::Unsqueeze_3628"/>
-			</output>
-		</layer>
-		<layer id="1934" name="onnx::Unsqueeze_3633" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3633"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_3633">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1935" name="onnx::Concat_3634" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_3634, onnx::Unsqueeze_3633"/>
-			</rt_info>
+		<layer id="1841" name="Multiply_86201" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>160</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_3634">
-					<dim>1</dim>
+				<port id="2" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1936" name="onnx::Reshape_3635" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3635"/>
-			</rt_info>
+		<layer id="1842" name="/mid_block/attentions.0/transformer_blocks.0/attn2/Mul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>64</dim>
+					<dim>160</dim>
 				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>160</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_3635">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn2/Add_output_0,/mid_block/attentions.0/transformer_blocks.0/attn2/Mul_output_0">
+					<dim>16</dim>
+					<dim>64</dim>
+					<dim>77</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1937" name="q.107" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="q.107"/>
-			</rt_info>
+		<layer id="1843" name="/mid_block/attentions.0/transformer_blocks.0/attn2/Softmax" type="SoftMax" version="opset8">
+			<data axis="-1" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>8</dim>
+					<dim>16</dim>
 					<dim>64</dim>
-					<dim>160</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+					<dim>77</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.107">
+				<port id="1" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn2/Cast_output_0,/mid_block/attentions.0/transformer_blocks.0/attn2/Softmax_output_0">
 					<dim>16</dim>
 					<dim>64</dim>
-					<dim>160</dim>
+					<dim>77</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1938" name="Constant_147918" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 768" offset="1170964736" size="3932160"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.103, onnx::MatMul_8787"/>
-			</rt_info>
+		<layer id="1844" name="Constant_85457_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 768" offset="587448994" size="1966080" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1939" name="k.103" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="1845" name="Constant_85457" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="k.103, onnx::MatMul_8787"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>768</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>768</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1846" name="/mid_block/attentions.0/transformer_blocks.0/attn2/to_v/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -33084,18 +30182,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.103">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn2/to_v/MatMul_output_0">
 					<dim>2</dim>
 					<dim>77</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1940" name="onnx::Transpose_3650" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_3650"/>
-			</rt_info>
+		<layer id="1847" name="/mid_block/attentions.0/transformer_blocks.0/attn2/Constant_4" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="169859034" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/mid_block/attentions.0/transformer_blocks.0/attn2/Constant_4_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1848" name="/mid_block/attentions.0/transformer_blocks.0/attn2/Reshape_4" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -33107,7 +30210,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_3650">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn2/Reshape_4_output_0">
 					<dim>2</dim>
 					<dim>77</dim>
 					<dim>8</dim>
@@ -33115,21 +30218,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1941" name="Constant_12182" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_12182"/>
-			</rt_info>
+		<layer id="1849" name="Constant_20675" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1942" name="onnx::Reshape_3651" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3651"/>
-			</rt_info>
+		<layer id="1850" name="/mid_block/attentions.0/transformer_blocks.0/attn2/Transpose_2" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -33142,7 +30239,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_3651">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn2/Transpose_2_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>77</dim>
@@ -33150,11 +30247,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="1943" name="k.107" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.107"/>
-			</rt_info>
+		<layer id="1851" name="/mid_block/attentions.0/transformer_blocks.0/attn2/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="169859066" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/mid_block/attentions.0/transformer_blocks.0/attn2/Constant_5_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1852" name="/mid_block/attentions.0/transformer_blocks.0/attn2/Reshape_5" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -33167,23 +30269,20 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.107">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn2/Reshape_5_output_0">
 					<dim>16</dim>
 					<dim>77</dim>
 					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1944" name="onnx::Mul_3687" type="Einsum" version="opset7">
-			<data equation="bid,bjd-&gt;bij"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_3687"/>
-			</rt_info>
+		<layer id="1853" name="/mid_block/attentions.0/transformer_blocks.0/attn2/MatMul_1" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="false" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
 					<dim>64</dim>
-					<dim>160</dim>
+					<dim>77</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>16</dim>
@@ -33192,715 +30291,252 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_3687">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn2/MatMul_1_output_0">
 					<dim>16</dim>
 					<dim>64</dim>
-					<dim>77</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1945" name="Constant_150422" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1" offset="316114004" size="4"/>
+		<layer id="1854" name="/mid_block/attentions.0/transformer_blocks.0/attn2/Constant_8" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="578924162" size="32" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/mid_block/attentions.0/transformer_blocks.0/attn2/Constant_8_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1946" name="onnx::Softmax_3689" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Softmax_3689"/>
-			</rt_info>
+		<layer id="1855" name="/mid_block/attentions.0/transformer_blocks.0/attn2/Reshape_6" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
 					<dim>64</dim>
-					<dim>77</dim>
+					<dim>160</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Softmax_3689">
-					<dim>16</dim>
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn2/Reshape_6_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
 					<dim>64</dim>
-					<dim>77</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1947" name="attn.51" type="SoftMax" version="opset8">
-			<data axis="-1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="attn.51"/>
-			</rt_info>
+		<layer id="1856" name="Constant_20701" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
+			<output>
+				<port id="0" precision="I64">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1857" name="/mid_block/attentions.0/transformer_blocks.0/attn2/Transpose_4" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
+					<dim>2</dim>
+					<dim>8</dim>
 					<dim>64</dim>
-					<dim>77</dim>
+					<dim>160</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="attn.51">
-					<dim>16</dim>
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn2/Transpose_4_output_0">
+					<dim>2</dim>
 					<dim>64</dim>
-					<dim>77</dim>
+					<dim>8</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1948" name="Constant_147925" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 768" offset="1174896896" size="3932160"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8788, v.103"/>
-			</rt_info>
+		<layer id="1858" name="/mid_block/attentions.0/transformer_blocks.0/attn2/Constant_9" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="569088562" size="24" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1280</dim>
-					<dim>768</dim>
+				<port id="0" precision="I64" names="/mid_block/attentions.0/transformer_blocks.0/attn2/Constant_9_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1949" name="v.103" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8788, v.103"/>
-			</rt_info>
+		<layer id="1859" name="/mid_block/attentions.0/transformer_blocks.0/attn2/Reshape_7" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>77</dim>
-					<dim>768</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1280</dim>
-					<dim>768</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="v.103">
-					<dim>2</dim>
-					<dim>77</dim>
-					<dim>1280</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1950" name="onnx::Transpose_3675" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_3675"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>77</dim>
-					<dim>1280</dim>
+					<dim>64</dim>
+					<dim>8</dim>
+					<dim>160</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_3675">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn2/Reshape_7_output_0">
 					<dim>2</dim>
-					<dim>77</dim>
-					<dim>8</dim>
-					<dim>160</dim>
+					<dim>64</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1951" name="Constant_12190" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_12190"/>
-			</rt_info>
+		<layer id="1860" name="Constant_85464_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="589415074" size="3276800" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1952" name="onnx::Reshape_3676" type="Transpose" version="opset1">
+		<layer id="1861" name="Constant_85464" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3676"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>77</dim>
-					<dim>8</dim>
-					<dim>160</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_3676">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>77</dim>
-					<dim>160</dim>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1953" name="v.107" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="v.107"/>
-			</rt_info>
+		<layer id="1862" name="/mid_block/attentions.0/transformer_blocks.0/attn2/to_out.0/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>77</dim>
-					<dim>160</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="v.107">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>160</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1954" name="out.51" type="Einsum" version="opset7">
-			<data equation="bij,bjd-&gt;bid"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="out.51"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
 					<dim>64</dim>
-					<dim>77</dim>
+					<dim>1280</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>160</dim>
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="out.51">
-					<dim>16</dim>
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn2/to_out.0/MatMul_output_0">
+					<dim>2</dim>
 					<dim>64</dim>
-					<dim>160</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1955" name="onnx::Gather_3692" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_3692, onnx::Gather_3695, onnx::Gather_3698"/>
-			</rt_info>
+		<layer id="1863" name="/mid_block/attentions.0/transformer_blocks.0/attn2/to_out.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>64</dim>
-					<dim>160</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="I64" names="onnx::Gather_3692,onnx::Gather_3695,onnx::Gather_3698">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1956" name="onnx::Gather_3693" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_3693"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_3693"/>
-			</output>
-		</layer>
-		<layer id="1957" name="Constant_12202" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_12202"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="1958" name="onnx::Div_3694" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_12202, onnx::Div_3694, onnx::Gather_3693"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Div_3694"/>
-			</output>
-		</layer>
-		<layer id="1959" name="onnx::Div_3701" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_3701"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_3701"/>
-			</output>
-		</layer>
-		<layer id="1960" name="onnx::Cast_3702" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_3702, onnx::Cast_3703, onnx::Div_3701, onnx::Unsqueeze_3704"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_3702,onnx::Cast_3703,onnx::Unsqueeze_3704"/>
-			</output>
-		</layer>
-		<layer id="1961" name="onnx::Unsqueeze_3706" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3706"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_3706">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1962" name="onnx::Concat_3707" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_3707, onnx::Unsqueeze_3706"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_3707">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1963" name="Constant_90682" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="19464604" size="16"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_3714"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1964" name="Constant_90683" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="1965" name="Gather_90684" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_3714"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>2</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64">
-					<dim>2</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1966" name="onnx::Reshape_3714" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_3714"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
 					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="2" precision="I64">
+				<port id="1" precision="FP32">
 					<dim>2</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_3714">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1967" name="onnx::Transpose_3715" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_3715"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
 					<dim>64</dim>
-					<dim>160</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_3715">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/attn2/to_out.0/Add_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>64</dim>
-					<dim>160</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1968" name="Constant_12323" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_12323"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1969" name="onnx::Reshape_3716" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3716"/>
-			</rt_info>
+		<layer id="1864" name="/mid_block/attentions.0/transformer_blocks.0/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>64</dim>
-					<dim>160</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>1280</dim>
 				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_3716">
+				<port id="1" precision="FP32">
 					<dim>2</dim>
 					<dim>64</dim>
-					<dim>8</dim>
-					<dim>160</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1970" name="onnx::Div_3717" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_3717"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_3717"/>
-			</output>
-		</layer>
-		<layer id="1971" name="onnx::Cast_3718" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_3718, onnx::Cast_3719, onnx::Div_3717, onnx::Unsqueeze_3720"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_3718,onnx::Cast_3719,onnx::Unsqueeze_3720"/>
-			</output>
-		</layer>
-		<layer id="1972" name="onnx::Unsqueeze_3723" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3723"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_3723">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1973" name="onnx::Concat_3724" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_3724, onnx::Unsqueeze_3723"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_3724">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1974" name="Constant_88635" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_12206, onnx::Concat_3726, onnx::Gather_3696, onnx::Unsqueeze_3697, onnx::Unsqueeze_3725"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1975" name="Constant_12206" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_12206"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="1976" name="onnx::Unsqueeze_3697" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_12206, onnx::Concat_3726, onnx::Gather_3696, onnx::Unsqueeze_3697, onnx::Unsqueeze_3725"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_3726">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1977" name="onnx::Gather_3699" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_3699"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_3699"/>
-			</output>
-		</layer>
-		<layer id="1978" name="Constant_12210" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_12210"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="1979" name="onnx::Unsqueeze_3700" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_12210, onnx::Gather_3699, onnx::Unsqueeze_3700"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_3700"/>
-			</output>
-		</layer>
-		<layer id="1980" name="onnx::Mul_3721" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_3721"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_3721"/>
-			</output>
-		</layer>
-		<layer id="1981" name="onnx::Unsqueeze_3722" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_3721, onnx::Unsqueeze_3722"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_3722"/>
-			</output>
-		</layer>
-		<layer id="1982" name="onnx::Unsqueeze_3727" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3727"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_3727">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/Add_1_output_0">
+					<dim>2</dim>
+					<dim>64</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1983" name="onnx::Concat_3728" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_3728, onnx::Unsqueeze_3727"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
+		<layer id="1865" name="Constant_20713" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_3728">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1984" name="onnx::Reshape_3729" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3729"/>
-			</rt_info>
-			<input>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_3729">
-					<dim>3</dim>
-				</port>
 			</output>
 		</layer>
-		<layer id="1985" name="onnx::MatMul_3730" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_3730"/>
-			</rt_info>
+		<layer id="1866" name="/mid_block/attentions.0/transformer_blocks.0/norm3/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>64</dim>
-					<dim>8</dim>
-					<dim>160</dim>
+					<dim>1280</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>3</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_3730">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/norm3/Div_output_0">
 					<dim>2</dim>
 					<dim>64</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1986" name="Constant_147932" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="1178829056" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3732, onnx::MatMul_8809"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1280</dim>
-					<dim>1280</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1987" name="onnx::Add_3732" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3732, onnx::MatMul_8809"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>64</dim>
-					<dim>1280</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1280</dim>
-					<dim>1280</dim>
-				</port>
-			</input>
+		<layer id="1867" name="Constant_87014_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="592691874" size="2560" />
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3732">
-					<dim>2</dim>
-					<dim>64</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1988" name="input.472" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1868" name="Constant_87014" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.472"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>64</dim>
-					<dim>1280</dim>
-				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.472">
-					<dim>2</dim>
-					<dim>64</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1989" name="input.476" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.476"/>
-			</rt_info>
+		<layer id="1869" name="/mid_block/attentions.0/transformer_blocks.0/norm3/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -33908,68 +30544,51 @@
 					<dim>1280</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>64</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.476">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/norm3/Mul_output_0">
 					<dim>2</dim>
 					<dim>64</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1990" name="Constant_12444" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_12444"/>
-			</rt_info>
+		<layer id="1870" name="Constant_87015_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="592694434" size="2560" />
 			<output>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1991" name="onnx::Mul_3743" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
+		<layer id="1871" name="Constant_87015" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3739, onnx::Div_3742, onnx::Mul_3743, onnx::Pow_3736, onnx::ReduceMean_3738, onnx::Sqrt_3741, onnx::Sub_3735"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>64</dim>
-					<dim>1280</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_3743">
-					<dim>2</dim>
-					<dim>64</dim>
 					<dim>1280</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="1992" name="Constant_150424" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="1185382656" size="5120"/>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1993" name="onnx::Add_3744" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3744"/>
-			</rt_info>
+		<layer id="1872" name="/mid_block/attentions.0/transformer_blocks.0/norm3/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -33983,65 +30602,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3744">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/norm3/Add_1_output_0">
 					<dim>2</dim>
 					<dim>64</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1994" name="Constant_150425" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="1185387776" size="5120"/>
+		<layer id="1873" name="Constant_85472_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="10240, 1280" offset="592696994" size="26214400" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>10240</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1995" name="onnx::MatMul_3745" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1874" name="Constant_85472" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_3745"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>64</dim>
-					<dim>1280</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>10240</dim>
 					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_3745">
-					<dim>2</dim>
-					<dim>64</dim>
-					<dim>1280</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="1996" name="Constant_147940" type="Const" version="opset1">
-			<data element_type="f32" shape="10240, 1280" offset="1185392896" size="52428800"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3747, onnx::MatMul_8810"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>10240</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1997" name="onnx::Add_3747" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3747, onnx::MatMul_8810"/>
-			</rt_info>
+		<layer id="1875" name="/mid_block/attentions.0/transformer_blocks.0/ff/net.0/proj/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -34054,18 +30650,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3747">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/ff/net.0/proj/MatMul_output_0">
 					<dim>2</dim>
 					<dim>64</dim>
 					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1998" name="onnx::Shape_3748" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Shape_3748"/>
-			</rt_info>
+		<layer id="1876" name="/mid_block/attentions.0/transformer_blocks.0/ff/net.0/proj/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>1</dim>
@@ -34079,51 +30672,39 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Shape_3748">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/ff/net.0/proj/Add_output_0">
 					<dim>2</dim>
 					<dim>64</dim>
 					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="1999" name="Constant_126873" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_12472, onnx::Gather_3750, onnx::Mul_3759"/>
-			</rt_info>
+		<layer id="1877" name="Constant_77414" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2000" name="Constant_126874" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_12472, onnx::Gather_3750, onnx::Mul_3759"/>
-			</rt_info>
+		<layer id="1878" name="Constant_77415" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2001" name="Constant_126870" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_12472, onnx::Gather_3750, onnx::Mul_3759"/>
-			</rt_info>
+		<layer id="1879" name="Constant_77411" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2002" name="onnx::Gather_3749" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_3749"/>
-			</rt_info>
+		<layer id="1880" name="/mid_block/attentions.0/transformer_blocks.0/ff/net.0/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -34132,36 +30713,27 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_3749">
+				<port id="1" precision="I64" names="/mid_block/attentions.0/transformer_blocks.0/ff/net.0/Shape_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2003" name="onnx::Gather_3750" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_3750"/>
-			</rt_info>
+		<layer id="1881" name="/mid_block/attentions.0/transformer_blocks.0/ff/net.0/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Gather_3750">
+				<port id="0" precision="I64" names="/mid_block/attentions.0/transformer_blocks.0/ff/net.0/Constant_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2004" name="Constant_12461" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_12461"/>
-			</rt_info>
+		<layer id="1882" name="Constant_20730" type="Const" version="opset1">
+			<data element_type="i64" shape="" offset="2143392" size="8" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64" />
 			</output>
 		</layer>
-		<layer id="2005" name="onnx::Add_3751" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_12461, onnx::Add_3751, onnx::Gather_3750"/>
-			</rt_info>
+		<layer id="1883" name="/mid_block/attentions.0/transformer_blocks.0/ff/net.0/Gather" type="Gather" version="opset8">
+			<data batch_dims="0" />
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
@@ -34169,30 +30741,24 @@
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
+				<port id="2" precision="I64" />
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Add_3751">
+				<port id="3" precision="I64" names="/mid_block/attentions.0/transformer_blocks.0/ff/net.0/Gather_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2006" name="onnx::Add_3753" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3753"/>
-			</rt_info>
+		<layer id="1884" name="/mid_block/attentions.0/transformer_blocks.0/ff/net.0/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="2143400" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Add_3753">
+				<port id="0" precision="I64" names="/mid_block/attentions.0/transformer_blocks.0/ff/net.0/Constant_2_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2007" name="onnx::Div_3754" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3753, onnx::Div_3754"/>
-			</rt_info>
+		<layer id="1885" name="/mid_block/attentions.0/transformer_blocks.0/ff/net.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="I64">
 					<dim>1</dim>
@@ -34202,27 +30768,21 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Div_3754">
+				<port id="2" precision="I64" names="/mid_block/attentions.0/transformer_blocks.0/ff/net.0/Add_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2008" name="onnx::Div_3755" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_3755"/>
-			</rt_info>
+		<layer id="1886" name="/mid_block/attentions.0/transformer_blocks.0/ff/net.0/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_3755">
+				<port id="0" precision="I64" names="/mid_block/attentions.0/transformer_blocks.0/ff/net.0/Constant_3_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2009" name="onnx::Mul_3756" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_3755, onnx::Mul_3756, onnx::Mul_3757, onnx::Slice_3758"/>
-			</rt_info>
+		<layer id="1887" name="/mid_block/attentions.0/transformer_blocks.0/ff/net.0/Div" type="Divide" version="opset1">
+			<data auto_broadcast="numpy" m_pythondiv="true" />
 			<input>
 				<port id="0" precision="I64">
 					<dim>1</dim>
@@ -34232,26 +30792,20 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Mul_3756,onnx::Slice_3758">
+				<port id="2" precision="I64" names="/mid_block/attentions.0/transformer_blocks.0/ff/net.0/Div_output_0,/mid_block/attentions.0/transformer_blocks.0/ff/net.0/Mul_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2010" name="Constant_126869" type="Const" version="opset1">
-			<data element_type="i32" shape="1" offset="25941500" size="4"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_12472, onnx::Gather_3750, onnx::Mul_3759"/>
-			</rt_info>
+		<layer id="1888" name="Constant_77410" type="Const" version="opset1">
+			<data element_type="i32" shape="1" offset="12970946" size="4" />
 			<output>
 				<port id="0" precision="I32">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2011" name="ScatterUpdate_126875" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_12472, onnx::Gather_3750, onnx::Mul_3759"/>
-			</rt_info>
+		<layer id="1889" name="ScatterUpdate_77416" type="ScatterUpdate" version="opset3">
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
@@ -34272,22 +30826,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2012" name="Constant_126878" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941504" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_12472, onnx::Gather_3750, onnx::Mul_3759"/>
-			</rt_info>
+		<layer id="1890" name="Constant_77419" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970950" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2013" name="onnx::Mul_3759" type="StridedSlice" version="opset1">
-			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask=""/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_12472, onnx::Gather_3750, onnx::Mul_3759"/>
-			</rt_info>
+		<layer id="1891" name="/mid_block/attentions.0/transformer_blocks.0/ff/net.0/Slice" type="StridedSlice" version="opset1">
+			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask="" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -34305,50 +30853,38 @@
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="FP32" names="onnx::Mul_3759">
+				<port id="4" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/ff/net.0/Slice_output_0">
 					<dim>2</dim>
 					<dim>64</dim>
 					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2014" name="Constant_126942" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_12541, onnx::Div_3762, onnx::Gather_3750"/>
-			</rt_info>
+		<layer id="1892" name="Constant_77483" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2015" name="Constant_126941" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_12541, onnx::Div_3762, onnx::Gather_3750"/>
-			</rt_info>
+		<layer id="1893" name="Constant_77482" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2016" name="Constant_126940" type="Const" version="opset1">
-			<data element_type="i32" shape="1" offset="25941500" size="4"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_12541, onnx::Div_3762, onnx::Gather_3750"/>
-			</rt_info>
+		<layer id="1894" name="Constant_77481" type="Const" version="opset1">
+			<data element_type="i32" shape="1" offset="12970946" size="4" />
 			<output>
 				<port id="0" precision="I32">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2017" name="ScatterUpdate_126943" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_12541, onnx::Div_3762, onnx::Gather_3750"/>
-			</rt_info>
+		<layer id="1895" name="ScatterUpdate_77484" type="ScatterUpdate" version="opset3">
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
@@ -34369,33 +30905,24 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2018" name="Constant_126944" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_12541, onnx::Div_3762, onnx::Gather_3750"/>
-			</rt_info>
+		<layer id="1896" name="Constant_77485" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2019" name="onnx::Mul_3760" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_3760"/>
-			</rt_info>
+		<layer id="1897" name="/mid_block/attentions.0/transformer_blocks.0/ff/net.0/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Mul_3760">
+				<port id="0" precision="I64" names="/mid_block/attentions.0/transformer_blocks.0/ff/net.0/Constant_5_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2020" name="onnx::Slice_3761" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_3760, onnx::Slice_3761"/>
-			</rt_info>
+		<layer id="1898" name="/mid_block/attentions.0/transformer_blocks.0/ff/net.0/Mul_1" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="I64">
 					<dim>1</dim>
@@ -34405,15 +30932,12 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Slice_3761">
+				<port id="2" precision="I64" names="/mid_block/attentions.0/transformer_blocks.0/ff/net.0/Mul_1_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2021" name="ScatterUpdate_126945" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_12541, onnx::Div_3762, onnx::Gather_3750"/>
-			</rt_info>
+		<layer id="1899" name="ScatterUpdate_77486" type="ScatterUpdate" version="opset3">
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
@@ -34434,22 +30958,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2022" name="Constant_126948" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941504" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_12541, onnx::Div_3762, onnx::Gather_3750"/>
-			</rt_info>
+		<layer id="1900" name="Constant_77489" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970950" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2023" name="onnx::Div_3762" type="StridedSlice" version="opset1">
-			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask=""/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_12541, onnx::Div_3762, onnx::Gather_3750"/>
-			</rt_info>
+		<layer id="1901" name="/mid_block/attentions.0/transformer_blocks.0/ff/net.0/Slice_1" type="StridedSlice" version="opset1">
+			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask="" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -34467,18 +30985,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="FP32" names="onnx::Div_3762">
+				<port id="4" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/ff/net.0/Slice_1_output_0">
 					<dim>2</dim>
 					<dim>64</dim>
 					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2024" name="onnx::Mul_3770" type="Gelu" version="opset7">
-			<data approximation_mode="ERF"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3765, onnx::Erf_3764, onnx::Mul_3767, onnx::Mul_3768, onnx::Mul_3770"/>
-			</rt_info>
+		<layer id="1902" name="/mid_block/attentions.0/transformer_blocks.0/ff/net.0/Mul_3" type="Gelu" version="opset7">
+			<data approximation_mode="ERF" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -34487,18 +31002,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="onnx::Mul_3770">
+				<port id="1" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/ff/net.0/Mul_3_output_0">
 					<dim>2</dim>
 					<dim>64</dim>
 					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2025" name="input.480" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.480"/>
-			</rt_info>
+		<layer id="1903" name="/mid_block/attentions.0/transformer_blocks.0/ff/net.0/Mul_4" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -34512,54 +31024,63 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.480">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/ff/net.0/Mul_4_output_0">
 					<dim>2</dim>
 					<dim>64</dim>
 					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2026" name="Constant_147948" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 5120" offset="1237821696" size="26214400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3773, onnx::MatMul_8811"/>
-			</rt_info>
+		<layer id="1904" name="Constant_85480_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 5120" offset="618911394" size="13107200" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2027" name="onnx::Add_3773" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="1905" name="Constant_85480" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3773, onnx::MatMul_8811"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>64</dim>
-					<dim>5120</dim>
-				</port>
-				<port id="1" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>5120</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3773">
-					<dim>2</dim>
-					<dim>64</dim>
+				<port id="1" precision="FP32">
 					<dim>1280</dim>
+					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2028" name="onnx::Add_3774" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3774"/>
-			</rt_info>
+		<layer id="1906" name="/mid_block/attentions.0/transformer_blocks.0/ff/net.2/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>64</dim>
+					<dim>5120</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>5120</dim>
+				</port>
+			</input>
+			<output>
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/ff/net.2/MatMul_output_0">
+					<dim>2</dim>
+					<dim>64</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1907" name="/mid_block/attentions.0/transformer_blocks.0/ff/net.2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>1</dim>
@@ -34573,18 +31094,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3774">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/ff/net.2/Add_output_0">
 					<dim>2</dim>
 					<dim>64</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2029" name="onnx::Reshape_3775" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3775"/>
-			</rt_info>
+		<layer id="1908" name="/mid_block/attentions.0/transformer_blocks.0/Add_2" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -34598,29 +31116,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_3775">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/transformer_blocks.0/Add_2_output_0">
 					<dim>2</dim>
 					<dim>64</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2030" name="onnx::Reshape_8816" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="1264036096" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_8816"/>
-			</rt_info>
+		<layer id="1909" name="/mid_block/attentions.0/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="632018594" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_8816">
+				<port id="0" precision="I64" names="/mid_block/attentions.0/Constant_1_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2031" name="onnx::Transpose_3785" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_3785"/>
-			</rt_info>
+		<layer id="1910" name="/mid_block/attentions.0/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -34632,7 +31144,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_3785">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>8</dim>
@@ -34640,21 +31152,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2032" name="Constant_12625" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="27579960" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_12625"/>
-			</rt_info>
+		<layer id="1911" name="Constant_20895" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="13790206" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2033" name="input.484" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.484"/>
-			</rt_info>
+		<layer id="1912" name="/mid_block/attentions.0/Transpose_1" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -34667,7 +31173,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.484">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/Transpose_1_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -34675,13 +31181,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2034" name="m.mid_block.attentions.0.proj_out.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280, 1, 1" offset="1264036128" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.mid_block.attentions.0.proj_out.weight"/>
-			</rt_info>
+		<layer id="1913" name="mid_block.attentions.0.proj_out.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280, 1, 1" offset="632018626" size="3276800" />
 			<output>
-				<port id="0" precision="FP32" names="m.mid_block.attentions.0.proj_out.weight">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -34689,11 +31192,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2035" name="Convolution_12627" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
+		<layer id="1914" name="mid_block.attentions.0.proj_out.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_12627"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="mid_block.attentions.0.proj_out.weight">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1915" name="/mid_block/attentions.0/proj_out/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -34717,10 +31239,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2036" name="Reshape_12647" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="1270589728" size="5120"/>
+		<layer id="1916" name="Reshape_20917_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="635295426" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -34728,11 +31250,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2037" name="onnx::Add_3787" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1917" name="Reshape_20917" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_12646, Reshape_12647, onnx::Add_3787"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1918" name="/mid_block/attentions.0/proj_out/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -34748,7 +31289,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3787">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/proj_out/Conv_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -34756,11 +31297,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2038" name="onnx::Cast_3788" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.488, onnx::Cast_3788"/>
-			</rt_info>
+		<layer id="1919" name="/mid_block/attentions.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -34776,7 +31314,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.488,onnx::Cast_3788">
+				<port id="2" precision="FP32" names="/mid_block/attentions.0/Add_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -34784,22 +31322,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2039" name="onnx::Reshape_3790" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3790"/>
-			</rt_info>
+		<layer id="1920" name="/mid_block/resnets.1/norm1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_3790">
+				<port id="0" precision="I64" names="/mid_block/resnets.1/norm1/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2040" name="onnx::InstanceNormalization_3791" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_3791"/>
-			</rt_info>
+		<layer id="1921" name="/mid_block/resnets.1/norm1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -34812,29 +31344,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_3791">
+				<port id="2" precision="FP32" names="/mid_block/resnets.1/norm1/Reshape_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2041" name="Constant_12686" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_12686"/>
-			</rt_info>
+		<layer id="1922" name="Constant_20955" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2042" name="MVN_12687" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_12706, Concat_12751, MVN_12687, Multiply_12734, Reshape_12707, Reshape_12752, onnx::Reshape_3794"/>
-			</rt_info>
+		<layer id="1923" name="MVN_20956" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -34846,18 +31372,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_3794">
+				<port id="2" precision="FP32" names="/mid_block/resnets.1/norm1/InstanceNormalization_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2043" name="onnx::Reshape_3795" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3795"/>
-			</rt_info>
+		<layer id="1924" name="/mid_block/resnets.1/norm1/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -34867,16 +31390,13 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_3795">
+				<port id="1" precision="I64" names="/mid_block/resnets.1/norm1/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2044" name="onnx::Mul_3796" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_3796"/>
-			</rt_info>
+		<layer id="1925" name="/mid_block/resnets.1/norm1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -34888,7 +31408,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_3796">
+				<port id="2" precision="FP32" names="/mid_block/resnets.1/norm1/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -34896,10 +31416,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2045" name="Constant_150428" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="1270594848" size="5120"/>
+		<layer id="1926" name="Constant_87018_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="635297986" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -34907,11 +31427,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2046" name="onnx::Add_3799" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1927" name="Constant_87018" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3799"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1928" name="/mid_block/resnets.1/norm1/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -34927,7 +31466,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3799">
+				<port id="2" precision="FP32" names="/mid_block/resnets.1/norm1/Mul_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -34935,10 +31474,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2047" name="Constant_150429" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="1270599968" size="5120"/>
+		<layer id="1929" name="Constant_87019_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="635300546" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -34946,11 +31485,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2048" name="onnx::Cast_3802" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1930" name="Constant_87019" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.492, onnx::Cast_3802"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1931" name="/mid_block/resnets.1/norm1/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -34966,7 +31524,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.492,onnx::Cast_3802">
+				<port id="2" precision="FP32" names="/mid_block/resnets.1/norm1/Add_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -34974,10 +31532,7 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2049" name="input.496" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.496, onnx::Mul_3804"/>
-			</rt_info>
+		<layer id="1932" name="/mid_block/resnets.1/nonlinearity/Mul" type="Swish" version="opset4">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -34987,7 +31542,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="input.496">
+				<port id="1" precision="FP32" names="/mid_block/resnets.1/nonlinearity/Mul_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -34995,13 +31550,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2050" name="m.mid_block.resnets.1.conv1.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280, 3, 3" offset="1270605088" size="58982400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.mid_block.resnets.1.conv1.weight"/>
-			</rt_info>
+		<layer id="1933" name="mid_block.resnets.1.conv1.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280, 3, 3" offset="635303106" size="29491200" />
 			<output>
-				<port id="0" precision="FP32" names="m.mid_block.resnets.1.conv1.weight">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 					<dim>3</dim>
@@ -35009,11 +31561,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2051" name="Convolution_12792" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
+		<layer id="1934" name="mid_block.resnets.1.conv1.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_12792"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="mid_block.resnets.1.conv1.weight">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1935" name="/mid_block/resnets.1/conv1/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -35037,10 +31608,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2052" name="Reshape_12812" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="1329587488" size="5120"/>
+		<layer id="1936" name="Reshape_21080_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="664794306" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -35048,11 +31619,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2053" name="onnx::Add_3806" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1937" name="Reshape_21080" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_12811, Reshape_12812, onnx::Add_3806"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1938" name="/mid_block/resnets.1/conv1/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -35068,7 +31658,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3806">
+				<port id="2" precision="FP32" names="/mid_block/resnets.1/conv1/Conv_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -35076,40 +31666,35 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2054" name="onnx::Gemm_3808" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gemm_3808, onnx::Mul_3807"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-				</port>
-			</input>
+		<layer id="1939" name="mid_block.resnets.1.time_emb_proj.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="664796866" size="3276800" />
 			<output>
-				<port id="1" precision="FP32" names="onnx::Gemm_3808">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2055" name="m.mid_block.resnets.1.time_emb_proj.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="1329592608" size="6553600"/>
+		<layer id="1940" name="mid_block.resnets.1.time_emb_proj.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="m.mid_block.resnets.1.time_emb_proj.weight"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="FP32" names="m.mid_block.resnets.1.time_emb_proj.weight">
+				<port id="1" precision="FP32" names="mid_block.resnets.1.time_emb_proj.weight">
 					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2056" name="MatMul_12844" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="MatMul_12844"/>
-			</rt_info>
+		<layer id="1941" name="/mid_block/resnets.1/time_emb_proj/Gemm/WithoutBiases" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -35127,20 +31712,35 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2057" name="Constant_150430" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280" offset="1336146208" size="5120"/>
+		<layer id="1942" name="Constant_87020_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280" offset="668073666" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2058" name="onnx::Unsqueeze_3809" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1943" name="Constant_87020" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Multiply_12845, onnx::Unsqueeze_3809"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1944" name="/mid_block/resnets.1/time_emb_proj/Gemm" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -35152,27 +31752,21 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_3809">
+				<port id="2" precision="FP32" names="/mid_block/resnets.1/time_emb_proj/Gemm_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2059" name="onnx::Unsqueeze_3810" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3810"/>
-			</rt_info>
+		<layer id="1945" name="/mid_block/resnets.1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_3810">
+				<port id="0" precision="I64" names="/mid_block/resnets.1/Constant_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2060" name="onnx::Unsqueeze_3811" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3811"/>
-			</rt_info>
+		<layer id="1946" name="/mid_block/resnets.1/Unsqueeze" type="Unsqueeze" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -35183,28 +31777,22 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_3811">
+				<port id="2" precision="FP32" names="/mid_block/resnets.1/Unsqueeze_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2061" name="onnx::Unsqueeze_3812" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="14129392" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3812"/>
-			</rt_info>
+		<layer id="1947" name="/mid_block/resnets.1/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="7064752" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_3812">
+				<port id="0" precision="I64" names="/mid_block/resnets.1/Constant_1_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2062" name="onnx::Add_3813" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3813"/>
-			</rt_info>
+		<layer id="1948" name="/mid_block/resnets.1/Unsqueeze_1" type="Unsqueeze" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -35216,7 +31804,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3813">
+				<port id="2" precision="FP32" names="/mid_block/resnets.1/Unsqueeze_1_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -35224,11 +31812,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2063" name="onnx::Cast_3814" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.500, onnx::Cast_3814"/>
-			</rt_info>
+		<layer id="1949" name="/mid_block/resnets.1/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -35244,7 +31829,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.500,onnx::Cast_3814">
+				<port id="2" precision="FP32" names="/mid_block/resnets.1/Add_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -35252,22 +31837,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2064" name="onnx::Reshape_3816" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3816"/>
-			</rt_info>
+		<layer id="1950" name="/mid_block/resnets.1/norm2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_3816">
+				<port id="0" precision="I64" names="/mid_block/resnets.1/norm2/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2065" name="onnx::InstanceNormalization_3817" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_3817"/>
-			</rt_info>
+		<layer id="1951" name="/mid_block/resnets.1/norm2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -35280,29 +31859,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_3817">
+				<port id="2" precision="FP32" names="/mid_block/resnets.1/norm2/Reshape_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2066" name="Constant_12862" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_12862"/>
-			</rt_info>
+		<layer id="1952" name="Constant_21128" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2067" name="MVN_12863" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_12882, Concat_12927, MVN_12863, Multiply_12910, Reshape_12883, Reshape_12928, onnx::Reshape_3820"/>
-			</rt_info>
+		<layer id="1953" name="MVN_21129" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -35314,18 +31887,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_3820">
+				<port id="2" precision="FP32" names="/mid_block/resnets.1/norm2/InstanceNormalization_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2068" name="onnx::Reshape_3821" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3821"/>
-			</rt_info>
+		<layer id="1954" name="/mid_block/resnets.1/norm2/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -35335,16 +31905,13 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_3821">
+				<port id="1" precision="I64" names="/mid_block/resnets.1/norm2/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2069" name="onnx::Mul_3822" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_3822"/>
-			</rt_info>
+		<layer id="1955" name="/mid_block/resnets.1/norm2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -35356,7 +31923,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_3822">
+				<port id="2" precision="FP32" names="/mid_block/resnets.1/norm2/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -35364,10 +31931,68 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2070" name="Constant_150431" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="1336151328" size="5120"/>
+		<layer id="1956" name="Constant_87021_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="668076226" size="2560" />
 			<output>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1957" name="Constant_87021" type="Convert" version="opset1">
+			<data destination_type="f32" />
+			<rt_info>
+				<attribute name="decompression" version="0" />
+			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1958" name="/mid_block/resnets.1/norm2/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
 				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>8</dim>
+					<dim>8</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="2" precision="FP32" names="/mid_block/resnets.1/norm2/Mul_output_0">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>8</dim>
+					<dim>8</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1959" name="Constant_87022_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="668078786" size="2560" />
+			<output>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -35375,11 +32000,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2071" name="onnx::Add_3825" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1960" name="Constant_87022" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3825"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1961" name="/mid_block/resnets.1/norm2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -35395,7 +32039,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3825">
+				<port id="2" precision="FP32" names="/mid_block/resnets.1/norm2/Add_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -35403,22 +32047,7 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2072" name="Constant_150432" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="1336156448" size="5120"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1280</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2073" name="onnx::Cast_3828" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.504, onnx::Cast_3828"/>
-			</rt_info>
+		<layer id="1962" name="/mid_block/resnets.1/nonlinearity_1/Mul" type="Swish" version="opset4">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -35426,15 +32055,9 @@
 					<dim>8</dim>
 					<dim>8</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1280</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.504,onnx::Cast_3828">
+				<port id="1" precision="FP32" names="/mid_block/resnets.1/nonlinearity_1/Mul_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -35442,34 +32065,32 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2074" name="input.508" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.508, onnx::Mul_3830"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>8</dim>
-					<dim>8</dim>
-				</port>
-			</input>
+		<layer id="1963" name="mid_block.resnets.1.conv2.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280, 3, 3" offset="668081346" size="29491200" />
 			<output>
-				<port id="1" precision="FP32" names="input.508">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
-					<dim>8</dim>
-					<dim>8</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2075" name="m.mid_block.resnets.1.conv2.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280, 3, 3" offset="1336161568" size="58982400"/>
+		<layer id="1964" name="mid_block.resnets.1.conv2.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="m.mid_block.resnets.1.conv2.weight"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="FP32" names="m.mid_block.resnets.1.conv2.weight">
+				<port id="1" precision="FP32" names="mid_block.resnets.1.conv2.weight">
 					<dim>1280</dim>
 					<dim>1280</dim>
 					<dim>3</dim>
@@ -35477,11 +32098,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2076" name="Convolution_12968" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_12968"/>
-			</rt_info>
+		<layer id="1965" name="/mid_block/resnets.1/conv2/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -35505,10 +32123,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2077" name="Reshape_12988" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="1395143968" size="5120"/>
+		<layer id="1966" name="Reshape_21253_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="697572546" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -35516,11 +32134,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2078" name="onnx::Add_3832" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1967" name="Reshape_21253" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_12987, Reshape_12988, onnx::Add_3832"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1968" name="/mid_block/resnets.1/conv2/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -35536,7 +32173,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3832">
+				<port id="2" precision="FP32" names="/mid_block/resnets.1/conv2/Conv_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -35544,11 +32181,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2079" name="onnx::Div_3833" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_3835, onnx::Div_3833"/>
-			</rt_info>
+		<layer id="1969" name="/mid_block/resnets.1/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -35564,7 +32198,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Concat_3835,onnx::Div_3833">
+				<port id="2" precision="FP32" names="/mid_block/resnets.1/Add_1_output_0,/mid_block/resnets.1/Div_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -35572,11 +32206,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2080" name="onnx::Cast_3836" type="Concat" version="opset1">
-			<data axis="1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.512, onnx::Cast_3836"/>
-			</rt_info>
+		<layer id="1970" name="/up_blocks.0/Concat" type="Concat" version="opset1">
+			<data axis="1" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -35592,7 +32223,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.512,onnx::Cast_3836">
+				<port id="2" precision="FP32" names="/up_blocks.0/Concat_output_0">
 					<dim>2</dim>
 					<dim>2560</dim>
 					<dim>8</dim>
@@ -35600,13 +32231,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2081" name="m.up_blocks.0.resnets.0.conv_shortcut.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 2560, 1, 1" offset="1395149088" size="13107200"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.0.resnets.0.conv_shortcut.weight"/>
-			</rt_info>
+		<layer id="1971" name="up_blocks.0.resnets.0.conv_shortcut.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 2560, 1, 1" offset="697575106" size="6553600" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.0.resnets.0.conv_shortcut.weight">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>2560</dim>
 					<dim>1</dim>
@@ -35614,11 +32242,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2082" name="Convolution_13360" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
+		<layer id="1972" name="up_blocks.0.resnets.0.conv_shortcut.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_13360"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>2560</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="up_blocks.0.resnets.0.conv_shortcut.weight">
+					<dim>1280</dim>
+					<dim>2560</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1973" name="/up_blocks.0/resnets.0/conv_shortcut/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -35642,10 +32289,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2083" name="Reshape_13380" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="1408256288" size="5120"/>
+		<layer id="1974" name="Reshape_21640_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="704128706" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -35653,11 +32300,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2084" name="onnx::Add_3881" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1975" name="Reshape_21640" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_13379, Reshape_13380, onnx::Add_3881"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1976" name="/up_blocks.0/resnets.0/conv_shortcut/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -35673,7 +32339,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3881">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.0/conv_shortcut/Conv_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -35681,22 +32347,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2085" name="onnx::Reshape_3838" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3838"/>
-			</rt_info>
+		<layer id="1977" name="/up_blocks.0/resnets.0/norm1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_3838">
+				<port id="0" precision="I64" names="/up_blocks.0/resnets.0/norm1/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2086" name="onnx::InstanceNormalization_3839" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_3839"/>
-			</rt_info>
+		<layer id="1978" name="/up_blocks.0/resnets.0/norm1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -35709,29 +32369,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_3839">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.0/norm1/Reshape_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2087" name="Constant_13030" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_13030"/>
-			</rt_info>
+		<layer id="1979" name="Constant_21294" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2088" name="MVN_13031" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_13050, Concat_13095, MVN_13031, Multiply_13078, Reshape_13051, Reshape_13096, onnx::Reshape_3842"/>
-			</rt_info>
+		<layer id="1980" name="MVN_21295" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -35743,18 +32397,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_3842">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.0/norm1/InstanceNormalization_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2089" name="onnx::Reshape_3843" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3843"/>
-			</rt_info>
+		<layer id="1981" name="/up_blocks.0/resnets.0/norm1/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -35764,16 +32415,13 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_3843">
+				<port id="1" precision="I64" names="/up_blocks.0/resnets.0/norm1/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2090" name="onnx::Mul_3844" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_3844"/>
-			</rt_info>
+		<layer id="1982" name="/up_blocks.0/resnets.0/norm1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -35785,7 +32433,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_3844">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.0/norm1/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>2560</dim>
 					<dim>8</dim>
@@ -35793,10 +32441,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2091" name="Constant_150433" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 2560, 1, 1" offset="1408261408" size="10240"/>
+		<layer id="1983" name="Constant_87023_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 2560, 1, 1" offset="704131266" size="5120" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>2560</dim>
 					<dim>1</dim>
@@ -35804,11 +32452,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2092" name="onnx::Add_3847" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1984" name="Constant_87023" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3847"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>2560</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>2560</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1985" name="/up_blocks.0/resnets.0/norm1/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -35824,7 +32491,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3847">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.0/norm1/Mul_output_0">
 					<dim>2</dim>
 					<dim>2560</dim>
 					<dim>8</dim>
@@ -35832,10 +32499,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2093" name="Constant_150434" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 2560, 1, 1" offset="1408271648" size="10240"/>
+		<layer id="1986" name="Constant_87024_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 2560, 1, 1" offset="704136386" size="5120" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>2560</dim>
 					<dim>1</dim>
@@ -35843,11 +32510,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2094" name="onnx::Cast_3850" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1987" name="Constant_87024" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.516, onnx::Cast_3850"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>2560</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>2560</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1988" name="/up_blocks.0/resnets.0/norm1/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -35863,7 +32549,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.516,onnx::Cast_3850">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.0/norm1/Add_output_0">
 					<dim>2</dim>
 					<dim>2560</dim>
 					<dim>8</dim>
@@ -35871,10 +32557,7 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2095" name="input.520" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.520, onnx::Mul_3852"/>
-			</rt_info>
+		<layer id="1989" name="/up_blocks.0/resnets.0/nonlinearity/Mul" type="Swish" version="opset4">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -35884,7 +32567,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="input.520">
+				<port id="1" precision="FP32" names="/up_blocks.0/resnets.0/nonlinearity/Mul_output_0">
 					<dim>2</dim>
 					<dim>2560</dim>
 					<dim>8</dim>
@@ -35892,13 +32575,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2096" name="m.up_blocks.0.resnets.0.conv1.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 2560, 3, 3" offset="1408281888" size="117964800"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.0.resnets.0.conv1.weight"/>
-			</rt_info>
+		<layer id="1990" name="up_blocks.0.resnets.0.conv1.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 2560, 3, 3" offset="704141506" size="58982400" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.0.resnets.0.conv1.weight">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>2560</dim>
 					<dim>3</dim>
@@ -35906,11 +32586,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2097" name="Convolution_13136" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
+		<layer id="1991" name="up_blocks.0.resnets.0.conv1.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_13136"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>2560</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="up_blocks.0.resnets.0.conv1.weight">
+					<dim>1280</dim>
+					<dim>2560</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1992" name="/up_blocks.0/resnets.0/conv1/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -35934,10 +32633,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2098" name="Reshape_13156" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="1526246688" size="5120"/>
+		<layer id="1993" name="Reshape_21419_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="763123906" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -35945,11 +32644,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2099" name="onnx::Add_3854" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="1994" name="Reshape_21419" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_13155, Reshape_13156, onnx::Add_3854"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="1995" name="/up_blocks.0/resnets.0/conv1/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -35965,7 +32683,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3854">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.0/conv1/Conv_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -35973,40 +32691,35 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2100" name="onnx::Gemm_3856" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gemm_3856, onnx::Mul_3855"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-				</port>
-			</input>
+		<layer id="1996" name="up_blocks.0.resnets.0.time_emb_proj.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="763126466" size="3276800" />
 			<output>
-				<port id="1" precision="FP32" names="onnx::Gemm_3856">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2101" name="m.up_blocks.0.resnets.0.time_emb_proj.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="1526251808" size="6553600"/>
+		<layer id="1997" name="up_blocks.0.resnets.0.time_emb_proj.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.0.resnets.0.time_emb_proj.weight"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.0.resnets.0.time_emb_proj.weight">
+				<port id="1" precision="FP32" names="up_blocks.0.resnets.0.time_emb_proj.weight">
 					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2102" name="MatMul_13188" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="MatMul_13188"/>
-			</rt_info>
+		<layer id="1998" name="/up_blocks.0/resnets.0/time_emb_proj/Gemm/WithoutBiases" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -36024,20 +32737,35 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2103" name="Constant_150435" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280" offset="1532805408" size="5120"/>
+		<layer id="1999" name="Constant_87025_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280" offset="766403266" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2104" name="onnx::Unsqueeze_3857" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2000" name="Constant_87025" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Multiply_13189, onnx::Unsqueeze_3857"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2001" name="/up_blocks.0/resnets.0/time_emb_proj/Gemm" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -36049,27 +32777,21 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_3857">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.0/time_emb_proj/Gemm_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2105" name="onnx::Unsqueeze_3858" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3858"/>
-			</rt_info>
+		<layer id="2002" name="/up_blocks.0/resnets.0/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_3858">
+				<port id="0" precision="I64" names="/up_blocks.0/resnets.0/Constant_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2106" name="onnx::Unsqueeze_3859" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3859"/>
-			</rt_info>
+		<layer id="2003" name="/up_blocks.0/resnets.0/Unsqueeze" type="Unsqueeze" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -36080,28 +32802,22 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_3859">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.0/Unsqueeze_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2107" name="onnx::Unsqueeze_3860" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="14129392" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3860"/>
-			</rt_info>
+		<layer id="2004" name="/up_blocks.0/resnets.0/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="7064752" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_3860">
+				<port id="0" precision="I64" names="/up_blocks.0/resnets.0/Constant_1_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2108" name="onnx::Add_3861" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3861"/>
-			</rt_info>
+		<layer id="2005" name="/up_blocks.0/resnets.0/Unsqueeze_1" type="Unsqueeze" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -36113,7 +32829,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3861">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.0/Unsqueeze_1_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -36121,11 +32837,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2109" name="onnx::Cast_3862" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.524, onnx::Cast_3862"/>
-			</rt_info>
+		<layer id="2006" name="/up_blocks.0/resnets.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -36141,7 +32854,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.524,onnx::Cast_3862">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.0/Add_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -36149,22 +32862,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2110" name="onnx::Reshape_3864" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3864"/>
-			</rt_info>
+		<layer id="2007" name="/up_blocks.0/resnets.0/norm2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_3864">
+				<port id="0" precision="I64" names="/up_blocks.0/resnets.0/norm2/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2111" name="onnx::InstanceNormalization_3865" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_3865"/>
-			</rt_info>
+		<layer id="2008" name="/up_blocks.0/resnets.0/norm2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -36177,29 +32884,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_3865">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.0/norm2/Reshape_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2112" name="Constant_13206" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_13206"/>
-			</rt_info>
+		<layer id="2009" name="Constant_21467" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2113" name="MVN_13207" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_13226, Concat_13271, MVN_13207, Multiply_13254, Reshape_13227, Reshape_13272, onnx::Reshape_3868"/>
-			</rt_info>
+		<layer id="2010" name="MVN_21468" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -36211,18 +32912,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_3868">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.0/norm2/InstanceNormalization_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2114" name="onnx::Reshape_3869" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3869"/>
-			</rt_info>
+		<layer id="2011" name="/up_blocks.0/resnets.0/norm2/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -36232,16 +32930,13 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_3869">
+				<port id="1" precision="I64" names="/up_blocks.0/resnets.0/norm2/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2115" name="onnx::Mul_3870" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_3870"/>
-			</rt_info>
+		<layer id="2012" name="/up_blocks.0/resnets.0/norm2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -36253,7 +32948,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_3870">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.0/norm2/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -36261,10 +32956,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2116" name="Constant_150436" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="1532810528" size="5120"/>
+		<layer id="2013" name="Constant_87026_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="766405826" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -36272,11 +32967,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2117" name="onnx::Add_3873" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2014" name="Constant_87026" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3873"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2015" name="/up_blocks.0/resnets.0/norm2/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -36292,7 +33006,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3873">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.0/norm2/Mul_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -36300,10 +33014,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2118" name="Constant_150437" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="1532815648" size="5120"/>
+		<layer id="2016" name="Constant_87027_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="766408386" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -36311,11 +33025,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2119" name="onnx::Cast_3876" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2017" name="Constant_87027" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.528, onnx::Cast_3876"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2018" name="/up_blocks.0/resnets.0/norm2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -36331,7 +33064,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.528,onnx::Cast_3876">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.0/norm2/Add_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -36339,10 +33072,7 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2120" name="input.532" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.532, onnx::Mul_3878"/>
-			</rt_info>
+		<layer id="2019" name="/up_blocks.0/resnets.0/nonlinearity_1/Mul" type="Swish" version="opset4">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -36352,7 +33082,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="input.532">
+				<port id="1" precision="FP32" names="/up_blocks.0/resnets.0/nonlinearity_1/Mul_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -36360,13 +33090,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2121" name="m.up_blocks.0.resnets.0.conv2.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280, 3, 3" offset="1532820768" size="58982400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.0.resnets.0.conv2.weight"/>
-			</rt_info>
+		<layer id="2020" name="up_blocks.0.resnets.0.conv2.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280, 3, 3" offset="766410946" size="29491200" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.0.resnets.0.conv2.weight">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 					<dim>3</dim>
@@ -36374,11 +33101,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2122" name="Convolution_13312" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
+		<layer id="2021" name="up_blocks.0.resnets.0.conv2.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_13312"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="up_blocks.0.resnets.0.conv2.weight">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2022" name="/up_blocks.0/resnets.0/conv2/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -36402,10 +33148,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2123" name="Reshape_13332" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="1591803168" size="5120"/>
+		<layer id="2023" name="Reshape_21592_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="795902146" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -36413,11 +33159,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2124" name="onnx::Add_3880" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2024" name="Reshape_21592" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_13331, Reshape_13332, onnx::Add_3880"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2025" name="/up_blocks.0/resnets.0/conv2/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -36433,7 +33198,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3880">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.0/conv2/Conv_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -36441,11 +33206,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2125" name="onnx::Div_3882" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_3884, onnx::Div_3882"/>
-			</rt_info>
+		<layer id="2026" name="/up_blocks.0/resnets.0/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -36461,7 +33223,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Concat_3884,onnx::Div_3882">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.0/Add_1_output_0,/up_blocks.0/resnets.0/Div_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -36469,11 +33231,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2126" name="onnx::Cast_3885" type="Concat" version="opset1">
-			<data axis="1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.536, onnx::Cast_3885"/>
-			</rt_info>
+		<layer id="2027" name="/up_blocks.0/Concat_1" type="Concat" version="opset1">
+			<data axis="1" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -36489,7 +33248,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.536,onnx::Cast_3885">
+				<port id="2" precision="FP32" names="/up_blocks.0/Concat_1_output_0">
 					<dim>2</dim>
 					<dim>2560</dim>
 					<dim>8</dim>
@@ -36497,13 +33256,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2127" name="m.up_blocks.0.resnets.1.conv_shortcut.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 2560, 1, 1" offset="1591808288" size="13107200"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.0.resnets.1.conv_shortcut.weight"/>
-			</rt_info>
+		<layer id="2028" name="up_blocks.0.resnets.1.conv_shortcut.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 2560, 1, 1" offset="795904706" size="6553600" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.0.resnets.1.conv_shortcut.weight">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>2560</dim>
 					<dim>1</dim>
@@ -36511,11 +33267,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2128" name="Convolution_13752" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
+		<layer id="2029" name="up_blocks.0.resnets.1.conv_shortcut.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_13752"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>2560</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="up_blocks.0.resnets.1.conv_shortcut.weight">
+					<dim>1280</dim>
+					<dim>2560</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2030" name="/up_blocks.0/resnets.1/conv_shortcut/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -36539,10 +33314,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2129" name="Reshape_13772" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="1604915488" size="5120"/>
+		<layer id="2031" name="Reshape_22027_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="802458306" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -36550,11 +33325,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2130" name="onnx::Add_3930" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2032" name="Reshape_22027" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_13771, Reshape_13772, onnx::Add_3930"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2033" name="/up_blocks.0/resnets.1/conv_shortcut/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -36570,7 +33364,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3930">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.1/conv_shortcut/Conv_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -36578,22 +33372,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2131" name="onnx::Reshape_3887" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3887"/>
-			</rt_info>
+		<layer id="2034" name="/up_blocks.0/resnets.1/norm1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_3887">
+				<port id="0" precision="I64" names="/up_blocks.0/resnets.1/norm1/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2132" name="onnx::InstanceNormalization_3888" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_3888"/>
-			</rt_info>
+		<layer id="2035" name="/up_blocks.0/resnets.1/norm1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -36606,29 +33394,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_3888">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.1/norm1/Reshape_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2133" name="Constant_13422" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_13422"/>
-			</rt_info>
+		<layer id="2036" name="Constant_21681" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2134" name="MVN_13423" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_13442, Concat_13487, MVN_13423, Multiply_13470, Reshape_13443, Reshape_13488, onnx::Reshape_3891"/>
-			</rt_info>
+		<layer id="2037" name="MVN_21682" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -36640,18 +33422,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_3891">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.1/norm1/InstanceNormalization_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2135" name="onnx::Reshape_3892" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3892"/>
-			</rt_info>
+		<layer id="2038" name="/up_blocks.0/resnets.1/norm1/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -36661,16 +33440,13 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_3892">
+				<port id="1" precision="I64" names="/up_blocks.0/resnets.1/norm1/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2136" name="onnx::Mul_3893" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_3893"/>
-			</rt_info>
+		<layer id="2039" name="/up_blocks.0/resnets.1/norm1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -36682,7 +33458,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_3893">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.1/norm1/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>2560</dim>
 					<dim>8</dim>
@@ -36690,10 +33466,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2137" name="Constant_150438" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 2560, 1, 1" offset="1604920608" size="10240"/>
+		<layer id="2040" name="Constant_87028_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 2560, 1, 1" offset="802460866" size="5120" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>2560</dim>
 					<dim>1</dim>
@@ -36701,11 +33477,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2138" name="onnx::Add_3896" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2041" name="Constant_87028" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3896"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>2560</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>2560</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2042" name="/up_blocks.0/resnets.1/norm1/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -36721,7 +33516,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3896">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.1/norm1/Mul_output_0">
 					<dim>2</dim>
 					<dim>2560</dim>
 					<dim>8</dim>
@@ -36729,10 +33524,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2139" name="Constant_150439" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 2560, 1, 1" offset="1604930848" size="10240"/>
+		<layer id="2043" name="Constant_87029_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 2560, 1, 1" offset="802465986" size="5120" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>2560</dim>
 					<dim>1</dim>
@@ -36740,11 +33535,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2140" name="onnx::Cast_3899" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2044" name="Constant_87029" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.540, onnx::Cast_3899"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>2560</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>2560</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2045" name="/up_blocks.0/resnets.1/norm1/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -36760,7 +33574,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.540,onnx::Cast_3899">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.1/norm1/Add_output_0">
 					<dim>2</dim>
 					<dim>2560</dim>
 					<dim>8</dim>
@@ -36768,10 +33582,7 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2141" name="input.544" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.544, onnx::Mul_3901"/>
-			</rt_info>
+		<layer id="2046" name="/up_blocks.0/resnets.1/nonlinearity/Mul" type="Swish" version="opset4">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -36781,7 +33592,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="input.544">
+				<port id="1" precision="FP32" names="/up_blocks.0/resnets.1/nonlinearity/Mul_output_0">
 					<dim>2</dim>
 					<dim>2560</dim>
 					<dim>8</dim>
@@ -36789,13 +33600,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2142" name="m.up_blocks.0.resnets.1.conv1.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 2560, 3, 3" offset="1604941088" size="117964800"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.0.resnets.1.conv1.weight"/>
-			</rt_info>
+		<layer id="2047" name="up_blocks.0.resnets.1.conv1.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 2560, 3, 3" offset="802471106" size="58982400" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.0.resnets.1.conv1.weight">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>2560</dim>
 					<dim>3</dim>
@@ -36803,11 +33611,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2143" name="Convolution_13528" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
+		<layer id="2048" name="up_blocks.0.resnets.1.conv1.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_13528"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>2560</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="up_blocks.0.resnets.1.conv1.weight">
+					<dim>1280</dim>
+					<dim>2560</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2049" name="/up_blocks.0/resnets.1/conv1/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -36831,10 +33658,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2144" name="Reshape_13548" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="1722905888" size="5120"/>
+		<layer id="2050" name="Reshape_21806_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="861453506" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -36842,11 +33669,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2145" name="onnx::Add_3903" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2051" name="Reshape_21806" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_13547, Reshape_13548, onnx::Add_3903"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2052" name="/up_blocks.0/resnets.1/conv1/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -36862,7 +33708,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3903">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.1/conv1/Conv_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -36870,40 +33716,35 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2146" name="onnx::Gemm_3905" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gemm_3905, onnx::Mul_3904"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-				</port>
-			</input>
+		<layer id="2053" name="up_blocks.0.resnets.1.time_emb_proj.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="861456066" size="3276800" />
 			<output>
-				<port id="1" precision="FP32" names="onnx::Gemm_3905">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2147" name="m.up_blocks.0.resnets.1.time_emb_proj.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="1722911008" size="6553600"/>
+		<layer id="2054" name="up_blocks.0.resnets.1.time_emb_proj.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.0.resnets.1.time_emb_proj.weight"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.0.resnets.1.time_emb_proj.weight">
+				<port id="1" precision="FP32" names="up_blocks.0.resnets.1.time_emb_proj.weight">
 					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2148" name="MatMul_13580" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="MatMul_13580"/>
-			</rt_info>
+		<layer id="2055" name="/up_blocks.0/resnets.1/time_emb_proj/Gemm/WithoutBiases" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -36921,20 +33762,35 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2149" name="Constant_150440" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280" offset="1729464608" size="5120"/>
+		<layer id="2056" name="Constant_87030_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280" offset="864732866" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2150" name="onnx::Unsqueeze_3906" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2057" name="Constant_87030" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Multiply_13581, onnx::Unsqueeze_3906"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2058" name="/up_blocks.0/resnets.1/time_emb_proj/Gemm" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -36946,27 +33802,21 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_3906">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.1/time_emb_proj/Gemm_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2151" name="onnx::Unsqueeze_3907" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3907"/>
-			</rt_info>
+		<layer id="2059" name="/up_blocks.0/resnets.1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_3907">
+				<port id="0" precision="I64" names="/up_blocks.0/resnets.1/Constant_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2152" name="onnx::Unsqueeze_3908" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3908"/>
-			</rt_info>
+		<layer id="2060" name="/up_blocks.0/resnets.1/Unsqueeze" type="Unsqueeze" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -36977,28 +33827,22 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_3908">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.1/Unsqueeze_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2153" name="onnx::Unsqueeze_3909" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="14129392" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3909"/>
-			</rt_info>
+		<layer id="2061" name="/up_blocks.0/resnets.1/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="7064752" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_3909">
+				<port id="0" precision="I64" names="/up_blocks.0/resnets.1/Constant_1_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2154" name="onnx::Add_3910" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3910"/>
-			</rt_info>
+		<layer id="2062" name="/up_blocks.0/resnets.1/Unsqueeze_1" type="Unsqueeze" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -37010,7 +33854,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3910">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.1/Unsqueeze_1_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -37018,11 +33862,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2155" name="onnx::Cast_3911" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.548, onnx::Cast_3911"/>
-			</rt_info>
+		<layer id="2063" name="/up_blocks.0/resnets.1/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -37038,7 +33879,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.548,onnx::Cast_3911">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.1/Add_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -37046,22 +33887,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2156" name="onnx::Reshape_3913" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3913"/>
-			</rt_info>
+		<layer id="2064" name="/up_blocks.0/resnets.1/norm2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_3913">
+				<port id="0" precision="I64" names="/up_blocks.0/resnets.1/norm2/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2157" name="onnx::InstanceNormalization_3914" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_3914"/>
-			</rt_info>
+		<layer id="2065" name="/up_blocks.0/resnets.1/norm2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -37074,29 +33909,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_3914">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.1/norm2/Reshape_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2158" name="Constant_13598" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_13598"/>
-			</rt_info>
+		<layer id="2066" name="Constant_21854" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2159" name="MVN_13599" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_13618, Concat_13663, MVN_13599, Multiply_13646, Reshape_13619, Reshape_13664, onnx::Reshape_3917"/>
-			</rt_info>
+		<layer id="2067" name="MVN_21855" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -37108,18 +33937,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_3917">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.1/norm2/InstanceNormalization_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2160" name="onnx::Reshape_3918" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3918"/>
-			</rt_info>
+		<layer id="2068" name="/up_blocks.0/resnets.1/norm2/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -37129,16 +33955,13 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_3918">
+				<port id="1" precision="I64" names="/up_blocks.0/resnets.1/norm2/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2161" name="onnx::Mul_3919" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_3919"/>
-			</rt_info>
+		<layer id="2069" name="/up_blocks.0/resnets.1/norm2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -37150,7 +33973,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_3919">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.1/norm2/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -37158,10 +33981,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2162" name="Constant_150441" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="1729469728" size="5120"/>
+		<layer id="2070" name="Constant_87031_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="864735426" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -37169,11 +33992,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2163" name="onnx::Add_3922" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2071" name="Constant_87031" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3922"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2072" name="/up_blocks.0/resnets.1/norm2/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -37189,7 +34031,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3922">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.1/norm2/Mul_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -37197,10 +34039,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2164" name="Constant_150442" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="1729474848" size="5120"/>
+		<layer id="2073" name="Constant_87032_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="864737986" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -37208,11 +34050,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2165" name="onnx::Cast_3925" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2074" name="Constant_87032" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.552, onnx::Cast_3925"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2075" name="/up_blocks.0/resnets.1/norm2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -37228,7 +34089,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.552,onnx::Cast_3925">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.1/norm2/Add_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -37236,10 +34097,7 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2166" name="input.556" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.556, onnx::Mul_3927"/>
-			</rt_info>
+		<layer id="2076" name="/up_blocks.0/resnets.1/nonlinearity_1/Mul" type="Swish" version="opset4">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -37249,7 +34107,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="input.556">
+				<port id="1" precision="FP32" names="/up_blocks.0/resnets.1/nonlinearity_1/Mul_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -37257,13 +34115,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2167" name="m.up_blocks.0.resnets.1.conv2.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280, 3, 3" offset="1729479968" size="58982400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.0.resnets.1.conv2.weight"/>
-			</rt_info>
+		<layer id="2077" name="up_blocks.0.resnets.1.conv2.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280, 3, 3" offset="864740546" size="29491200" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.0.resnets.1.conv2.weight">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 					<dim>3</dim>
@@ -37271,11 +34126,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2168" name="Convolution_13704" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
+		<layer id="2078" name="up_blocks.0.resnets.1.conv2.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_13704"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="up_blocks.0.resnets.1.conv2.weight">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2079" name="/up_blocks.0/resnets.1/conv2/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -37299,10 +34173,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2169" name="Reshape_13724" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="1788462368" size="5120"/>
+		<layer id="2080" name="Reshape_21979_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="894231746" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -37310,11 +34184,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2170" name="onnx::Add_3929" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2081" name="Reshape_21979" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_13723, Reshape_13724, onnx::Add_3929"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2082" name="/up_blocks.0/resnets.1/conv2/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -37330,7 +34223,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3929">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.1/conv2/Conv_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -37338,11 +34231,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2171" name="onnx::Div_3931" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_3933, onnx::Div_3931"/>
-			</rt_info>
+		<layer id="2083" name="/up_blocks.0/resnets.1/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -37358,7 +34248,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Concat_3933,onnx::Div_3931">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.1/Add_1_output_0,/up_blocks.0/resnets.1/Div_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -37366,11 +34256,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2172" name="onnx::Cast_3934" type="Concat" version="opset1">
-			<data axis="1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.560, onnx::Cast_3934"/>
-			</rt_info>
+		<layer id="2084" name="/up_blocks.0/Concat_2" type="Concat" version="opset1">
+			<data axis="1" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -37386,7 +34273,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.560,onnx::Cast_3934">
+				<port id="2" precision="FP32" names="/up_blocks.0/Concat_2_output_0">
 					<dim>2</dim>
 					<dim>2560</dim>
 					<dim>8</dim>
@@ -37394,13 +34281,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2173" name="m.up_blocks.0.resnets.2.conv_shortcut.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 2560, 1, 1" offset="1788467488" size="13107200"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.0.resnets.2.conv_shortcut.weight"/>
-			</rt_info>
+		<layer id="2085" name="up_blocks.0.resnets.2.conv_shortcut.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 2560, 1, 1" offset="894234306" size="6553600" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.0.resnets.2.conv_shortcut.weight">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>2560</dim>
 					<dim>1</dim>
@@ -37408,11 +34292,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2174" name="Convolution_14144" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
+		<layer id="2086" name="up_blocks.0.resnets.2.conv_shortcut.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_14144"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>2560</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="up_blocks.0.resnets.2.conv_shortcut.weight">
+					<dim>1280</dim>
+					<dim>2560</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2087" name="/up_blocks.0/resnets.2/conv_shortcut/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -37436,10 +34339,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2175" name="Reshape_14164" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="1801574688" size="5120"/>
+		<layer id="2088" name="Reshape_22414_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="900787906" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -37447,11 +34350,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2176" name="onnx::Add_3979" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2089" name="Reshape_22414" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_14163, Reshape_14164, onnx::Add_3979"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2090" name="/up_blocks.0/resnets.2/conv_shortcut/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -37467,7 +34389,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3979">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.2/conv_shortcut/Conv_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -37475,22 +34397,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2177" name="onnx::Reshape_3936" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3936"/>
-			</rt_info>
+		<layer id="2091" name="/up_blocks.0/resnets.2/norm1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_3936">
+				<port id="0" precision="I64" names="/up_blocks.0/resnets.2/norm1/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2178" name="onnx::InstanceNormalization_3937" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_3937"/>
-			</rt_info>
+		<layer id="2092" name="/up_blocks.0/resnets.2/norm1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -37503,29 +34419,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_3937">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.2/norm1/Reshape_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2179" name="Constant_13814" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_13814"/>
-			</rt_info>
+		<layer id="2093" name="Constant_22068" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2180" name="MVN_13815" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_13834, Concat_13879, MVN_13815, Multiply_13862, Reshape_13835, Reshape_13880, onnx::Reshape_3940"/>
-			</rt_info>
+		<layer id="2094" name="MVN_22069" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -37537,18 +34447,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_3940">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.2/norm1/InstanceNormalization_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2181" name="onnx::Reshape_3941" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3941"/>
-			</rt_info>
+		<layer id="2095" name="/up_blocks.0/resnets.2/norm1/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -37558,16 +34465,13 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_3941">
+				<port id="1" precision="I64" names="/up_blocks.0/resnets.2/norm1/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2182" name="onnx::Mul_3942" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_3942"/>
-			</rt_info>
+		<layer id="2096" name="/up_blocks.0/resnets.2/norm1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -37579,7 +34483,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_3942">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.2/norm1/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>2560</dim>
 					<dim>8</dim>
@@ -37587,10 +34491,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2183" name="Constant_150443" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 2560, 1, 1" offset="1801579808" size="10240"/>
+		<layer id="2097" name="Constant_87033_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 2560, 1, 1" offset="900790466" size="5120" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>2560</dim>
 					<dim>1</dim>
@@ -37598,11 +34502,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2184" name="onnx::Add_3945" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2098" name="Constant_87033" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3945"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>2560</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>2560</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2099" name="/up_blocks.0/resnets.2/norm1/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -37618,7 +34541,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3945">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.2/norm1/Mul_output_0">
 					<dim>2</dim>
 					<dim>2560</dim>
 					<dim>8</dim>
@@ -37626,10 +34549,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2185" name="Constant_150444" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 2560, 1, 1" offset="1801590048" size="10240"/>
+		<layer id="2100" name="Constant_87034_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 2560, 1, 1" offset="900795586" size="5120" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>2560</dim>
 					<dim>1</dim>
@@ -37637,11 +34560,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2186" name="onnx::Cast_3948" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2101" name="Constant_87034" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.564, onnx::Cast_3948"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>2560</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>2560</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2102" name="/up_blocks.0/resnets.2/norm1/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -37657,7 +34599,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.564,onnx::Cast_3948">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.2/norm1/Add_output_0">
 					<dim>2</dim>
 					<dim>2560</dim>
 					<dim>8</dim>
@@ -37665,10 +34607,7 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2187" name="input.568" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.568, onnx::Mul_3950"/>
-			</rt_info>
+		<layer id="2103" name="/up_blocks.0/resnets.2/nonlinearity/Mul" type="Swish" version="opset4">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -37678,7 +34617,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="input.568">
+				<port id="1" precision="FP32" names="/up_blocks.0/resnets.2/nonlinearity/Mul_output_0">
 					<dim>2</dim>
 					<dim>2560</dim>
 					<dim>8</dim>
@@ -37686,13 +34625,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2188" name="m.up_blocks.0.resnets.2.conv1.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 2560, 3, 3" offset="1801600288" size="117964800"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.0.resnets.2.conv1.weight"/>
-			</rt_info>
+		<layer id="2104" name="up_blocks.0.resnets.2.conv1.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 2560, 3, 3" offset="900800706" size="58982400" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.0.resnets.2.conv1.weight">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>2560</dim>
 					<dim>3</dim>
@@ -37700,11 +34636,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2189" name="Convolution_13920" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
+		<layer id="2105" name="up_blocks.0.resnets.2.conv1.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_13920"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>2560</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="up_blocks.0.resnets.2.conv1.weight">
+					<dim>1280</dim>
+					<dim>2560</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2106" name="/up_blocks.0/resnets.2/conv1/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -37728,10 +34683,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2190" name="Reshape_13940" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="1919565088" size="5120"/>
+		<layer id="2107" name="Reshape_22193_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="959783106" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -37739,11 +34694,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2191" name="onnx::Add_3952" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2108" name="Reshape_22193" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_13939, Reshape_13940, onnx::Add_3952"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2109" name="/up_blocks.0/resnets.2/conv1/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -37759,7 +34733,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3952">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.2/conv1/Conv_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -37767,40 +34741,35 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2192" name="onnx::Gemm_3954" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gemm_3954, onnx::Mul_3953"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-				</port>
-			</input>
+		<layer id="2110" name="up_blocks.0.resnets.2.time_emb_proj.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="959785666" size="3276800" />
 			<output>
-				<port id="1" precision="FP32" names="onnx::Gemm_3954">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2193" name="m.up_blocks.0.resnets.2.time_emb_proj.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="1919570208" size="6553600"/>
+		<layer id="2111" name="up_blocks.0.resnets.2.time_emb_proj.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.0.resnets.2.time_emb_proj.weight"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.0.resnets.2.time_emb_proj.weight">
+				<port id="1" precision="FP32" names="up_blocks.0.resnets.2.time_emb_proj.weight">
 					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2194" name="MatMul_13972" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="MatMul_13972"/>
-			</rt_info>
+		<layer id="2112" name="/up_blocks.0/resnets.2/time_emb_proj/Gemm/WithoutBiases" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -37818,20 +34787,35 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2195" name="Constant_150445" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280" offset="1926123808" size="5120"/>
+		<layer id="2113" name="Constant_87035_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280" offset="963062466" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2196" name="onnx::Unsqueeze_3955" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2114" name="Constant_87035" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Multiply_13973, onnx::Unsqueeze_3955"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2115" name="/up_blocks.0/resnets.2/time_emb_proj/Gemm" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -37843,27 +34827,21 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_3955">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.2/time_emb_proj/Gemm_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2197" name="onnx::Unsqueeze_3956" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3956"/>
-			</rt_info>
+		<layer id="2116" name="/up_blocks.0/resnets.2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_3956">
+				<port id="0" precision="I64" names="/up_blocks.0/resnets.2/Constant_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2198" name="onnx::Unsqueeze_3957" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3957"/>
-			</rt_info>
+		<layer id="2117" name="/up_blocks.0/resnets.2/Unsqueeze" type="Unsqueeze" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -37874,28 +34852,22 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_3957">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.2/Unsqueeze_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2199" name="onnx::Unsqueeze_3958" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="14129392" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_3958"/>
-			</rt_info>
+		<layer id="2118" name="/up_blocks.0/resnets.2/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="7064752" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_3958">
+				<port id="0" precision="I64" names="/up_blocks.0/resnets.2/Constant_1_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2200" name="onnx::Add_3959" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3959"/>
-			</rt_info>
+		<layer id="2119" name="/up_blocks.0/resnets.2/Unsqueeze_1" type="Unsqueeze" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -37907,7 +34879,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3959">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.2/Unsqueeze_1_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -37915,11 +34887,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2201" name="onnx::Cast_3960" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.572, onnx::Cast_3960"/>
-			</rt_info>
+		<layer id="2120" name="/up_blocks.0/resnets.2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -37935,7 +34904,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.572,onnx::Cast_3960">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.2/Add_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -37943,22 +34912,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2202" name="onnx::Reshape_3962" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3962"/>
-			</rt_info>
+		<layer id="2121" name="/up_blocks.0/resnets.2/norm2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_3962">
+				<port id="0" precision="I64" names="/up_blocks.0/resnets.2/norm2/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2203" name="onnx::InstanceNormalization_3963" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_3963"/>
-			</rt_info>
+		<layer id="2122" name="/up_blocks.0/resnets.2/norm2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -37971,29 +34934,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_3963">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.2/norm2/Reshape_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2204" name="Constant_13990" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_13990"/>
-			</rt_info>
+		<layer id="2123" name="Constant_22241" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2205" name="MVN_13991" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_14010, Concat_14055, MVN_13991, Multiply_14038, Reshape_14011, Reshape_14056, onnx::Reshape_3966"/>
-			</rt_info>
+		<layer id="2124" name="MVN_22242" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -38005,18 +34962,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_3966">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.2/norm2/InstanceNormalization_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2206" name="onnx::Reshape_3967" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3967"/>
-			</rt_info>
+		<layer id="2125" name="/up_blocks.0/resnets.2/norm2/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -38026,16 +34980,13 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_3967">
+				<port id="1" precision="I64" names="/up_blocks.0/resnets.2/norm2/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2207" name="onnx::Mul_3968" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_3968"/>
-			</rt_info>
+		<layer id="2126" name="/up_blocks.0/resnets.2/norm2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -38047,7 +34998,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_3968">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.2/norm2/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -38055,10 +35006,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2208" name="Constant_150446" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="1926128928" size="5120"/>
+		<layer id="2127" name="Constant_87036_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="963065026" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -38066,11 +35017,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2209" name="onnx::Add_3971" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2128" name="Constant_87036" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_3971"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2129" name="/up_blocks.0/resnets.2/norm2/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -38086,7 +35056,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3971">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.2/norm2/Mul_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -38094,10 +35064,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2210" name="Constant_150447" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="1926134048" size="5120"/>
+		<layer id="2130" name="Constant_87037_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="963067586" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -38105,11 +35075,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2211" name="onnx::Cast_3974" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2131" name="Constant_87037" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.576, onnx::Cast_3974"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2132" name="/up_blocks.0/resnets.2/norm2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -38125,7 +35114,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.576,onnx::Cast_3974">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.2/norm2/Add_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -38133,10 +35122,7 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2212" name="input.580" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.580, onnx::Mul_3976"/>
-			</rt_info>
+		<layer id="2133" name="/up_blocks.0/resnets.2/nonlinearity_1/Mul" type="Swish" version="opset4">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -38146,7 +35132,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="input.580">
+				<port id="1" precision="FP32" names="/up_blocks.0/resnets.2/nonlinearity_1/Mul_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -38154,13 +35140,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2213" name="m.up_blocks.0.resnets.2.conv2.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280, 3, 3" offset="1926139168" size="58982400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.0.resnets.2.conv2.weight"/>
-			</rt_info>
+		<layer id="2134" name="up_blocks.0.resnets.2.conv2.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280, 3, 3" offset="963070146" size="29491200" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.0.resnets.2.conv2.weight">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 					<dim>3</dim>
@@ -38168,11 +35151,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2214" name="Convolution_14096" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
+		<layer id="2135" name="up_blocks.0.resnets.2.conv2.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_14096"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="up_blocks.0.resnets.2.conv2.weight">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2136" name="/up_blocks.0/resnets.2/conv2/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -38196,10 +35198,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2215" name="Reshape_14116" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="1985121568" size="5120"/>
+		<layer id="2137" name="Reshape_22366_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="992561346" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -38207,11 +35209,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2216" name="onnx::Add_3978" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2138" name="Reshape_22366" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_14115, Reshape_14116, onnx::Add_3978"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2139" name="/up_blocks.0/resnets.2/conv2/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -38227,7 +35248,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_3978">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.2/conv2/Conv_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -38235,11 +35256,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2217" name="onnx::Div_3980" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_3980, x"/>
-			</rt_info>
+		<layer id="2140" name="/up_blocks.0/resnets.2/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -38255,7 +35273,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Div_3980,x">
+				<port id="2" precision="FP32" names="/up_blocks.0/resnets.2/Add_1_output_0,/up_blocks.0/resnets.2/Div_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>8</dim>
@@ -38263,11 +35281,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2218" name="ShapeOf_14196" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="ShapeOf_14196"/>
-			</rt_info>
+		<layer id="2141" name="ShapeOf_22447" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -38282,11 +35297,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2219" name="Convert_14197" type="Convert" version="opset1">
-			<data destination_type="f32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Convert_14197"/>
-			</rt_info>
+		<layer id="2142" name="Convert_22448" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<input>
 				<port id="0" precision="I64">
 					<dim>4</dim>
@@ -38298,22 +35310,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2220" name="onnx::Resize_8954" type="Const" version="opset1">
-			<data element_type="f32" shape="4" offset="1985126688" size="16"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Resize_8954"/>
-			</rt_info>
+		<layer id="2143" name="/up_blocks.0/upsamplers.0/Constant" type="Const" version="opset1">
+			<data element_type="f32" shape="4" offset="992563906" size="16" />
 			<output>
-				<port id="0" precision="FP32" names="onnx::Resize_8833,onnx::Resize_8954,onnx::Resize_9075">
+				<port id="0" precision="FP32" names="/up_blocks.0/upsamplers.0/Constant_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2221" name="Multiply_14198" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Multiply_14198"/>
-			</rt_info>
+		<layer id="2144" name="Multiply_22449" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>4</dim>
@@ -38328,11 +35334,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2222" name="Convert_14199" type="Convert" version="opset1">
-			<data destination_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Convert_14199"/>
-			</rt_info>
+		<layer id="2145" name="Convert_22450" type="Convert" version="opset1">
+			<data destination_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>4</dim>
@@ -38344,11 +35347,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2223" name="input.584" type="Interpolate" version="opset4">
-			<data mode="nearest" shape_calculation_mode="scales" coordinate_transformation_mode="asymmetric" nearest_mode="floor" antialias="false" pads_begin="0, 0, 0, 0" pads_end="0, 0, 0, 0" cube_coeff="-0.75"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.584"/>
-			</rt_info>
+		<layer id="2146" name="/up_blocks.0/upsamplers.0/Resize" type="Interpolate" version="opset4">
+			<data mode="nearest" shape_calculation_mode="scales" coordinate_transformation_mode="asymmetric" nearest_mode="floor" antialias="false" pads_begin="0, 0, 0, 0" pads_end="0, 0, 0, 0" cube_coeff="-0.75" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -38364,7 +35364,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="FP32" names="input.584">
+				<port id="3" precision="FP32" names="/up_blocks.0/upsamplers.0/Resize_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -38372,13 +35372,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2224" name="m.up_blocks.0.upsamplers.0.conv.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280, 3, 3" offset="1985126704" size="58982400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.0.upsamplers.0.conv.weight"/>
-			</rt_info>
+		<layer id="2147" name="up_blocks.0.upsamplers.0.conv.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280, 3, 3" offset="992563922" size="29491200" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.0.upsamplers.0.conv.weight">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 					<dim>3</dim>
@@ -38386,11 +35383,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2225" name="Convolution_14201" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
+		<layer id="2148" name="up_blocks.0.upsamplers.0.conv.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_14201"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="up_blocks.0.upsamplers.0.conv.weight">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2149" name="/up_blocks.0/upsamplers.0/conv/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -38414,10 +35430,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2226" name="Reshape_14221" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="2044109104" size="5120"/>
+		<layer id="2150" name="Reshape_22472_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="1022055122" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -38425,11 +35441,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2227" name="onnx::Concat_3988" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2151" name="Reshape_22472" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_14220, Reshape_14221, onnx::Concat_3988"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2152" name="/up_blocks.0/upsamplers.0/conv/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -38445,7 +35480,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Concat_3988">
+				<port id="2" precision="FP32" names="/up_blocks.0/upsamplers.0/conv/Conv_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -38453,11 +35488,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2228" name="onnx::Cast_3989" type="Concat" version="opset1">
-			<data axis="1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.588, onnx::Cast_3989"/>
-			</rt_info>
+		<layer id="2153" name="/up_blocks.1/Concat" type="Concat" version="opset1">
+			<data axis="1" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -38473,7 +35505,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.588,onnx::Cast_3989">
+				<port id="2" precision="FP32" names="/up_blocks.1/Concat_output_0">
 					<dim>2</dim>
 					<dim>2560</dim>
 					<dim>16</dim>
@@ -38481,13 +35513,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2229" name="m.up_blocks.1.resnets.0.conv_shortcut.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 2560, 1, 1" offset="2044114224" size="13107200"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.1.resnets.0.conv_shortcut.weight"/>
-			</rt_info>
+		<layer id="2154" name="up_blocks.1.resnets.0.conv_shortcut.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 2560, 1, 1" offset="1022057682" size="6553600" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.1.resnets.0.conv_shortcut.weight">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>2560</dim>
 					<dim>1</dim>
@@ -38495,11 +35524,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2230" name="Convolution_14590" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
+		<layer id="2155" name="up_blocks.1.resnets.0.conv_shortcut.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_14590"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>2560</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="up_blocks.1.resnets.0.conv_shortcut.weight">
+					<dim>1280</dim>
+					<dim>2560</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2156" name="/up_blocks.1/resnets.0/conv_shortcut/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -38523,10 +35571,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2231" name="Reshape_14610" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="2057221424" size="5120"/>
+		<layer id="2157" name="Reshape_22856_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="1028611282" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -38534,11 +35582,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2232" name="onnx::Add_4034" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2158" name="Reshape_22856" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_14609, Reshape_14610, onnx::Add_4034"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2159" name="/up_blocks.1/resnets.0/conv_shortcut/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -38554,7 +35621,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_4034">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.0/conv_shortcut/Conv_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -38562,22 +35629,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2233" name="onnx::Reshape_3991" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3991"/>
-			</rt_info>
+		<layer id="2160" name="/up_blocks.1/resnets.0/norm1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_3991">
+				<port id="0" precision="I64" names="/up_blocks.1/resnets.0/norm1/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2234" name="onnx::InstanceNormalization_3992" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_3992"/>
-			</rt_info>
+		<layer id="2161" name="/up_blocks.1/resnets.0/norm1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -38590,29 +35651,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_3992">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.0/norm1/Reshape_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>20480</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2235" name="Constant_14260" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_14260"/>
-			</rt_info>
+		<layer id="2162" name="Constant_22510" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2236" name="MVN_14261" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_14280, Concat_14325, MVN_14261, Multiply_14308, Reshape_14281, Reshape_14326, onnx::Reshape_3995"/>
-			</rt_info>
+		<layer id="2163" name="MVN_22511" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -38624,18 +35679,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_3995">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.0/norm1/InstanceNormalization_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>20480</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2237" name="onnx::Reshape_3996" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_3996"/>
-			</rt_info>
+		<layer id="2164" name="/up_blocks.1/resnets.0/norm1/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -38645,16 +35697,13 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_3996">
+				<port id="1" precision="I64" names="/up_blocks.1/resnets.0/norm1/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2238" name="onnx::Mul_3997" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_3997"/>
-			</rt_info>
+		<layer id="2165" name="/up_blocks.1/resnets.0/norm1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -38666,7 +35715,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_3997">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.0/norm1/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>2560</dim>
 					<dim>16</dim>
@@ -38674,10 +35723,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2239" name="Constant_150448" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 2560, 1, 1" offset="2057226544" size="10240"/>
+		<layer id="2166" name="Constant_87038_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 2560, 1, 1" offset="1028613842" size="5120" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>2560</dim>
 					<dim>1</dim>
@@ -38685,11 +35734,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2240" name="onnx::Add_4000" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2167" name="Constant_87038" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4000"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>2560</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>2560</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2168" name="/up_blocks.1/resnets.0/norm1/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -38705,7 +35773,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_4000">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.0/norm1/Mul_output_0">
 					<dim>2</dim>
 					<dim>2560</dim>
 					<dim>16</dim>
@@ -38713,10 +35781,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2241" name="Constant_150449" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 2560, 1, 1" offset="2057236784" size="10240"/>
+		<layer id="2169" name="Constant_87039_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 2560, 1, 1" offset="1028618962" size="5120" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>2560</dim>
 					<dim>1</dim>
@@ -38724,11 +35792,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2242" name="onnx::Cast_4003" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2170" name="Constant_87039" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.592, onnx::Cast_4003"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>2560</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>2560</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2171" name="/up_blocks.1/resnets.0/norm1/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -38744,7 +35831,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.592,onnx::Cast_4003">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.0/norm1/Add_output_0">
 					<dim>2</dim>
 					<dim>2560</dim>
 					<dim>16</dim>
@@ -38752,10 +35839,7 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2243" name="input.596" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.596, onnx::Mul_4005"/>
-			</rt_info>
+		<layer id="2172" name="/up_blocks.1/resnets.0/nonlinearity/Mul" type="Swish" version="opset4">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -38765,7 +35849,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="input.596">
+				<port id="1" precision="FP32" names="/up_blocks.1/resnets.0/nonlinearity/Mul_output_0">
 					<dim>2</dim>
 					<dim>2560</dim>
 					<dim>16</dim>
@@ -38773,13 +35857,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2244" name="m.up_blocks.1.resnets.0.conv1.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 2560, 3, 3" offset="2057247024" size="117964800"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.1.resnets.0.conv1.weight"/>
-			</rt_info>
+		<layer id="2173" name="up_blocks.1.resnets.0.conv1.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 2560, 3, 3" offset="1028624082" size="58982400" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.1.resnets.0.conv1.weight">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>2560</dim>
 					<dim>3</dim>
@@ -38787,11 +35868,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2245" name="Convolution_14366" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
+		<layer id="2174" name="up_blocks.1.resnets.0.conv1.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_14366"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>2560</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="up_blocks.1.resnets.0.conv1.weight">
+					<dim>1280</dim>
+					<dim>2560</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2175" name="/up_blocks.1/resnets.0/conv1/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -38815,10 +35915,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2246" name="Reshape_14386" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="2175211824" size="5120"/>
+		<layer id="2176" name="Reshape_22635_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="1087606482" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -38826,11 +35926,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2247" name="onnx::Add_4007" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2177" name="Reshape_22635" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_14385, Reshape_14386, onnx::Add_4007"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2178" name="/up_blocks.1/resnets.0/conv1/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -38846,7 +35965,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_4007">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.0/conv1/Conv_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -38854,40 +35973,35 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2248" name="onnx::Gemm_4009" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gemm_4009, onnx::Mul_4008"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-				</port>
-			</input>
+		<layer id="2179" name="up_blocks.1.resnets.0.time_emb_proj.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="1087609042" size="3276800" />
 			<output>
-				<port id="1" precision="FP32" names="onnx::Gemm_4009">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2249" name="m.up_blocks.1.resnets.0.time_emb_proj.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="2175216944" size="6553600"/>
+		<layer id="2180" name="up_blocks.1.resnets.0.time_emb_proj.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.1.resnets.0.time_emb_proj.weight"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.1.resnets.0.time_emb_proj.weight">
+				<port id="1" precision="FP32" names="up_blocks.1.resnets.0.time_emb_proj.weight">
 					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2250" name="MatMul_14418" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="MatMul_14418"/>
-			</rt_info>
+		<layer id="2181" name="/up_blocks.1/resnets.0/time_emb_proj/Gemm/WithoutBiases" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -38905,20 +36019,35 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2251" name="Constant_150450" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280" offset="2181770544" size="5120"/>
+		<layer id="2182" name="Constant_87040_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280" offset="1090885842" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2252" name="onnx::Unsqueeze_4010" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2183" name="Constant_87040" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Multiply_14419, onnx::Unsqueeze_4010"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2184" name="/up_blocks.1/resnets.0/time_emb_proj/Gemm" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -38930,27 +36059,21 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_4010">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.0/time_emb_proj/Gemm_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2253" name="onnx::Unsqueeze_4011" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4011"/>
-			</rt_info>
+		<layer id="2185" name="/up_blocks.1/resnets.0/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4011">
+				<port id="0" precision="I64" names="/up_blocks.1/resnets.0/Constant_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2254" name="onnx::Unsqueeze_4012" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4012"/>
-			</rt_info>
+		<layer id="2186" name="/up_blocks.1/resnets.0/Unsqueeze" type="Unsqueeze" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -38961,28 +36084,22 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_4012">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.0/Unsqueeze_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2255" name="onnx::Unsqueeze_4013" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="14129392" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4013"/>
-			</rt_info>
+		<layer id="2187" name="/up_blocks.1/resnets.0/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="7064752" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4013">
+				<port id="0" precision="I64" names="/up_blocks.1/resnets.0/Constant_1_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2256" name="onnx::Add_4014" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4014"/>
-			</rt_info>
+		<layer id="2188" name="/up_blocks.1/resnets.0/Unsqueeze_1" type="Unsqueeze" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -38994,7 +36111,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_4014">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.0/Unsqueeze_1_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -39002,11 +36119,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2257" name="onnx::Cast_4015" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.600, onnx::Cast_4015"/>
-			</rt_info>
+		<layer id="2189" name="/up_blocks.1/resnets.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -39022,7 +36136,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.600,onnx::Cast_4015">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.0/Add_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -39030,22 +36144,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2258" name="onnx::Reshape_4017" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4017"/>
-			</rt_info>
+		<layer id="2190" name="/up_blocks.1/resnets.0/norm2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_4017">
+				<port id="0" precision="I64" names="/up_blocks.1/resnets.0/norm2/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2259" name="onnx::InstanceNormalization_4018" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_4018"/>
-			</rt_info>
+		<layer id="2191" name="/up_blocks.1/resnets.0/norm2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -39058,29 +36166,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_4018">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.0/norm2/Reshape_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2260" name="Constant_14436" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_14436"/>
-			</rt_info>
+		<layer id="2192" name="Constant_22683" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2261" name="MVN_14437" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_14456, Concat_14501, MVN_14437, Multiply_14484, Reshape_14457, Reshape_14502, onnx::Reshape_4021"/>
-			</rt_info>
+		<layer id="2193" name="MVN_22684" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -39092,18 +36194,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_4021">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.0/norm2/InstanceNormalization_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2262" name="onnx::Reshape_4022" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4022"/>
-			</rt_info>
+		<layer id="2194" name="/up_blocks.1/resnets.0/norm2/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -39113,16 +36212,13 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_4022">
+				<port id="1" precision="I64" names="/up_blocks.1/resnets.0/norm2/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2263" name="onnx::Mul_4023" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4023"/>
-			</rt_info>
+		<layer id="2195" name="/up_blocks.1/resnets.0/norm2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -39134,7 +36230,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_4023">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.0/norm2/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -39142,10 +36238,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2264" name="Constant_150451" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="2181775664" size="5120"/>
+		<layer id="2196" name="Constant_87041_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="1090888402" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -39153,11 +36249,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2265" name="onnx::Add_4026" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2197" name="Constant_87041" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4026"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2198" name="/up_blocks.1/resnets.0/norm2/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -39173,7 +36288,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_4026">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.0/norm2/Mul_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -39181,10 +36296,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2266" name="Constant_150452" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="2181780784" size="5120"/>
+		<layer id="2199" name="Constant_87042_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="1090890962" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -39192,11 +36307,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2267" name="onnx::Cast_4029" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2200" name="Constant_87042" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.604, onnx::Cast_4029"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2201" name="/up_blocks.1/resnets.0/norm2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -39212,7 +36346,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.604,onnx::Cast_4029">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.0/norm2/Add_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -39220,10 +36354,7 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2268" name="input.608" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.608, onnx::Mul_4031"/>
-			</rt_info>
+		<layer id="2202" name="/up_blocks.1/resnets.0/nonlinearity_1/Mul" type="Swish" version="opset4">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -39233,7 +36364,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="input.608">
+				<port id="1" precision="FP32" names="/up_blocks.1/resnets.0/nonlinearity_1/Mul_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -39241,13 +36372,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2269" name="m.up_blocks.1.resnets.0.conv2.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280, 3, 3" offset="2181785904" size="58982400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.1.resnets.0.conv2.weight"/>
-			</rt_info>
+		<layer id="2203" name="up_blocks.1.resnets.0.conv2.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280, 3, 3" offset="1090893522" size="29491200" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.1.resnets.0.conv2.weight">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 					<dim>3</dim>
@@ -39255,11 +36383,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2270" name="Convolution_14542" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
+		<layer id="2204" name="up_blocks.1.resnets.0.conv2.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_14542"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="up_blocks.1.resnets.0.conv2.weight">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2205" name="/up_blocks.1/resnets.0/conv2/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -39283,10 +36430,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2271" name="Reshape_14562" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="2240768304" size="5120"/>
+		<layer id="2206" name="Reshape_22808_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="1120384722" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -39294,11 +36441,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2272" name="onnx::Add_4033" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2207" name="Reshape_22808" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_14561, Reshape_14562, onnx::Add_4033"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2208" name="/up_blocks.1/resnets.0/conv2/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -39314,7 +36480,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_4033">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.0/conv2/Conv_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -39322,11 +36488,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2273" name="onnx::Div_4035" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.612, onnx::Div_4035"/>
-			</rt_info>
+		<layer id="2209" name="/up_blocks.1/resnets.0/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -39342,7 +36505,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.612,onnx::Div_4035">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.0/Add_1_output_0,/up_blocks.1/resnets.0/Div_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -39350,22 +36513,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2274" name="onnx::Reshape_4050" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4050"/>
-			</rt_info>
+		<layer id="2210" name="/up_blocks.1/attentions.0/norm/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_4050">
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.0/norm/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2275" name="onnx::InstanceNormalization_4051" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_4051"/>
-			</rt_info>
+		<layer id="2211" name="/up_blocks.1/attentions.0/norm/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -39378,29 +36535,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_4051">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/norm/Reshape_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2276" name="Constant_14666" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_14666"/>
-			</rt_info>
+		<layer id="2212" name="Constant_22896" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2277" name="MVN_14667" type="MVN" version="opset6">
-			<data eps="9.9999999747524271e-07" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_14686, Concat_14731, MVN_14667, Multiply_14714, Reshape_14687, Reshape_14732, onnx::Reshape_4054"/>
-			</rt_info>
+		<layer id="2213" name="MVN_22897" type="MVN" version="opset6">
+			<data eps="9.9999999747524271e-07" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -39412,18 +36563,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_4054">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/norm/InstanceNormalization_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2278" name="onnx::Reshape_4055" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4038, onnx::Gather_4041, onnx::Gather_4044, onnx::Gather_4047, onnx::Reshape_4055"/>
-			</rt_info>
+		<layer id="2214" name="/up_blocks.1/attentions.0/norm/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -39433,16 +36581,13 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_4038,onnx::Gather_4041,onnx::Gather_4044,onnx::Gather_4047,onnx::Reshape_4055">
+				<port id="1" precision="I64" names="/up_blocks.1/attentions.0/norm/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2279" name="onnx::Mul_4056" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4056"/>
-			</rt_info>
+		<layer id="2215" name="/up_blocks.1/attentions.0/norm/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -39454,7 +36599,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_4056">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/norm/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -39462,10 +36607,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2280" name="Constant_150453" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="2240773424" size="5120"/>
+		<layer id="2216" name="Constant_87043_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="1120387282" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -39473,11 +36618,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2281" name="onnx::Add_4059" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2217" name="Constant_87043" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4059"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2218" name="/up_blocks.1/attentions.0/norm/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -39493,7 +36657,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_4059">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/norm/Mul_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -39501,10 +36665,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2282" name="Constant_150454" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="2240778544" size="5120"/>
+		<layer id="2219" name="Constant_87044_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="1120389842" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -39512,11 +36676,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2283" name="input.616" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2220" name="Constant_87044" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.616"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2221" name="/up_blocks.1/attentions.0/norm/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -39532,7 +36715,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.616">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/norm/Add_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -39540,13 +36723,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2284" name="m.up_blocks.1.attentions.0.proj_in.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280, 1, 1" offset="2240783664" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.1.attentions.0.proj_in.weight"/>
-			</rt_info>
+		<layer id="2222" name="up_blocks.1.attentions.0.proj_in.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280, 1, 1" offset="1120392402" size="3276800" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.1.attentions.0.proj_in.weight">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -39554,11 +36734,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2285" name="Convolution_14769" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
+		<layer id="2223" name="up_blocks.1.attentions.0.proj_in.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_14769"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="up_blocks.1.attentions.0.proj_in.weight">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2224" name="/up_blocks.1/attentions.0/proj_in/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -39582,10 +36781,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2286" name="Reshape_14789" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="2247337264" size="5120"/>
+		<layer id="2225" name="Reshape_23019_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="1123669202" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -39593,11 +36792,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2287" name="onnx::Transpose_4063" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2226" name="Reshape_23019" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_14788, Reshape_14789, onnx::Transpose_4063"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2227" name="/up_blocks.1/attentions.0/proj_in/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -39613,7 +36831,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_4063">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/proj_in/Conv_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -39621,21 +36839,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2288" name="Constant_14817" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18233080" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_14817"/>
-			</rt_info>
+		<layer id="2228" name="Constant_23047" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9116600" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2289" name="onnx::Reshape_4064" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4064"/>
-			</rt_info>
+		<layer id="2229" name="/up_blocks.1/attentions.0/Transpose" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -39648,7 +36860,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_4064">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/Transpose_output_0">
 					<dim>2</dim>
 					<dim>16</dim>
 					<dim>16</dim>
@@ -39656,289 +36868,147 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2290" name="Constant_88644" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_14643, onnx::Concat_4067, onnx::Gather_4039, onnx::Unsqueeze_4040, onnx::Unsqueeze_4066"/>
-			</rt_info>
+		<layer id="2230" name="/up_blocks.1/attentions.0/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="151498600" size="24" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.0/Constant_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2291" name="Constant_14643" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_14643"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="2292" name="onnx::Unsqueeze_4040" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_14643, onnx::Concat_4067, onnx::Gather_4039, onnx::Unsqueeze_4040, onnx::Unsqueeze_4066"/>
-			</rt_info>
+		<layer id="2231" name="/up_blocks.1/attentions.0/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>16</dim>
+					<dim>16</dim>
+					<dim>1280</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>3</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_4067">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2293" name="onnx::Gather_4045" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4045"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_4045"/>
-			</output>
-		</layer>
-		<layer id="2294" name="Constant_14651" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_14651"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="2295" name="onnx::Mul_4046" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_14651, onnx::Gather_4045, onnx::Mul_4046"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/Reshape_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Mul_4046"/>
-			</output>
-		</layer>
-		<layer id="2296" name="onnx::Gather_4048" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="14129392" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4048"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_4048"/>
 			</output>
 		</layer>
-		<layer id="2297" name="Constant_14655" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_14655"/>
-			</rt_info>
+		<layer id="2232" name="Constant_23056" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="2298" name="onnx::Mul_4049" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_14655, onnx::Gather_4048, onnx::Mul_4049"/>
-			</rt_info>
-			<input>
 				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Mul_4049"/>
-			</output>
-		</layer>
-		<layer id="2299" name="onnx::Unsqueeze_4065" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4065"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_4065"/>
-			</output>
-		</layer>
-		<layer id="2300" name="onnx::Unsqueeze_4068" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4068"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4068">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2301" name="onnx::Concat_4069" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4069, onnx::Unsqueeze_4068"/>
-			</rt_info>
+		<layer id="2233" name="/up_blocks.1/attentions.0/transformer_blocks.0/norm1/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
+				</port>
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_4069">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2302" name="Constant_88653" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_14647, onnx::Concat_4071, onnx::Gather_4042, onnx::Unsqueeze_4043, onnx::Unsqueeze_4070"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/norm1/Div_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2303" name="Constant_14647" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_14647"/>
-			</rt_info>
+		<layer id="2234" name="Constant_87045_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="1123671762" size="2560" />
 			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="2304" name="onnx::Unsqueeze_4043" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_14647, onnx::Concat_4071, onnx::Gather_4042, onnx::Unsqueeze_4043, onnx::Unsqueeze_4070"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_4071">
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2305" name="onnx::Reshape_4072" type="Concat" version="opset1">
-			<data axis="0"/>
+		<layer id="2235" name="Constant_87045" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4072"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_4072">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2306" name="input.620" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.620"/>
-			</rt_info>
+		<layer id="2236" name="/up_blocks.1/attentions.0/transformer_blocks.0/norm1/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.620">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/norm1/Mul_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2307" name="Constant_14902" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_14902"/>
-			</rt_info>
+		<layer id="2237" name="Constant_87046_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="1123674322" size="2560" />
 			<output>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2308" name="onnx::Mul_4082" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
+		<layer id="2238" name="Constant_87046" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4078, onnx::Div_4081, onnx::Mul_4082, onnx::Pow_4075, onnx::ReduceMean_4077, onnx::Sqrt_4080, onnx::Sub_4074"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_4082">
-					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="2309" name="Constant_150455" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="2247342384" size="5120"/>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2310" name="onnx::Add_4083" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4083"/>
-			</rt_info>
+		<layer id="2239" name="/up_blocks.1/attentions.0/transformer_blocks.0/norm1/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -39952,65 +37022,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_4083">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/norm1/Add_1_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2311" name="Constant_150456" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="2247347504" size="5120"/>
+		<layer id="2240" name="Constant_85498_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="1123676882" size="3276800" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2312" name="onnx::MatMul_4084" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2241" name="Constant_85498" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_4084"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_4084">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2313" name="Constant_147971" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="2247352624" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8840, q.111"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2314" name="q.111" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8840, q.111"/>
-			</rt_info>
+		<layer id="2242" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/to_q/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -40023,149 +37070,158 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.111">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/to_q/MatMul_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2315" name="Constant_107544" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
+		<layer id="2243" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="154780544" size="32" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Constant_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2316" name="onnx::Gather_4091" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4091, onnx::Gather_4094, onnx::Gather_4097"/>
-			</rt_info>
+		<layer id="2244" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_4091,onnx::Gather_4094,onnx::Gather_4097">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Reshape_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>8</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2317" name="onnx::Gather_4098" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4098"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_4098"/>
-			</output>
-		</layer>
-		<layer id="2318" name="Constant_14928" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_14928"/>
-			</rt_info>
+		<layer id="2245" name="Constant_23079" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64">
+					<dim>4</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2319" name="onnx::Div_4099" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_14928, onnx::Div_4099, onnx::Gather_4098"/>
-			</rt_info>
+		<layer id="2246" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Transpose" type="Transpose" version="opset1">
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>8</dim>
+					<dim>160</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_4099"/>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Transpose_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>256</dim>
+					<dim>160</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2320" name="onnx::Div_4100" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_4100"/>
-			</rt_info>
+		<layer id="2247" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="154780576" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_4100"/>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Constant_1_output_0">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2321" name="onnx::Cast_4101" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_4101, onnx::Cast_4102, onnx::Div_4100, onnx::Unsqueeze_4103"/>
-			</rt_info>
+		<layer id="2248" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>256</dim>
+					<dim>160</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>3</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_4101,onnx::Cast_4102,onnx::Unsqueeze_4103"/>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Reshape_1_output_0">
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>160</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2322" name="onnx::Unsqueeze_4111" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4111"/>
-			</rt_info>
+		<layer id="2249" name="Constant_85505_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="1126953682" size="3276800" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4111">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2323" name="onnx::Concat_4112" type="Unsqueeze" version="opset1">
+		<layer id="2250" name="Constant_85505" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4112, onnx::Unsqueeze_4111"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_4112">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2324" name="onnx::Reshape_4113" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_4113"/>
-			</rt_info>
+		<layer id="2251" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/to_k/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/to_k/MatMul_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2252" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="154780544" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Constant_2_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2325" name="onnx::Transpose_4114" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_4114"/>
-			</rt_info>
+		<layer id="2253" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Reshape_2" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -40177,7 +37233,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_4114">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Reshape_2_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>8</dim>
@@ -40185,21 +37241,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2326" name="Constant_15041" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_15041"/>
-			</rt_info>
+		<layer id="2254" name="Constant_23095" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2327" name="onnx::Reshape_4115" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4115"/>
-			</rt_info>
+		<layer id="2255" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Transpose_1" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -40212,7 +37262,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_4115">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Transpose_1_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>256</dim>
@@ -40220,408 +37270,334 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2328" name="onnx::Gather_4092" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4092"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_4092"/>
-			</output>
-		</layer>
-		<layer id="2329" name="Constant_14920" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_14920"/>
-			</rt_info>
+		<layer id="2256" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="154780576" size="24" />
 			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="2330" name="onnx::Unsqueeze_4093" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_14920, onnx::Gather_4092, onnx::Unsqueeze_4093"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Constant_3_output_0">
 					<dim>3</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_4093"/>
-			</output>
-		</layer>
-		<layer id="2331" name="onnx::Mul_4116" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4116"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_4116"/>
 			</output>
 		</layer>
-		<layer id="2332" name="onnx::Unsqueeze_4117" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4116, onnx::Unsqueeze_4117"/>
-			</rt_info>
+		<layer id="2257" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Reshape_3" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>256</dim>
+					<dim>160</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>3</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_4117"/>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Reshape_3_output_0">
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>160</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2333" name="onnx::Unsqueeze_4122" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4122"/>
-			</rt_info>
+		<layer id="2258" name="Constant_87047_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1" offset="158057400" size="2" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4122">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2334" name="onnx::Concat_4123" type="Unsqueeze" version="opset1">
+		<layer id="2259" name="Constant_87047" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4123, onnx::Unsqueeze_4122"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_4123">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2335" name="Constant_88680" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_14924, onnx::Concat_4125, onnx::Gather_4095, onnx::Unsqueeze_4096, onnx::Unsqueeze_4124"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2336" name="Constant_14924" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_14924"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="2337" name="onnx::Unsqueeze_4096" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_14924, onnx::Concat_4125, onnx::Gather_4095, onnx::Unsqueeze_4096, onnx::Unsqueeze_4124"/>
-			</rt_info>
+		<layer id="2260" name="Multiply_86203" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_4125">
-					<dim>1</dim>
+				<port id="2" precision="FP32">
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2338" name="onnx::Div_4118" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_4118"/>
-			</rt_info>
+		<layer id="2261" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Mul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>160</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>160</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_4118"/>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Add_output_0,/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Mul_output_0">
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>256</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2339" name="onnx::Cast_4119" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_4119, onnx::Cast_4120, onnx::Div_4118, onnx::Unsqueeze_4121"/>
-			</rt_info>
+		<layer id="2262" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Softmax" type="SoftMax" version="opset8">
+			<data axis="-1" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>256</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_4119,onnx::Cast_4120,onnx::Unsqueeze_4121"/>
+				<port id="1" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Cast_output_0,/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Softmax_output_0">
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>256</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2340" name="onnx::Unsqueeze_4126" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4126"/>
-			</rt_info>
+		<layer id="2263" name="Constant_85512_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="1130230482" size="3276800" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4126">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2341" name="onnx::Concat_4127" type="Unsqueeze" version="opset1">
+		<layer id="2264" name="Constant_85512" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4127, onnx::Unsqueeze_4126"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_4127">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2342" name="onnx::Reshape_4128" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4128"/>
-			</rt_info>
+		<layer id="2265" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/to_v/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_4128">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/to_v/MatMul_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2343" name="q.115" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="q.115"/>
-			</rt_info>
+		<layer id="2266" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Constant_4" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="154780544" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Constant_4_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2267" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Reshape_4" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>256</dim>
-					<dim>160</dim>
+					<dim>1280</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>3</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.115">
-					<dim>16</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Reshape_4_output_0">
+					<dim>2</dim>
 					<dim>256</dim>
+					<dim>8</dim>
 					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2344" name="Constant_147978" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="2253906224" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.111, onnx::MatMul_8841"/>
-			</rt_info>
+		<layer id="2268" name="Constant_23111" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1280</dim>
-					<dim>1280</dim>
+				<port id="0" precision="I64">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2345" name="k.111" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.111, onnx::MatMul_8841"/>
-			</rt_info>
+		<layer id="2269" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Transpose_2" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>8</dim>
+					<dim>160</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>1280</dim>
-					<dim>1280</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.111">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Transpose_2_output_0">
 					<dim>2</dim>
+					<dim>8</dim>
 					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2346" name="Constant_107613" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
+		<layer id="2270" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="154780576" size="24" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Constant_5_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2347" name="onnx::Gather_4130" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4130, onnx::Gather_4133, onnx::Gather_4136"/>
-			</rt_info>
+		<layer id="2271" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Reshape_5" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>8</dim>
 					<dim>256</dim>
-					<dim>1280</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="I64" names="onnx::Gather_4130,onnx::Gather_4133,onnx::Gather_4136">
-					<dim>3</dim>
+					<dim>160</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="2348" name="onnx::Gather_4137" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4137"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_4137"/>
-			</output>
-		</layer>
-		<layer id="2349" name="Constant_15169" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_15169"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="2350" name="onnx::Div_4138" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_15169, onnx::Div_4138, onnx::Gather_4137"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
+				<port id="1" precision="I64">
 					<dim>3</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_4138"/>
-			</output>
-		</layer>
-		<layer id="2351" name="onnx::Div_4139" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_4139"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_4139"/>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Reshape_5_output_0">
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>160</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2352" name="onnx::Cast_4140" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_4140, onnx::Cast_4141, onnx::Div_4139, onnx::Unsqueeze_4142"/>
-			</rt_info>
+		<layer id="2272" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/MatMul_1" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="false" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>256</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>160</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_4140,onnx::Cast_4141,onnx::Unsqueeze_4142"/>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/MatMul_1_output_0">
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>160</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2353" name="onnx::Unsqueeze_4150" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4150"/>
-			</rt_info>
+		<layer id="2273" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Constant_8" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="161334202" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4150">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Constant_8_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2354" name="onnx::Concat_4151" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4151, onnx::Unsqueeze_4150"/>
-			</rt_info>
+		<layer id="2274" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Reshape_6" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>160</dim>
+				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_4151">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Reshape_6_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2355" name="onnx::Reshape_4152" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_4152"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
+		<layer id="2275" name="Constant_23137" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="3" precision="I64">
+				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2356" name="onnx::Transpose_4153" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_4153"/>
-			</rt_info>
+		<layer id="2276" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Transpose_4" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>8</dim>
 					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>160</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_4153">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Transpose_4_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>8</dim>
@@ -40629,21 +37605,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2357" name="Constant_15282" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_15282"/>
-			</rt_info>
+		<layer id="2277" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Constant_9" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="151498600" size="24" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Constant_9_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2358" name="onnx::Reshape_4154" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4154"/>
-			</rt_info>
+		<layer id="2278" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Reshape_7" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -40652,333 +37623,270 @@
 					<dim>160</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_4154">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/Reshape_7_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>256</dim>
-					<dim>160</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2359" name="onnx::Gather_4131" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4131"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_4131"/>
-			</output>
-		</layer>
-		<layer id="2360" name="Constant_15161" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_15161"/>
-			</rt_info>
+		<layer id="2279" name="Constant_85519_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="1133507282" size="3276800" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2361" name="onnx::Unsqueeze_4132" type="Gather" version="opset8">
-			<data batch_dims="0"/>
+		<layer id="2280" name="Constant_85519" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_15161, onnx::Gather_4131, onnx::Unsqueeze_4132"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_4132"/>
-			</output>
-		</layer>
-		<layer id="2362" name="onnx::Mul_4155" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4155"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_4155"/>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2363" name="onnx::Unsqueeze_4156" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4155, onnx::Unsqueeze_4156"/>
-			</rt_info>
+		<layer id="2281" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/to_out.0/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_4156"/>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/to_out.0/MatMul_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2364" name="onnx::Unsqueeze_4161" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4161"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4161">
+		<layer id="2282" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/to_out.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
+				<port id="0" precision="FP32">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn1/to_out.0/Add_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2365" name="onnx::Concat_4162" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4162, onnx::Unsqueeze_4161"/>
-			</rt_info>
+		<layer id="2283" name="/up_blocks.1/attentions.0/transformer_blocks.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_4162">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/Add_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2366" name="Constant_88707" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_15165, onnx::Concat_4164, onnx::Gather_4134, onnx::Unsqueeze_4135, onnx::Unsqueeze_4163"/>
-			</rt_info>
+		<layer id="2284" name="Constant_23149" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2367" name="Constant_15165" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_15165"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="2368" name="onnx::Unsqueeze_4135" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_15165, onnx::Concat_4164, onnx::Gather_4134, onnx::Unsqueeze_4135, onnx::Unsqueeze_4163"/>
-			</rt_info>
+		<layer id="2285" name="/up_blocks.1/attentions.0/transformer_blocks.0/norm2/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_4164">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/norm2/Div_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2369" name="onnx::Div_4157" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_4157"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_4157"/>
-			</output>
-		</layer>
-		<layer id="2370" name="onnx::Cast_4158" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_4158, onnx::Cast_4159, onnx::Div_4157, onnx::Unsqueeze_4160"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_4158,onnx::Cast_4159,onnx::Unsqueeze_4160"/>
-			</output>
-		</layer>
-		<layer id="2371" name="onnx::Unsqueeze_4165" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4165"/>
-			</rt_info>
+		<layer id="2286" name="Constant_87049_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="1136784082" size="2560" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4165">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2372" name="onnx::Concat_4166" type="Unsqueeze" version="opset1">
+		<layer id="2287" name="Constant_87049" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4166, onnx::Unsqueeze_4165"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_4166">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2373" name="onnx::Reshape_4167" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4167"/>
-			</rt_info>
+		<layer id="2288" name="/up_blocks.1/attentions.0/transformer_blocks.0/norm2/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_4167">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2374" name="k.115" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.115"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/norm2/Mul_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>256</dim>
-					<dim>160</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+					<dim>1280</dim>
 				</port>
-			</input>
+			</output>
+		</layer>
+		<layer id="2289" name="Constant_87050_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="1136786642" size="2560" />
 			<output>
-				<port id="2" precision="FP32" names="k.115">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2375" name="onnx::Mul_4208" type="Einsum" version="opset7">
-			<data equation="bid,bjd-&gt;bij"/>
+		<layer id="2290" name="Constant_87050" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4208"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_4208">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>256</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2376" name="Constant_150457" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1" offset="316114004" size="4"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2377" name="onnx::Softmax_4210" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Softmax_4210"/>
-			</rt_info>
+		<layer id="2291" name="/up_blocks.1/attentions.0/transformer_blocks.0/norm2/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>256</dim>
+					<dim>2</dim>
 					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Softmax_4210">
-					<dim>16</dim>
-					<dim>256</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/norm2/Add_1_output_0">
+					<dim>2</dim>
 					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2378" name="attn.55" type="SoftMax" version="opset8">
-			<data axis="-1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="attn.55"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>256</dim>
-				</port>
-			</input>
+		<layer id="2292" name="Constant_85527_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="1136789202" size="3276800" />
 			<output>
-				<port id="1" precision="FP32" names="attn.55">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>256</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2379" name="Constant_147985" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="2260459824" size="6553600"/>
+		<layer id="2293" name="Constant_85527" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8842, v.111"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2380" name="v.111" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8842, v.111"/>
-			</rt_info>
+		<layer id="2294" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/to_q/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -40991,153 +37899,162 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.111">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/to_q/MatMul_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2381" name="Constant_107682" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
+		<layer id="2295" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="154780544" size="32" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Constant_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2382" name="onnx::Gather_4169" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4169, onnx::Gather_4172, onnx::Gather_4175"/>
-			</rt_info>
+		<layer id="2296" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_4169,onnx::Gather_4172,onnx::Gather_4175">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Reshape_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>8</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2383" name="onnx::Gather_4176" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4176"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_4176"/>
-			</output>
-		</layer>
-		<layer id="2384" name="Constant_15410" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_15410"/>
-			</rt_info>
+		<layer id="2297" name="Constant_23172" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64">
+					<dim>4</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2385" name="onnx::Div_4177" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_15410, onnx::Div_4177, onnx::Gather_4176"/>
-			</rt_info>
+		<layer id="2298" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Transpose" type="Transpose" version="opset1">
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>8</dim>
+					<dim>160</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_4177"/>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Transpose_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>256</dim>
+					<dim>160</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2386" name="onnx::Div_4178" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_4178"/>
-			</rt_info>
+		<layer id="2299" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="154780576" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_4178"/>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Constant_1_output_0">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2387" name="onnx::Cast_4179" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_4179, onnx::Cast_4180, onnx::Div_4178, onnx::Unsqueeze_4181"/>
-			</rt_info>
+		<layer id="2300" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>256</dim>
+					<dim>160</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>3</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_4179,onnx::Cast_4180,onnx::Unsqueeze_4181"/>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Reshape_1_output_0">
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>160</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2388" name="onnx::Unsqueeze_4189" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4189"/>
-			</rt_info>
+		<layer id="2301" name="Constant_85534_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 768" offset="1140066002" size="1966080" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4189">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2389" name="onnx::Concat_4190" type="Unsqueeze" version="opset1">
+		<layer id="2302" name="Constant_85534" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4190, onnx::Unsqueeze_4189"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>768</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_4190">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2390" name="onnx::Reshape_4191" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_4191"/>
-			</rt_info>
+		<layer id="2303" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/to_k/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>77</dim>
+					<dim>768</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>768</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/to_k/MatMul_output_0">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2304" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="169859034" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Constant_2_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2391" name="onnx::Transpose_4192" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_4192"/>
-			</rt_info>
+		<layer id="2305" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Reshape_2" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
+					<dim>77</dim>
 					<dim>1280</dim>
 				</port>
 				<port id="1" precision="I64">
@@ -41145,33 +38062,27 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_4192">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Reshape_2_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
+					<dim>77</dim>
 					<dim>8</dim>
 					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2392" name="Constant_15523" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_15523"/>
-			</rt_info>
+		<layer id="2306" name="Constant_23188" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2393" name="onnx::Reshape_4193" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4193"/>
-			</rt_info>
+		<layer id="2307" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Transpose_1" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
+					<dim>77</dim>
 					<dim>8</dim>
 					<dim>160</dim>
 				</port>
@@ -41180,461 +38091,225 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_4193">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Transpose_1_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
-					<dim>256</dim>
+					<dim>77</dim>
 					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2394" name="onnx::Gather_4170" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4170"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_4170"/>
-			</output>
-		</layer>
-		<layer id="2395" name="Constant_15402" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_15402"/>
-			</rt_info>
+		<layer id="2308" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="169859066" size="24" />
 			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="2396" name="onnx::Unsqueeze_4171" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_15402, onnx::Gather_4170, onnx::Unsqueeze_4171"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Constant_3_output_0">
 					<dim>3</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_4171"/>
-			</output>
-		</layer>
-		<layer id="2397" name="onnx::Mul_4194" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4194"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_4194"/>
 			</output>
 		</layer>
-		<layer id="2398" name="onnx::Unsqueeze_4195" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4194, onnx::Unsqueeze_4195"/>
-			</rt_info>
+		<layer id="2309" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Reshape_3" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>77</dim>
+					<dim>160</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>3</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_4195"/>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Reshape_3_output_0">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>160</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2399" name="onnx::Unsqueeze_4200" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4200"/>
-			</rt_info>
+		<layer id="2310" name="Constant_87051_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1" offset="158057400" size="2" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4200">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2400" name="onnx::Concat_4201" type="Unsqueeze" version="opset1">
+		<layer id="2311" name="Constant_87051" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4201, onnx::Unsqueeze_4200"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_4201">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2401" name="Constant_88734" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_15406, onnx::Concat_4203, onnx::Gather_4173, onnx::Unsqueeze_4174, onnx::Unsqueeze_4202"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2402" name="Constant_15406" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_15406"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="2403" name="onnx::Unsqueeze_4174" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_15406, onnx::Concat_4203, onnx::Gather_4173, onnx::Unsqueeze_4174, onnx::Unsqueeze_4202"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_4203">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2404" name="onnx::Div_4196" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_4196"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_4196"/>
-			</output>
-		</layer>
-		<layer id="2405" name="onnx::Cast_4197" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_4197, onnx::Cast_4198, onnx::Div_4196, onnx::Unsqueeze_4199"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_4197,onnx::Cast_4198,onnx::Unsqueeze_4199"/>
-			</output>
-		</layer>
-		<layer id="2406" name="onnx::Unsqueeze_4204" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4204"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4204">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2407" name="onnx::Concat_4205" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4205, onnx::Unsqueeze_4204"/>
-			</rt_info>
+		<layer id="2312" name="Multiply_86205" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_4205">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>160</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="2408" name="onnx::Reshape_4206" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4206"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_4206">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2409" name="v.115" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="v.115"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>256</dim>
-					<dim>160</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="v.115">
+				<port id="2" precision="FP32">
 					<dim>16</dim>
-					<dim>256</dim>
+					<dim>77</dim>
 					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2410" name="out.55" type="Einsum" version="opset7">
-			<data equation="bij,bjd-&gt;bid"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="out.55"/>
-			</rt_info>
+		<layer id="2313" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Mul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
 					<dim>256</dim>
-					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>16</dim>
-					<dim>256</dim>
+					<dim>77</dim>
 					<dim>160</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="out.55">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Add_output_0,/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Mul_output_0">
 					<dim>16</dim>
 					<dim>256</dim>
-					<dim>160</dim>
+					<dim>77</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2411" name="onnx::Gather_4213" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4213, onnx::Gather_4216, onnx::Gather_4219"/>
-			</rt_info>
+		<layer id="2314" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Softmax" type="SoftMax" version="opset8">
+			<data axis="-1" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
 					<dim>256</dim>
-					<dim>160</dim>
+					<dim>77</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_4213,onnx::Gather_4216,onnx::Gather_4219">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2412" name="onnx::Gather_4214" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4214"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_4214"/>
-			</output>
-		</layer>
-		<layer id="2413" name="Constant_15648" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_15648"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="2414" name="onnx::Div_4215" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_15648, onnx::Div_4215, onnx::Gather_4214"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="1" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Cast_output_0,/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Softmax_output_0">
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>77</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Div_4215"/>
-			</output>
-		</layer>
-		<layer id="2415" name="onnx::Div_4222" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_4222"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_4222"/>
-			</output>
-		</layer>
-		<layer id="2416" name="onnx::Cast_4223" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_4223, onnx::Cast_4224, onnx::Div_4222, onnx::Unsqueeze_4225"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_4223,onnx::Cast_4224,onnx::Unsqueeze_4225"/>
 			</output>
 		</layer>
-		<layer id="2417" name="onnx::Unsqueeze_4227" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4227"/>
-			</rt_info>
+		<layer id="2315" name="Constant_85541_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 768" offset="1142032082" size="1966080" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4227">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2418" name="onnx::Concat_4228" type="Unsqueeze" version="opset1">
+		<layer id="2316" name="Constant_85541" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4228, onnx::Unsqueeze_4227"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>768</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_4228">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2419" name="Constant_90702" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="19464604" size="16"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_4235"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2420" name="Constant_90703" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="2421" name="Gather_90704" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_4235"/>
-			</rt_info>
+		<layer id="2317" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/to_v/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>77</dim>
+					<dim>768</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>768</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/to_v/MatMul_output_0">
 					<dim>2</dim>
+					<dim>77</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2422" name="onnx::Reshape_4235" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_4235"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>2</dim>
-				</port>
-			</input>
+		<layer id="2318" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Constant_4" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="169859034" size="32" />
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_4235">
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Constant_4_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2423" name="onnx::Transpose_4236" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_4236"/>
-			</rt_info>
+		<layer id="2319" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Reshape_4" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>1280</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_4236">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Reshape_4_output_0">
 					<dim>2</dim>
+					<dim>77</dim>
 					<dim>8</dim>
-					<dim>256</dim>
 					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2424" name="Constant_15769" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_15769"/>
-			</rt_info>
+		<layer id="2320" name="Constant_23204" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2425" name="onnx::Reshape_4237" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4237"/>
-			</rt_info>
+		<layer id="2321" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Transpose_2" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>77</dim>
 					<dim>8</dim>
-					<dim>256</dim>
 					<dim>160</dim>
 				</port>
 				<port id="1" precision="I64">
@@ -41642,213 +38317,133 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_4237">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Transpose_2_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
 					<dim>8</dim>
+					<dim>77</dim>
 					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2426" name="onnx::Div_4238" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_4238"/>
-			</rt_info>
+		<layer id="2322" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="169859066" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_4238"/>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Constant_5_output_0">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2427" name="onnx::Cast_4239" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_4239, onnx::Cast_4240, onnx::Div_4238, onnx::Unsqueeze_4241"/>
-			</rt_info>
+		<layer id="2323" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Reshape_5" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>77</dim>
+					<dim>160</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>3</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_4239,onnx::Cast_4240,onnx::Unsqueeze_4241"/>
-			</output>
-		</layer>
-		<layer id="2428" name="onnx::Unsqueeze_4244" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4244"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4244">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Reshape_5_output_0">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2429" name="onnx::Concat_4245" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4245, onnx::Unsqueeze_4244"/>
-			</rt_info>
+		<layer id="2324" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/MatMul_1" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="false" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>77</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>160</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_4245">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/MatMul_1_output_0">
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2430" name="Constant_88761" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_15652, onnx::Concat_4247, onnx::Gather_4217, onnx::Unsqueeze_4218, onnx::Unsqueeze_4246"/>
-			</rt_info>
+		<layer id="2325" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Constant_8" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="161334202" size="32" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Constant_8_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2431" name="Constant_15652" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_15652"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="2432" name="onnx::Unsqueeze_4218" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_15652, onnx::Concat_4247, onnx::Gather_4217, onnx::Unsqueeze_4218, onnx::Unsqueeze_4246"/>
-			</rt_info>
+		<layer id="2326" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Reshape_6" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_4247">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Reshape_6_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2433" name="onnx::Gather_4220" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4220"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_4220"/>
-			</output>
-		</layer>
-		<layer id="2434" name="Constant_15656" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_15656"/>
-			</rt_info>
+		<layer id="2327" name="Constant_23230" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="2435" name="onnx::Unsqueeze_4221" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_15656, onnx::Gather_4220, onnx::Unsqueeze_4221"/>
-			</rt_info>
-			<input>
 				<port id="0" precision="I64">
-					<dim>3</dim>
+					<dim>4</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_4221"/>
-			</output>
-		</layer>
-		<layer id="2436" name="onnx::Mul_4242" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4242"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_4242"/>
 			</output>
 		</layer>
-		<layer id="2437" name="onnx::Unsqueeze_4243" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4242, onnx::Unsqueeze_4243"/>
-			</rt_info>
+		<layer id="2328" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Transpose_4" type="Transpose" version="opset1">
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_4243"/>
-			</output>
-		</layer>
-		<layer id="2438" name="onnx::Unsqueeze_4248" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4248"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4248">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="2439" name="onnx::Concat_4249" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4249, onnx::Unsqueeze_4248"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_4249">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Transpose_4_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>8</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2440" name="onnx::Reshape_4250" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4250"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
+		<layer id="2329" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Constant_9" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="151498600" size="24" />
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_4250">
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Constant_9_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2441" name="onnx::MatMul_4251" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_4251"/>
-			</rt_info>
+		<layer id="2330" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Reshape_7" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -41861,30 +38456,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_4251">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/Reshape_7_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2442" name="Constant_147992" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="2267013424" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4253, onnx::MatMul_8847"/>
-			</rt_info>
+		<layer id="2331" name="Constant_85548_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="1143998162" size="3276800" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2443" name="onnx::Add_4253" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="2332" name="Constant_85548" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4253, onnx::MatMul_8847"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2333" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/to_out.0/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -41897,18 +38504,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_4253">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/to_out.0/MatMul_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2444" name="input.624" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.624"/>
-			</rt_info>
+		<layer id="2334" name="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/to_out.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>1</dim>
@@ -41922,18 +38526,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.624">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/attn2/to_out.0/Add_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2445" name="input.628" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.628"/>
-			</rt_info>
+		<layer id="2335" name="/up_blocks.1/attentions.0/transformer_blocks.0/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -41947,29 +38548,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.628">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/Add_1_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2446" name="Constant_15890" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_15890"/>
-			</rt_info>
+		<layer id="2336" name="Constant_23242" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2447" name="onnx::Mul_4264" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4260, onnx::Div_4263, onnx::Mul_4264, onnx::Pow_4257, onnx::ReduceMean_4259, onnx::Sqrt_4262, onnx::Sub_4256"/>
-			</rt_info>
+		<layer id="2337" name="/up_blocks.1/attentions.0/transformer_blocks.0/norm3/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -41981,28 +38576,45 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_4264">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/norm3/Div_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2448" name="Constant_150459" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="2273567024" size="5120"/>
+		<layer id="2338" name="Constant_87053_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="1147274962" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2449" name="onnx::Add_4265" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2339" name="Constant_87053" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4265"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2340" name="/up_blocks.1/attentions.0/transformer_blocks.0/norm3/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -42016,28 +38628,45 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_4265">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/norm3/Mul_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2450" name="Constant_150460" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="2273572144" size="5120"/>
+		<layer id="2341" name="Constant_87054_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="1147277522" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2451" name="onnx::MatMul_4266" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2342" name="Constant_87054" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_4266"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2343" name="/up_blocks.1/attentions.0/transformer_blocks.0/norm3/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -42051,30 +38680,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_4266">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/norm3/Add_1_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2452" name="Constant_148000" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="2273577264" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8848, q.119"/>
-			</rt_info>
+		<layer id="2344" name="Constant_85556_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="10240, 1280" offset="1147280082" size="26214400" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1280</dim>
+				<port id="0" precision="FP16">
+					<dim>10240</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2453" name="q.119" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="2345" name="Constant_85556" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8848, q.119"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>10240</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>10240</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2346" name="/up_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/proj/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -42082,135 +38723,170 @@
 					<dim>1280</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1280</dim>
+					<dim>10240</dim>
 					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.119">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/proj/MatMul_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2454" name="Constant_107751" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
+		<layer id="2347" name="/up_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/proj/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>10240</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>10240</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/proj/Add_output_0">
 					<dim>2</dim>
+					<dim>256</dim>
+					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2455" name="onnx::Gather_4273" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4273, onnx::Gather_4276, onnx::Gather_4279"/>
-			</rt_info>
+		<layer id="2348" name="Constant_77615" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
+			<output>
+				<port id="0" precision="I64">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2349" name="Constant_77616" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
+			<output>
+				<port id="0" precision="I64">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2350" name="Constant_77612" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
+			<output>
+				<port id="0" precision="I64">
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2351" name="/up_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>10240</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_4273,onnx::Gather_4276,onnx::Gather_4279">
+				<port id="1" precision="I64" names="/up_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Shape_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2456" name="onnx::Gather_4280" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4280"/>
-			</rt_info>
+		<layer id="2352" name="/up_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Gather_4280"/>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Constant_output_0">
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2457" name="Constant_15916" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_15916"/>
-			</rt_info>
+		<layer id="2353" name="Constant_23259" type="Const" version="opset1">
+			<data element_type="i64" shape="" offset="2143392" size="8" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64" />
 			</output>
 		</layer>
-		<layer id="2458" name="onnx::Div_4281" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_15916, onnx::Div_4281, onnx::Gather_4280"/>
-			</rt_info>
+		<layer id="2354" name="/up_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Gather" type="Gather" version="opset8">
+			<data batch_dims="0" />
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
+				<port id="1" precision="I64">
+					<dim>1</dim>
+				</port>
+				<port id="2" precision="I64" />
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_4281"/>
+				<port id="3" precision="I64" names="/up_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Gather_output_0">
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2459" name="onnx::Div_4282" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_4282"/>
-			</rt_info>
+		<layer id="2355" name="/up_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="2143400" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_4282"/>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Constant_2_output_0">
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2460" name="onnx::Cast_4283" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_4283, onnx::Cast_4284, onnx::Div_4282, onnx::Unsqueeze_4285"/>
-			</rt_info>
+		<layer id="2356" name="/up_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="I64">
+					<dim>1</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_4283,onnx::Cast_4284,onnx::Unsqueeze_4285"/>
+				<port id="2" precision="I64" names="/up_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Add_output_0">
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2461" name="onnx::Unsqueeze_4293" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4293"/>
-			</rt_info>
+		<layer id="2357" name="/up_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4293">
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Constant_3_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2462" name="onnx::Concat_4294" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4294, onnx::Unsqueeze_4293"/>
-			</rt_info>
+		<layer id="2358" name="/up_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Div" type="Divide" version="opset1">
+			<data auto_broadcast="numpy" m_pythondiv="true" />
 			<input>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64">
+					<dim>1</dim>
+				</port>
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_4294">
+				<port id="2" precision="I64" names="/up_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Div_output_0,/up_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Mul_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2463" name="onnx::Reshape_4295" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_4295"/>
-			</rt_info>
+		<layer id="2359" name="Constant_77611" type="Const" version="opset1">
+			<data element_type="i32" shape="1" offset="12970946" size="4" />
+			<output>
+				<port id="0" precision="I32">
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2360" name="ScatterUpdate_77617" type="ScatterUpdate" version="opset3">
 			<input>
 				<port id="0" precision="I64">
-					<dim>2</dim>
+					<dim>3</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>1</dim>
@@ -42218,335 +38894,327 @@
 				<port id="2" precision="I64">
 					<dim>1</dim>
 				</port>
+				<port id="3" precision="I32">
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64">
-					<dim>4</dim>
+				<port id="4" precision="I64">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2464" name="onnx::Transpose_4296" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_4296"/>
-			</rt_info>
+		<layer id="2361" name="Constant_77620" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970950" size="24" />
+			<output>
+				<port id="0" precision="I64">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2362" name="/up_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Slice" type="StridedSlice" version="opset1">
+			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask="" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>10240</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>3</dim>
+				</port>
+				<port id="2" precision="I64">
+					<dim>3</dim>
+				</port>
+				<port id="3" precision="I64">
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_4296">
+				<port id="4" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Slice_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
-					<dim>8</dim>
-					<dim>160</dim>
+					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2465" name="Constant_16029" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_16029"/>
-			</rt_info>
+		<layer id="2363" name="Constant_77684" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>4</dim>
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2466" name="onnx::Reshape_4297" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4297"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>8</dim>
-					<dim>160</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
+		<layer id="2364" name="Constant_77683" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_4297">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+				<port id="0" precision="I64">
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2467" name="onnx::Gather_4274" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4274"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_4274"/>
-			</output>
-		</layer>
-		<layer id="2468" name="Constant_15908" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_15908"/>
-			</rt_info>
+		<layer id="2365" name="Constant_77682" type="Const" version="opset1">
+			<data element_type="i32" shape="1" offset="12970946" size="4" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I32">
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2469" name="onnx::Unsqueeze_4275" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_15908, onnx::Gather_4274, onnx::Unsqueeze_4275"/>
-			</rt_info>
+		<layer id="2366" name="ScatterUpdate_77685" type="ScatterUpdate" version="opset3">
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
+				<port id="1" precision="I64">
+					<dim>1</dim>
+				</port>
+				<port id="2" precision="I64">
+					<dim>1</dim>
+				</port>
+				<port id="3" precision="I32">
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_4275"/>
-			</output>
-		</layer>
-		<layer id="2470" name="onnx::Mul_4298" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4298"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_4298"/>
+				<port id="4" precision="I64">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2471" name="onnx::Unsqueeze_4299" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4298, onnx::Unsqueeze_4299"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
+		<layer id="2367" name="Constant_77686" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_4299"/>
+				<port id="0" precision="I64">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2472" name="onnx::Unsqueeze_4304" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4304"/>
-			</rt_info>
+		<layer id="2368" name="/up_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4304">
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Constant_5_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2473" name="onnx::Concat_4305" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4305, onnx::Unsqueeze_4304"/>
-			</rt_info>
+		<layer id="2369" name="/up_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Mul_1" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64">
+					<dim>1</dim>
+				</port>
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_4305">
+				<port id="2" precision="I64" names="/up_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Mul_1_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2474" name="Constant_88788" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_15912, onnx::Concat_4307, onnx::Gather_4277, onnx::Unsqueeze_4278, onnx::Unsqueeze_4306"/>
-			</rt_info>
-			<output>
+		<layer id="2370" name="ScatterUpdate_77687" type="ScatterUpdate" version="opset3">
+			<input>
 				<port id="0" precision="I64">
+					<dim>3</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>1</dim>
+				</port>
+				<port id="2" precision="I64">
 					<dim>1</dim>
 				</port>
+				<port id="3" precision="I32">
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="4" precision="I64">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2475" name="Constant_15912" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_15912"/>
-			</rt_info>
+		<layer id="2371" name="Constant_77690" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970950" size="24" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2476" name="onnx::Unsqueeze_4278" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_15912, onnx::Concat_4307, onnx::Gather_4277, onnx::Unsqueeze_4278, onnx::Unsqueeze_4306"/>
-			</rt_info>
+		<layer id="2372" name="/up_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Slice_1" type="StridedSlice" version="opset1">
+			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask="" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>10240</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>3</dim>
+				</port>
+				<port id="2" precision="I64">
+					<dim>3</dim>
+				</port>
+				<port id="3" precision="I64">
+					<dim>3</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_4307">
-					<dim>1</dim>
+				<port id="4" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Slice_1_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2477" name="onnx::Div_4300" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_4300"/>
-			</rt_info>
+		<layer id="2373" name="/up_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Mul_3" type="Gelu" version="opset7">
+			<data approximation_mode="ERF" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>5120</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_4300"/>
+				<port id="1" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Mul_3_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>5120</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2478" name="onnx::Cast_4301" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_4301, onnx::Cast_4302, onnx::Div_4300, onnx::Unsqueeze_4303"/>
-			</rt_info>
+		<layer id="2374" name="/up_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Mul_4" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>5120</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>5120</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_4301,onnx::Cast_4302,onnx::Unsqueeze_4303"/>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/ff/net.0/Mul_4_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>5120</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2479" name="onnx::Unsqueeze_4308" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4308"/>
-			</rt_info>
+		<layer id="2375" name="Constant_85564_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 5120" offset="1173494482" size="13107200" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4308">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2480" name="onnx::Concat_4309" type="Unsqueeze" version="opset1">
+		<layer id="2376" name="Constant_85564" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4309, onnx::Unsqueeze_4308"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>5120</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_4309">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2481" name="onnx::Reshape_4310" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4310"/>
-			</rt_info>
+		<layer id="2377" name="/up_blocks.1/attentions.0/transformer_blocks.0/ff/net.2/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>5120</dim>
 				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>5120</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_4310">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/ff/net.2/MatMul_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2482" name="q.123" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="q.123"/>
-			</rt_info>
+		<layer id="2378" name="/up_blocks.1/attentions.0/transformer_blocks.0/ff/net.2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+				<port id="1" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>256</dim>
-					<dim>160</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.123">
-					<dim>16</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/ff/net.2/Add_output_0">
+					<dim>2</dim>
 					<dim>256</dim>
-					<dim>160</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2483" name="Constant_148007" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 768" offset="2280130864" size="3932160"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.119, onnx::MatMul_8849"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
 					<dim>1280</dim>
-					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2484" name="k.119" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.119, onnx::MatMul_8849"/>
-			</rt_info>
+		<layer id="2379" name="/up_blocks.1/attentions.0/transformer_blocks.0/Add_2" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>77</dim>
-					<dim>768</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
-					<dim>768</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.119">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/transformer_blocks.0/Add_2_output_0">
 					<dim>2</dim>
-					<dim>77</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2485" name="onnx::Transpose_4325" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_4325"/>
-			</rt_info>
+		<layer id="2380" name="/up_blocks.1/attentions.0/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="214428690" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.0/Constant_1_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2381" name="/up_blocks.1/attentions.0/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>77</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 				<port id="1" precision="I64">
@@ -42554,755 +39222,701 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_4325">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/Reshape_1_output_0">
 					<dim>2</dim>
-					<dim>77</dim>
-					<dim>8</dim>
-					<dim>160</dim>
+					<dim>16</dim>
+					<dim>16</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2486" name="Constant_16150" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_16150"/>
-			</rt_info>
+		<layer id="2382" name="Constant_23424" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="13790206" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2487" name="onnx::Reshape_4326" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4326"/>
-			</rt_info>
+		<layer id="2383" name="/up_blocks.1/attentions.0/Transpose_1" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>77</dim>
-					<dim>8</dim>
-					<dim>160</dim>
+					<dim>16</dim>
+					<dim>16</dim>
+					<dim>1280</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_4326">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/Transpose_1_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>77</dim>
-					<dim>160</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2384" name="up_blocks.1.attentions.0.proj_out.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280, 1, 1" offset="1186601682" size="3276800" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2488" name="k.123" type="Reshape" version="opset1">
-			<data special_zero="true"/>
+		<layer id="2385" name="up_blocks.1.attentions.0.proj_out.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="k.123"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>77</dim>
-					<dim>160</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.123">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>160</dim>
+				<port id="1" precision="FP32" names="up_blocks.1.attentions.0.proj_out.weight">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2489" name="onnx::Mul_4362" type="Einsum" version="opset7">
-			<data equation="bid,bjd-&gt;bij"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4362"/>
-			</rt_info>
+		<layer id="2386" name="/up_blocks.1/attentions.0/proj_out/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
 					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>160</dim>
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_4362">
+				<port id="2" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
 					<dim>16</dim>
-					<dim>256</dim>
-					<dim>77</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2490" name="Constant_150461" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1" offset="316114004" size="4"/>
+		<layer id="2387" name="Reshape_23446_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="1189878482" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>1280</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2491" name="onnx::Softmax_4364" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2388" name="Reshape_23446" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Softmax_4364"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>77</dim>
-				</port>
-				<port id="1" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>1280</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Softmax_4364">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>77</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2492" name="attn.59" type="SoftMax" version="opset8">
-			<data axis="-1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="attn.59"/>
-			</rt_info>
+		<layer id="2389" name="/up_blocks.1/attentions.0/proj_out/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
 					<dim>16</dim>
-					<dim>256</dim>
-					<dim>77</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="FP32" names="attn.59">
 					<dim>16</dim>
-					<dim>256</dim>
-					<dim>77</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="2493" name="Constant_148014" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 768" offset="2284063024" size="3932160"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8850, v.119"/>
-			</rt_info>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/proj_out/Conv_output_0">
+					<dim>2</dim>
 					<dim>1280</dim>
-					<dim>768</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2494" name="v.119" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8850, v.119"/>
-			</rt_info>
+		<layer id="2390" name="/up_blocks.1/attentions.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>77</dim>
-					<dim>768</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 				<port id="1" precision="FP32">
+					<dim>2</dim>
 					<dim>1280</dim>
-					<dim>768</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.119">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.0/Add_output_0">
 					<dim>2</dim>
-					<dim>77</dim>
 					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2495" name="onnx::Transpose_4350" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_4350"/>
-			</rt_info>
+		<layer id="2391" name="/up_blocks.1/Concat_1" type="Concat" version="opset1">
+			<data axis="1" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>77</dim>
 					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_4350">
+				<port id="2" precision="FP32" names="/up_blocks.1/Concat_1_output_0">
 					<dim>2</dim>
-					<dim>77</dim>
-					<dim>8</dim>
-					<dim>160</dim>
+					<dim>2560</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2496" name="Constant_16158" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_16158"/>
-			</rt_info>
+		<layer id="2392" name="up_blocks.1.resnets.1.conv_shortcut.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 2560, 1, 1" offset="1189881042" size="6553600" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>2560</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2497" name="onnx::Reshape_4351" type="Transpose" version="opset1">
+		<layer id="2393" name="up_blocks.1.resnets.1.conv_shortcut.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4351"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>77</dim>
-					<dim>8</dim>
-					<dim>160</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>2560</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_4351">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>77</dim>
-					<dim>160</dim>
+				<port id="1" precision="FP32" names="up_blocks.1.resnets.1.conv_shortcut.weight">
+					<dim>1280</dim>
+					<dim>2560</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2498" name="v.123" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="v.123"/>
-			</rt_info>
+		<layer id="2394" name="/up_blocks.1/resnets.1/conv_shortcut/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>77</dim>
-					<dim>160</dim>
+					<dim>2560</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>2560</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.123">
+				<port id="2" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
 					<dim>16</dim>
-					<dim>77</dim>
-					<dim>160</dim>
+					<dim>16</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2395" name="Reshape_23831_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="1196434642" size="2560" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2499" name="out.59" type="Einsum" version="opset7">
-			<data equation="bij,bjd-&gt;bid"/>
+		<layer id="2396" name="Reshape_23831" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="out.59"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>77</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>160</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="out.59">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2500" name="onnx::Gather_4367" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4367, onnx::Gather_4370, onnx::Gather_4373"/>
-			</rt_info>
+		<layer id="2397" name="/up_blocks.1/resnets.1/conv_shortcut/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
 					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+					<dim>16</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_4367,onnx::Gather_4370,onnx::Gather_4373">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.1/conv_shortcut/Conv_output_0">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2501" name="onnx::Gather_4368" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4368"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_4368"/>
-			</output>
-		</layer>
-		<layer id="2502" name="Constant_16170" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_16170"/>
-			</rt_info>
+		<layer id="2398" name="/up_blocks.1/resnets.1/norm1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="2503" name="onnx::Div_4369" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_16170, onnx::Div_4369, onnx::Gather_4368"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="I64" names="/up_blocks.1/resnets.1/norm1/Constant_output_0">
 					<dim>3</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Div_4369"/>
-			</output>
-		</layer>
-		<layer id="2504" name="onnx::Div_4376" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_4376"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_4376"/>
 			</output>
 		</layer>
-		<layer id="2505" name="onnx::Cast_4377" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_4377, onnx::Cast_4378, onnx::Div_4376, onnx::Unsqueeze_4379"/>
-			</rt_info>
+		<layer id="2399" name="/up_blocks.1/resnets.1/norm1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>2560</dim>
+					<dim>16</dim>
+					<dim>16</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>3</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_4377,onnx::Cast_4378,onnx::Unsqueeze_4379"/>
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.1/norm1/Reshape_output_0">
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>20480</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2506" name="onnx::Unsqueeze_4381" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4381"/>
-			</rt_info>
+		<layer id="2400" name="Constant_23485" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4381">
+				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2507" name="onnx::Concat_4382" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4382, onnx::Unsqueeze_4381"/>
-			</rt_info>
+		<layer id="2401" name="MVN_23486" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>20480</dim>
+				</port>
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_4382">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2508" name="Constant_90712" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="19464604" size="16"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_4389"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.1/norm1/InstanceNormalization_output_0">
 					<dim>2</dim>
+					<dim>32</dim>
+					<dim>20480</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2509" name="Constant_90713" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="2510" name="Gather_90714" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_4389"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>2</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64">
-					<dim>2</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2511" name="onnx::Reshape_4389" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_4389"/>
-			</rt_info>
+		<layer id="2402" name="/up_blocks.1/resnets.1/norm1/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>2560</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_4389">
+				<port id="1" precision="I64" names="/up_blocks.1/resnets.1/norm1/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2512" name="onnx::Transpose_4390" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_4390"/>
-			</rt_info>
+		<layer id="2403" name="/up_blocks.1/resnets.1/norm1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>20480</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_4390">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.1/norm1/Reshape_1_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+					<dim>2560</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2513" name="Constant_16291" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_16291"/>
-			</rt_info>
+		<layer id="2404" name="Constant_87057_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 2560, 1, 1" offset="1196437202" size="5120" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>2560</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2514" name="onnx::Reshape_4391" type="Transpose" version="opset1">
+		<layer id="2405" name="Constant_87057" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4391"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>256</dim>
-					<dim>160</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>2560</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_4391">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>8</dim>
-					<dim>160</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>2560</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2515" name="onnx::Div_4392" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_4392"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_4392"/>
-			</output>
-		</layer>
-		<layer id="2516" name="onnx::Cast_4393" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_4393, onnx::Cast_4394, onnx::Div_4392, onnx::Unsqueeze_4395"/>
-			</rt_info>
+		<layer id="2406" name="/up_blocks.1/resnets.1/norm1/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>2560</dim>
+					<dim>16</dim>
+					<dim>16</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>2560</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_4393,onnx::Cast_4394,onnx::Unsqueeze_4395"/>
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.1/norm1/Mul_output_0">
+					<dim>2</dim>
+					<dim>2560</dim>
+					<dim>16</dim>
+					<dim>16</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2517" name="onnx::Unsqueeze_4398" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4398"/>
-			</rt_info>
+		<layer id="2407" name="Constant_87058_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 2560, 1, 1" offset="1196442322" size="5120" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4398">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>2560</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2518" name="onnx::Concat_4399" type="Unsqueeze" version="opset1">
+		<layer id="2408" name="Constant_87058" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4399, onnx::Unsqueeze_4398"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>2560</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_4399">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>2560</dim>
 					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2519" name="Constant_88815" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_16174, onnx::Concat_4401, onnx::Gather_4371, onnx::Unsqueeze_4372, onnx::Unsqueeze_4400"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2520" name="Constant_16174" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_16174"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="2521" name="onnx::Unsqueeze_4372" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_16174, onnx::Concat_4401, onnx::Gather_4371, onnx::Unsqueeze_4372, onnx::Unsqueeze_4400"/>
-			</rt_info>
+		<layer id="2409" name="/up_blocks.1/resnets.1/norm1/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>2560</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>2560</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_4401">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.1/norm1/Add_output_0">
+					<dim>2</dim>
+					<dim>2560</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2522" name="onnx::Gather_4374" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4374"/>
-			</rt_info>
+		<layer id="2410" name="/up_blocks.1/resnets.1/nonlinearity/Mul" type="Swish" version="opset4">
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>2560</dim>
+					<dim>16</dim>
+					<dim>16</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64" names="onnx::Gather_4374"/>
+				<port id="1" precision="FP32" names="/up_blocks.1/resnets.1/nonlinearity/Mul_output_0">
+					<dim>2</dim>
+					<dim>2560</dim>
+					<dim>16</dim>
+					<dim>16</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2523" name="Constant_16178" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_16178"/>
-			</rt_info>
+		<layer id="2411" name="up_blocks.1.resnets.1.conv1.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 2560, 3, 3" offset="1196447442" size="58982400" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>2560</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2524" name="onnx::Unsqueeze_4375" type="Gather" version="opset8">
-			<data batch_dims="0"/>
+		<layer id="2412" name="up_blocks.1.resnets.1.conv1.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_16178, onnx::Gather_4374, onnx::Unsqueeze_4375"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>2560</dim>
+					<dim>3</dim>
 					<dim>3</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_4375"/>
-			</output>
-		</layer>
-		<layer id="2525" name="onnx::Mul_4396" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4396"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_4396"/>
+				<port id="1" precision="FP32" names="up_blocks.1.resnets.1.conv1.weight">
+					<dim>1280</dim>
+					<dim>2560</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2526" name="onnx::Unsqueeze_4397" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4396, onnx::Unsqueeze_4397"/>
-			</rt_info>
+		<layer id="2413" name="/up_blocks.1/resnets.1/conv1/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>2560</dim>
+					<dim>16</dim>
+					<dim>16</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>2560</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_4397"/>
+				<port id="2" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2527" name="onnx::Unsqueeze_4402" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4402"/>
-			</rt_info>
+		<layer id="2414" name="Reshape_23610_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="1255429842" size="2560" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4402">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2528" name="onnx::Concat_4403" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4403, onnx::Unsqueeze_4402"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+					<dim>1280</dim>
 					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_4403">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2529" name="onnx::Reshape_4404" type="Concat" version="opset1">
-			<data axis="0"/>
+		<layer id="2415" name="Reshape_23610" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4404"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
+					<dim>1280</dim>
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_4404">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2530" name="onnx::MatMul_4405" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_4405"/>
-			</rt_info>
+		<layer id="2416" name="/up_blocks.1/resnets.1/conv1/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>8</dim>
-					<dim>160</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_4405">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.1/conv1/Conv_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2417" name="up_blocks.1.resnets.1.time_emb_proj.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="1255432402" size="3276800" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2531" name="Constant_148021" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="2287995184" size="6553600"/>
+		<layer id="2418" name="up_blocks.1.resnets.1.time_emb_proj.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4407, onnx::MatMul_8871"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32" names="up_blocks.1.resnets.1.time_emb_proj.weight">
 					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2532" name="onnx::Add_4407" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4407, onnx::MatMul_8871"/>
-			</rt_info>
+		<layer id="2419" name="/up_blocks.1/resnets.1/time_emb_proj/Gemm/WithoutBiases" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 				<port id="1" precision="FP32">
@@ -43311,83 +39925,70 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_4407">
+				<port id="2" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2533" name="input.632" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2420" name="Constant_87059_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280" offset="1258709202" size="2560" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2421" name="Constant_87059" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.632"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1280</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.632">
-					<dim>2</dim>
-					<dim>256</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2534" name="input.636" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.636"/>
-			</rt_info>
+		<layer id="2422" name="/up_blocks.1/resnets.1/time_emb_proj/Gemm" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
+					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.636">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.1/time_emb_proj/Gemm_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2535" name="Constant_16412" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_16412"/>
-			</rt_info>
+		<layer id="2423" name="/up_blocks.1/resnets.1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64">
+				<port id="0" precision="I64" names="/up_blocks.1/resnets.1/Constant_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2536" name="onnx::Mul_4418" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4414, onnx::Div_4417, onnx::Mul_4418, onnx::Pow_4411, onnx::ReduceMean_4413, onnx::Sqrt_4416, onnx::Sub_4410"/>
-			</rt_info>
+		<layer id="2424" name="/up_blocks.1/resnets.1/Unsqueeze" type="Unsqueeze" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 				<port id="1" precision="I64">
@@ -43395,752 +39996,522 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_4418">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.1/Unsqueeze_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2537" name="Constant_150463" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="2294548784" size="5120"/>
+		<layer id="2425" name="/up_blocks.1/resnets.1/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="7064752" size="8" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="I64" names="/up_blocks.1/resnets.1/Constant_1_output_0">
 					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2538" name="onnx::Add_4419" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4419"/>
-			</rt_info>
+		<layer id="2426" name="/up_blocks.1/resnets.1/Unsqueeze_1" type="Unsqueeze" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
-				</port>
-				<port id="1" precision="FP32">
 					<dim>1</dim>
+				</port>
+				<port id="1" precision="I64">
 					<dim>1</dim>
-					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_4419">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.1/Unsqueeze_1_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2539" name="Constant_150464" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="2294553904" size="5120"/>
-			<output>
-				<port id="0" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2540" name="onnx::MatMul_4420" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_4420"/>
-			</rt_info>
+		<layer id="2427" name="/up_blocks.1/resnets.1/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_4420">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.1/Add_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2541" name="Constant_148029" type="Const" version="opset1">
-			<data element_type="f32" shape="10240, 1280" offset="2294559024" size="52428800"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4422, onnx::MatMul_8872"/>
-			</rt_info>
+		<layer id="2428" name="/up_blocks.1/resnets.1/norm2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>10240</dim>
-					<dim>1280</dim>
+				<port id="0" precision="I64" names="/up_blocks.1/resnets.1/norm2/Constant_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2542" name="onnx::Add_4422" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4422, onnx::MatMul_8872"/>
-			</rt_info>
+		<layer id="2429" name="/up_blocks.1/resnets.1/norm2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>10240</dim>
-					<dim>1280</dim>
+				<port id="1" precision="I64">
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_4422">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.1/norm2/Reshape_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
+					<dim>32</dim>
 					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2543" name="onnx::Shape_4423" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Shape_4423"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
+		<layer id="2430" name="Constant_23658" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
+			<output>
+				<port id="0" precision="I64">
 					<dim>1</dim>
-					<dim>10240</dim>
 				</port>
-				<port id="1" precision="FP32">
+			</output>
+		</layer>
+		<layer id="2431" name="MVN_23659" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
+			<input>
+				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
+					<dim>32</dim>
 					<dim>10240</dim>
 				</port>
+				<port id="1" precision="I64">
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Shape_4423">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.1/norm2/InstanceNormalization_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
+					<dim>32</dim>
 					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2544" name="Constant_127074" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_16440, onnx::Gather_4425, onnx::Mul_4434"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2545" name="Constant_127075" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_16440, onnx::Gather_4425, onnx::Mul_4434"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+		<layer id="2432" name="/up_blocks.1/resnets.1/norm2/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="2546" name="Constant_127071" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_16440, onnx::Gather_4425, onnx::Mul_4434"/>
-			</rt_info>
+			</input>
 			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="1" precision="I64" names="/up_blocks.1/resnets.1/norm2/Shape_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2547" name="onnx::Gather_4424" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4424"/>
-			</rt_info>
+		<layer id="2433" name="/up_blocks.1/resnets.1/norm2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
+					<dim>32</dim>
 					<dim>10240</dim>
 				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_4424">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.1/norm2/Reshape_1_output_0">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2548" name="onnx::Gather_4425" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4425"/>
-			</rt_info>
+		<layer id="2434" name="Constant_87060_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="1258711762" size="2560" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Gather_4425">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2549" name="Constant_16429" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
+		<layer id="2435" name="Constant_87060" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_16429"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="2550" name="onnx::Add_4426" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_16429, onnx::Add_4426, onnx::Gather_4425"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Add_4426">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
 					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2551" name="onnx::Add_4428" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4428"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Add_4428">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2552" name="onnx::Div_4429" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4428, onnx::Div_4429"/>
-			</rt_info>
+		<layer id="2436" name="/up_blocks.1/resnets.1/norm2/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Div_4429">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.1/norm2/Mul_output_0">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2553" name="onnx::Div_4430" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_4430"/>
-			</rt_info>
+		<layer id="2437" name="Constant_87061_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="1258714322" size="2560" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_4430">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2554" name="onnx::Mul_4431" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
+		<layer id="2438" name="Constant_87061" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_4430, onnx::Mul_4431, onnx::Mul_4432, onnx::Slice_4433"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
 					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Mul_4431,onnx::Slice_4433">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
 					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2555" name="Constant_127070" type="Const" version="opset1">
-			<data element_type="i32" shape="1" offset="25941500" size="4"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_16440, onnx::Gather_4425, onnx::Mul_4434"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I32">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2556" name="ScatterUpdate_127076" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_16440, onnx::Gather_4425, onnx::Mul_4434"/>
-			</rt_info>
+		<layer id="2439" name="/up_blocks.1/resnets.1/norm2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
+					<dim>1280</dim>
 					<dim>1</dim>
-				</port>
-				<port id="3" precision="I32">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="I64">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2557" name="Constant_127079" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941504" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_16440, onnx::Gather_4425, onnx::Mul_4434"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.1/norm2/Add_output_0">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2558" name="onnx::Mul_4434" type="StridedSlice" version="opset1">
-			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask=""/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_16440, onnx::Gather_4425, onnx::Mul_4434"/>
-			</rt_info>
+		<layer id="2440" name="/up_blocks.1/resnets.1/nonlinearity_1/Mul" type="Swish" version="opset4">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>10240</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="3" precision="I64">
-					<dim>3</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="FP32" names="onnx::Mul_4434">
+				<port id="1" precision="FP32" names="/up_blocks.1/resnets.1/nonlinearity_1/Mul_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>5120</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2559" name="Constant_127143" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_16509, onnx::Div_4437, onnx::Gather_4425"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2560" name="Constant_127142" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_16509, onnx::Div_4437, onnx::Gather_4425"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2561" name="Constant_127141" type="Const" version="opset1">
-			<data element_type="i32" shape="1" offset="25941500" size="4"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_16509, onnx::Div_4437, onnx::Gather_4425"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I32">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2562" name="ScatterUpdate_127144" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_16509, onnx::Div_4437, onnx::Gather_4425"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="3" precision="I32">
-					<dim>1</dim>
-				</port>
-			</input>
+		<layer id="2441" name="up_blocks.1.resnets.1.conv2.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280, 3, 3" offset="1258716882" size="29491200" />
 			<output>
-				<port id="4" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
 					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2563" name="Constant_127145" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_16509, onnx::Div_4437, onnx::Gather_4425"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2564" name="onnx::Mul_4435" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4435"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_4435">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2565" name="onnx::Slice_4436" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4435, onnx::Slice_4436"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Slice_4436">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2566" name="ScatterUpdate_127146" type="ScatterUpdate" version="opset3">
+		<layer id="2442" name="up_blocks.1.resnets.1.conv2.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_16509, onnx::Div_4437, onnx::Gather_4425"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
 					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="3" precision="I32">
-					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="I64">
+				<port id="1" precision="FP32" names="up_blocks.1.resnets.1.conv2.weight">
+					<dim>1280</dim>
+					<dim>1280</dim>
 					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2567" name="Constant_127149" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941504" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_16509, onnx::Div_4437, onnx::Gather_4425"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2568" name="onnx::Div_4437" type="StridedSlice" version="opset1">
-			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask=""/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_16509, onnx::Div_4437, onnx::Gather_4425"/>
-			</rt_info>
+		<layer id="2443" name="/up_blocks.1/resnets.1/conv2/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>10240</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
-				<port id="2" precision="I64">
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
 					<dim>3</dim>
-				</port>
-				<port id="3" precision="I64">
 					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="FP32" names="onnx::Div_4437">
+				<port id="2" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>5120</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2569" name="onnx::Mul_4445" type="Gelu" version="opset7">
-			<data approximation_mode="ERF"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4440, onnx::Erf_4439, onnx::Mul_4442, onnx::Mul_4443, onnx::Mul_4445"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>5120</dim>
-				</port>
-			</input>
+		<layer id="2444" name="Reshape_23783_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="1288208082" size="2560" />
 			<output>
-				<port id="1" precision="FP32" names="onnx::Mul_4445">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>5120</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2570" name="input.640" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2445" name="Reshape_23783" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.640"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>5120</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>5120</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.640">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>5120</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2571" name="Constant_148037" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 5120" offset="2346987824" size="26214400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4448, onnx::MatMul_8873"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>1280</dim>
-					<dim>5120</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2572" name="onnx::Add_4448" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4448, onnx::MatMul_8873"/>
-			</rt_info>
+		<layer id="2446" name="/up_blocks.1/resnets.1/conv2/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>5120</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>1280</dim>
-					<dim>5120</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_4448">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.1/conv2/Conv_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2573" name="onnx::Add_4449" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4449"/>
-			</rt_info>
+		<layer id="2447" name="/up_blocks.1/resnets.1/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>2</dim>
 					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_4449">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.1/Add_1_output_0,/up_blocks.1/resnets.1/Div_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2574" name="onnx::Reshape_4450" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4450"/>
-			</rt_info>
+		<layer id="2448" name="/up_blocks.1/attentions.1/norm/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.1/norm/Constant_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2449" name="/up_blocks.1/attentions.1/norm/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+				<port id="1" precision="I64">
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_4450">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/norm/Reshape_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>32</dim>
+					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2575" name="Constant_90725" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18233080" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4459"/>
-			</rt_info>
+		<layer id="2450" name="Constant_23871" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2576" name="Constant_90726" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="2577" name="Gather_90727" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4459"/>
-			</rt_info>
+		<layer id="2451" name="MVN_23872" type="MVN" version="opset6">
+			<data eps="9.9999999747524271e-07" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>10240</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_4459">
-					<dim>4</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/norm/InstanceNormalization_output_0">
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2578" name="onnx::Transpose_4460" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_4460"/>
-			</rt_info>
+		<layer id="2452" name="/up_blocks.1/attentions.1/norm/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_4460">
-					<dim>2</dim>
 					<dim>16</dim>
 					<dim>16</dim>
-					<dim>1280</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="2579" name="Constant_16674" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="27579960" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_16674"/>
-			</rt_info>
+			</input>
 			<output>
-				<port id="0" precision="I64">
+				<port id="1" precision="I64" names="/up_blocks.1/attentions.1/norm/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2580" name="input.644" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.644"/>
-			</rt_info>
+		<layer id="2453" name="/up_blocks.1/attentions.1/norm/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>16</dim>
-					<dim>16</dim>
-					<dim>1280</dim>
+					<dim>32</dim>
+					<dim>10240</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.644">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/norm/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -44148,25 +40519,41 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2581" name="m.up_blocks.1.attentions.0.proj_out.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280, 1, 1" offset="2373202224" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.1.attentions.0.proj_out.weight"/>
-			</rt_info>
+		<layer id="2454" name="Constant_87062_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="1288210642" size="2560" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.1.attentions.0.proj_out.weight">
-					<dim>1280</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2582" name="Convolution_16676" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
+		<layer id="2455" name="Constant_87062" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_16676"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2456" name="/up_blocks.1/attentions.1/norm/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -44175,14 +40562,14 @@
 					<dim>16</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1280</dim>
+					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/norm/Mul_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -44190,10 +40577,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2583" name="Reshape_16696" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="2379755824" size="5120"/>
+		<layer id="2457" name="Constant_87063_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="1288213202" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -44201,19 +40588,13 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2584" name="onnx::Add_4462" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2458" name="Constant_87063" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_16695, Reshape_16696, onnx::Add_4462"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
-				</port>
-				<port id="1" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -44221,19 +40602,16 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_4462">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2585" name="onnx::Concat_4463" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4463"/>
-			</rt_info>
+		<layer id="2459" name="/up_blocks.1/attentions.1/norm/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -44242,14 +40620,14 @@
 					<dim>16</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>2</dim>
+					<dim>1</dim>
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Concat_4463">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/norm/Add_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -44257,63 +40635,51 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2586" name="onnx::Cast_4464" type="Concat" version="opset1">
-			<data axis="1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.648, onnx::Cast_4464"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
+		<layer id="2460" name="up_blocks.1.attentions.1.proj_in.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280, 1, 1" offset="1288215762" size="3276800" />
+			<output>
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="input.648,onnx::Cast_4464">
-					<dim>2</dim>
-					<dim>2560</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2587" name="m.up_blocks.1.resnets.1.conv_shortcut.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 2560, 1, 1" offset="2379760944" size="13107200"/>
+		<layer id="2461" name="up_blocks.1.attentions.1.proj_in.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.1.resnets.1.conv_shortcut.weight"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.1.resnets.1.conv_shortcut.weight">
+				<port id="1" precision="FP32" names="up_blocks.1.attentions.1.proj_in.weight">
+					<dim>1280</dim>
 					<dim>1280</dim>
-					<dim>2560</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2588" name="Convolution_17066" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_17066"/>
-			</rt_info>
+		<layer id="2462" name="/up_blocks.1/attentions.1/proj_in/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>2560</dim>
+					<dim>1280</dim>
 					<dim>16</dim>
 					<dim>16</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>1280</dim>
-					<dim>2560</dim>
+					<dim>1280</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
@@ -44327,10 +40693,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2589" name="Reshape_17086" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="2392868144" size="5120"/>
+		<layer id="2463" name="Reshape_23994_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="1291492562" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
@@ -44338,11 +40704,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2590" name="onnx::Add_4509" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2464" name="Reshape_23994" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_17085, Reshape_17086, onnx::Add_4509"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2465" name="/up_blocks.1/attentions.1/proj_in/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -44358,7 +40743,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_4509">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/proj_in/Conv_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>16</dim>
@@ -44366,1614 +40751,1448 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="2591" name="onnx::Reshape_4466" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4466"/>
-			</rt_info>
+		<layer id="2466" name="Constant_24022" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9116600" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_4466">
-					<dim>3</dim>
+				<port id="0" precision="I64">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2592" name="onnx::InstanceNormalization_4467" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_4467"/>
-			</rt_info>
+		<layer id="2467" name="/up_blocks.1/attentions.1/Transpose" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>2560</dim>
+					<dim>1280</dim>
 					<dim>16</dim>
 					<dim>16</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>3</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_4467">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/Transpose_output_0">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>20480</dim>
+					<dim>16</dim>
+					<dim>16</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2593" name="Constant_16736" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_16736"/>
-			</rt_info>
+		<layer id="2468" name="/up_blocks.1/attentions.1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="151498600" size="24" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.1/Constant_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2594" name="MVN_16737" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_16756, Concat_16801, MVN_16737, Multiply_16784, Reshape_16757, Reshape_16802, onnx::Reshape_4470"/>
-			</rt_info>
+		<layer id="2469" name="/up_blocks.1/attentions.1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>20480</dim>
+					<dim>16</dim>
+					<dim>16</dim>
+					<dim>1280</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_4470">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/Reshape_output_0">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>20480</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2595" name="onnx::Reshape_4471" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4471"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>2560</dim>
-					<dim>16</dim>
-					<dim>16</dim>
-				</port>
-			</input>
+		<layer id="2470" name="Constant_24031" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_4471">
-					<dim>4</dim>
+				<port id="0" precision="I64">
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2596" name="onnx::Mul_4472" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4472"/>
-			</rt_info>
+		<layer id="2471" name="/up_blocks.1/attentions.1/transformer_blocks.0/norm1/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>20480</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_4472">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/norm1/Div_output_0">
 					<dim>2</dim>
-					<dim>2560</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2597" name="Constant_150467" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 2560, 1, 1" offset="2392873264" size="10240"/>
+		<layer id="2472" name="Constant_87064_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="1291495122" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>2560</dim>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2598" name="onnx::Add_4475" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2473" name="Constant_87064" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4475"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>2560</dim>
-					<dim>16</dim>
-					<dim>16</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>2560</dim>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_4475">
-					<dim>2</dim>
-					<dim>2560</dim>
-					<dim>16</dim>
-					<dim>16</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2599" name="Constant_150468" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 2560, 1, 1" offset="2392883504" size="10240"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>2560</dim>
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2600" name="onnx::Cast_4478" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.652, onnx::Cast_4478"/>
-			</rt_info>
+		<layer id="2474" name="/up_blocks.1/attentions.1/transformer_blocks.0/norm1/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>2560</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>2560</dim>
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.652,onnx::Cast_4478">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/norm1/Mul_output_0">
 					<dim>2</dim>
-					<dim>2560</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2601" name="input.656" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.656, onnx::Mul_4480"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>2560</dim>
-					<dim>16</dim>
-					<dim>16</dim>
-				</port>
-			</input>
+		<layer id="2475" name="Constant_87065_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="1291497682" size="2560" />
 			<output>
-				<port id="1" precision="FP32" names="input.656">
-					<dim>2</dim>
-					<dim>2560</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2602" name="m.up_blocks.1.resnets.1.conv1.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 2560, 3, 3" offset="2392893744" size="117964800"/>
+		<layer id="2476" name="Constant_87065" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.1.resnets.1.conv1.weight"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.1.resnets.1.conv1.weight">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1280</dim>
-					<dim>2560</dim>
-					<dim>3</dim>
-					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2603" name="Convolution_16842" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_16842"/>
-			</rt_info>
+		<layer id="2477" name="/up_blocks.1/attentions.1/transformer_blocks.0/norm1/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>2560</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1280</dim>
-					<dim>2560</dim>
-					<dim>3</dim>
-					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/norm1/Add_1_output_0">
 					<dim>2</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2604" name="Reshape_16862" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="2510858544" size="5120"/>
+		<layer id="2478" name="Constant_85574_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="1291500242" size="3276800" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
 					<dim>1280</dim>
-					<dim>1</dim>
-					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2605" name="onnx::Add_4482" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2479" name="Constant_85574" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_16861, Reshape_16862, onnx::Add_4482"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
 				</port>
+			</input>
+			<output>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
 					<dim>1280</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_4482">
-					<dim>2</dim>
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2606" name="onnx::Gemm_4484" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gemm_4484, onnx::Mul_4483"/>
-			</rt_info>
+		<layer id="2480" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/to_q/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="onnx::Gemm_4484">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/to_q/MatMul_output_0">
 					<dim>2</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2607" name="m.up_blocks.1.resnets.1.time_emb_proj.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="2510863664" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.1.resnets.1.time_emb_proj.weight"/>
-			</rt_info>
+		<layer id="2481" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="154780544" size="32" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.1.resnets.1.time_emb_proj.weight">
-					<dim>1280</dim>
-					<dim>1280</dim>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Constant_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2608" name="MatMul_16894" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="MatMul_16894"/>
-			</rt_info>
+		<layer id="2482" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>1280</dim>
-					<dim>1280</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Reshape_output_0">
 					<dim>2</dim>
-					<dim>1280</dim>
+					<dim>256</dim>
+					<dim>8</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2609" name="Constant_150469" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280" offset="2517417264" size="5120"/>
+		<layer id="2483" name="Constant_24054" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1280</dim>
+				<port id="0" precision="I64">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2610" name="onnx::Unsqueeze_4485" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Multiply_16895, onnx::Unsqueeze_4485"/>
-			</rt_info>
+		<layer id="2484" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Transpose" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1280</dim>
+					<dim>256</dim>
+					<dim>8</dim>
+					<dim>160</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1280</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_4485">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Transpose_output_0">
 					<dim>2</dim>
-					<dim>1280</dim>
+					<dim>8</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2611" name="onnx::Unsqueeze_4486" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4486"/>
-			</rt_info>
+		<layer id="2485" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="154780576" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4486">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Constant_1_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2612" name="onnx::Unsqueeze_4487" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4487"/>
-			</rt_info>
+		<layer id="2486" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1280</dim>
+					<dim>8</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_4487">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Reshape_1_output_0">
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2613" name="onnx::Unsqueeze_4488" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="14129392" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4488"/>
-			</rt_info>
+		<layer id="2487" name="Constant_85581_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="1294777042" size="3276800" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4488">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2614" name="onnx::Add_4489" type="Unsqueeze" version="opset1">
+		<layer id="2488" name="Constant_85581" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4489"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
 					<dim>1280</dim>
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_4489">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
 					<dim>1280</dim>
-					<dim>1</dim>
-					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2615" name="onnx::Cast_4490" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.660, onnx::Cast_4490"/>
-			</rt_info>
+		<layer id="2489" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/to_k/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>2</dim>
 					<dim>1280</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.660,onnx::Cast_4490">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/to_k/MatMul_output_0">
 					<dim>2</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2616" name="onnx::Reshape_4492" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4492"/>
-			</rt_info>
+		<layer id="2490" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="154780544" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_4492">
-					<dim>3</dim>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Constant_2_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2617" name="onnx::InstanceNormalization_4493" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_4493"/>
-			</rt_info>
+		<layer id="2491" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Reshape_2" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>3</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_4493">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Reshape_2_output_0">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>10240</dim>
+					<dim>256</dim>
+					<dim>8</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2618" name="Constant_16912" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_16912"/>
-			</rt_info>
+		<layer id="2492" name="Constant_24070" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2619" name="MVN_16913" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_16932, Concat_16977, MVN_16913, Multiply_16960, Reshape_16933, Reshape_16978, onnx::Reshape_4496"/>
-			</rt_info>
+		<layer id="2493" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Transpose_1" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>10240</dim>
+					<dim>256</dim>
+					<dim>8</dim>
+					<dim>160</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_4496">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Transpose_1_output_0">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>10240</dim>
+					<dim>8</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2620" name="onnx::Reshape_4497" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4497"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
-				</port>
-			</input>
+		<layer id="2494" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="154780576" size="24" />
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_4497">
-					<dim>4</dim>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Constant_3_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2621" name="onnx::Mul_4498" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4498"/>
-			</rt_info>
+		<layer id="2495" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Reshape_3" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>10240</dim>
+					<dim>8</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_4498">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Reshape_3_output_0">
 					<dim>16</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2622" name="Constant_150470" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="2517422384" size="5120"/>
+		<layer id="2496" name="Constant_87066_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1" offset="158057400" size="2" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-					<dim>1280</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2623" name="onnx::Add_4501" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2497" name="Constant_87066" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4501"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
-				</port>
-				<port id="1" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-					<dim>1280</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_4501">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2624" name="Constant_150471" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="2517427504" size="5120"/>
-			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
-					<dim>1280</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2625" name="onnx::Cast_4504" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.664, onnx::Cast_4504"/>
-			</rt_info>
+		<layer id="2498" name="Multiply_86207" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
 					<dim>16</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>1</dim>
-					<dim>1280</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.664,onnx::Cast_4504">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
+				<port id="2" precision="FP32">
 					<dim>16</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2626" name="input.668" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.668, onnx::Mul_4506"/>
-			</rt_info>
+		<layer id="2499" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Mul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
 					<dim>16</dim>
+					<dim>256</dim>
+					<dim>160</dim>
+				</port>
+				<port id="1" precision="FP32">
 					<dim>16</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="input.668">
-					<dim>2</dim>
-					<dim>1280</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Add_output_0,/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Mul_output_0">
 					<dim>16</dim>
+					<dim>256</dim>
+					<dim>256</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2500" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Softmax" type="SoftMax" version="opset8">
+			<data axis="-1" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>256</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Cast_output_0,/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Softmax_output_0">
 					<dim>16</dim>
+					<dim>256</dim>
+					<dim>256</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2627" name="m.up_blocks.1.resnets.1.conv2.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280, 3, 3" offset="2517432624" size="58982400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.1.resnets.1.conv2.weight"/>
-			</rt_info>
+		<layer id="2501" name="Constant_85588_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="1298053842" size="3276800" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.1.resnets.1.conv2.weight">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
-					<dim>3</dim>
-					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2628" name="Convolution_17018" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
+		<layer id="2502" name="Constant_85588" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_17018"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
-				</port>
-				<port id="1" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
-					<dim>3</dim>
-					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2629" name="Reshape_17038" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="2576415024" size="5120"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
 					<dim>1280</dim>
-					<dim>1</dim>
-					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2630" name="onnx::Add_4508" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_17037, Reshape_17038, onnx::Add_4508"/>
-			</rt_info>
+		<layer id="2503" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/to_v/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
 					<dim>1280</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_4508">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/to_v/MatMul_output_0">
 					<dim>2</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2631" name="onnx::Div_4510" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.672, onnx::Div_4510"/>
-			</rt_info>
+		<layer id="2504" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Constant_4" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="154780544" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Constant_4_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2505" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Reshape_4" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.672,onnx::Div_4510">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Reshape_4_output_0">
 					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+					<dim>256</dim>
+					<dim>8</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2632" name="onnx::Reshape_4525" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4525"/>
-			</rt_info>
+		<layer id="2506" name="Constant_24086" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_4525">
-					<dim>3</dim>
+				<port id="0" precision="I64">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2633" name="onnx::InstanceNormalization_4526" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_4526"/>
-			</rt_info>
+		<layer id="2507" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Transpose_2" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+					<dim>256</dim>
+					<dim>8</dim>
+					<dim>160</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>3</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_4526">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Transpose_2_output_0">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>10240</dim>
+					<dim>8</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2634" name="Constant_17142" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_17142"/>
-			</rt_info>
+		<layer id="2508" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="154780576" size="24" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Constant_5_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2635" name="MVN_17143" type="MVN" version="opset6">
-			<data eps="9.9999999747524271e-07" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_17162, Concat_17207, MVN_17143, Multiply_17190, Reshape_17163, Reshape_17208, onnx::Reshape_4529"/>
-			</rt_info>
+		<layer id="2509" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Reshape_5" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>10240</dim>
+					<dim>8</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_4529">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>10240</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Reshape_5_output_0">
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2636" name="onnx::Reshape_4530" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4513, onnx::Gather_4516, onnx::Gather_4519, onnx::Gather_4522, onnx::Reshape_4530"/>
-			</rt_info>
+		<layer id="2510" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/MatMul_1" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="false" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
 					<dim>16</dim>
+					<dim>256</dim>
+					<dim>256</dim>
+				</port>
+				<port id="1" precision="FP32">
 					<dim>16</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_4513,onnx::Gather_4516,onnx::Gather_4519,onnx::Gather_4522,onnx::Reshape_4530">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/MatMul_1_output_0">
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>160</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2511" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Constant_8" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="161334202" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Constant_8_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2637" name="onnx::Mul_4531" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4531"/>
-			</rt_info>
+		<layer id="2512" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Reshape_6" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>10240</dim>
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_4531">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Reshape_6_output_0">
 					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+					<dim>8</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2638" name="Constant_150472" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="2576420144" size="5120"/>
+		<layer id="2513" name="Constant_24112" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1280</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="0" precision="I64">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2639" name="onnx::Add_4534" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4534"/>
-			</rt_info>
+		<layer id="2514" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Transpose_4" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+					<dim>8</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1280</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_4534">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Transpose_4_output_0">
 					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+					<dim>256</dim>
+					<dim>8</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2640" name="Constant_150473" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="2576425264" size="5120"/>
+		<layer id="2515" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Constant_9" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="151498600" size="24" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1280</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Constant_9_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2641" name="input.676" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.676"/>
-			</rt_info>
+		<layer id="2516" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Reshape_7" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+					<dim>256</dim>
+					<dim>8</dim>
+					<dim>160</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1280</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="1" precision="I64">
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.676">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/Reshape_7_output_0">
 					<dim>2</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2642" name="m.up_blocks.1.attentions.1.proj_in.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280, 1, 1" offset="2576430384" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.1.attentions.1.proj_in.weight"/>
-			</rt_info>
+		<layer id="2517" name="Constant_85595_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="1301330642" size="3276800" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.1.attentions.1.proj_in.weight">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
-					<dim>1</dim>
-					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2643" name="Convolution_17245" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
+		<layer id="2518" name="Constant_85595" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_17245"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2519" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/to_out.0/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>1280</dim>
 					<dim>1280</dim>
-					<dim>1</dim>
-					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/to_out.0/MatMul_output_0">
 					<dim>2</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2644" name="Reshape_17265" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="2582983984" size="5120"/>
-			<output>
+		<layer id="2520" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/to_out.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
 				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1280</dim>
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn1/to_out.0/Add_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2645" name="onnx::Transpose_4538" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_17264, Reshape_17265, onnx::Transpose_4538"/>
-			</rt_info>
+		<layer id="2521" name="/up_blocks.1/attentions.1/transformer_blocks.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
+					<dim>2</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
-					<dim>1</dim>
-					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_4538">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/Add_output_0">
 					<dim>2</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2646" name="Constant_17293" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18233080" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_17293"/>
-			</rt_info>
+		<layer id="2522" name="Constant_24124" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2647" name="onnx::Reshape_4539" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4539"/>
-			</rt_info>
+		<layer id="2523" name="/up_blocks.1/attentions.1/transformer_blocks.0/norm2/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_4539">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/norm2/Div_output_0">
 					<dim>2</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2648" name="Constant_88860" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_17119, onnx::Concat_4542, onnx::Gather_4514, onnx::Unsqueeze_4515, onnx::Unsqueeze_4541"/>
-			</rt_info>
+		<layer id="2524" name="Constant_87068_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="1304607442" size="2560" />
 			<output>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2649" name="Constant_17119" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
+		<layer id="2525" name="Constant_87068" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_17119"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2650" name="onnx::Unsqueeze_4515" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_17119, onnx::Concat_4542, onnx::Gather_4514, onnx::Unsqueeze_4515, onnx::Unsqueeze_4541"/>
-			</rt_info>
+		<layer id="2526" name="/up_blocks.1/attentions.1/transformer_blocks.0/norm2/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_4542">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/norm2/Mul_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2527" name="Constant_87069_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="1304610002" size="2560" />
+			<output>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2651" name="onnx::Gather_4520" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
+		<layer id="2528" name="Constant_87069" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4520"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_4520"/>
-			</output>
-		</layer>
-		<layer id="2652" name="Constant_17127" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_17127"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="2653" name="onnx::Mul_4521" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_17127, onnx::Gather_4520, onnx::Mul_4521"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Mul_4521"/>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2654" name="onnx::Gather_4523" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="14129392" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4523"/>
-			</rt_info>
+		<layer id="2529" name="/up_blocks.1/attentions.1/transformer_blocks.0/norm2/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64" names="onnx::Gather_4523"/>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/norm2/Add_1_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2655" name="Constant_17131" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_17131"/>
-			</rt_info>
+		<layer id="2530" name="Constant_85603_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="1304612562" size="3276800" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2656" name="onnx::Mul_4524" type="Gather" version="opset8">
-			<data batch_dims="0"/>
+		<layer id="2531" name="Constant_85603" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_17131, onnx::Gather_4523, onnx::Mul_4524"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Mul_4524"/>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2657" name="onnx::Unsqueeze_4540" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4540"/>
-			</rt_info>
+		<layer id="2532" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/to_q/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_4540"/>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/to_q/MatMul_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2658" name="onnx::Unsqueeze_4543" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4543"/>
-			</rt_info>
+		<layer id="2533" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="154780544" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4543">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Constant_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2659" name="onnx::Concat_4544" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4544, onnx::Unsqueeze_4543"/>
-			</rt_info>
+		<layer id="2534" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
+				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_4544">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Reshape_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>8</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2660" name="Constant_88869" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_17123, onnx::Concat_4546, onnx::Gather_4517, onnx::Unsqueeze_4518, onnx::Unsqueeze_4545"/>
-			</rt_info>
+		<layer id="2535" name="Constant_24147" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2661" name="Constant_17123" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_17123"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="2662" name="onnx::Unsqueeze_4518" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_17123, onnx::Concat_4546, onnx::Gather_4517, onnx::Unsqueeze_4518, onnx::Unsqueeze_4545"/>
-			</rt_info>
+		<layer id="2536" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Transpose" type="Transpose" version="opset1">
 			<input>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>8</dim>
+					<dim>160</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_4546">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Transpose_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2663" name="onnx::Reshape_4547" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4547"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
+		<layer id="2537" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="154780576" size="24" />
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_4547">
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Constant_1_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2664" name="input.680" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.680"/>
-			</rt_info>
+		<layer id="2538" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>16</dim>
-					<dim>16</dim>
-					<dim>1280</dim>
+					<dim>8</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.680">
-					<dim>2</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Reshape_1_output_0">
+					<dim>16</dim>
 					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2665" name="Constant_17378" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_17378"/>
-			</rt_info>
+		<layer id="2539" name="Constant_85610_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 768" offset="1307889362" size="1966080" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2666" name="onnx::Mul_4557" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
+		<layer id="2540" name="Constant_85610" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4553, onnx::Div_4556, onnx::Mul_4557, onnx::Pow_4550, onnx::ReduceMean_4552, onnx::Sqrt_4555, onnx::Sub_4549"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>768</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_4557">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2667" name="Constant_150474" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="2582989104" size="5120"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="1" precision="FP32">
 					<dim>1280</dim>
+					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2668" name="onnx::Add_4558" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4558"/>
-			</rt_info>
+		<layer id="2541" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/to_k/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>77</dim>
+					<dim>768</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
 					<dim>1280</dim>
+					<dim>768</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_4558">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/to_k/MatMul_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
+					<dim>77</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2669" name="Constant_150475" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="2582994224" size="5120"/>
+		<layer id="2542" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="169859034" size="32" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1280</dim>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Constant_2_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2670" name="onnx::MatMul_4559" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_4559"/>
-			</rt_info>
+		<layer id="2543" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Reshape_2" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
+					<dim>77</dim>
 					<dim>1280</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1280</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_4559">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Reshape_2_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>77</dim>
+					<dim>8</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2671" name="Constant_148048" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="2582999344" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8880, q.127"/>
-			</rt_info>
+		<layer id="2544" name="Constant_24163" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1280</dim>
-					<dim>1280</dim>
+				<port id="0" precision="I64">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2672" name="q.127" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8880, q.127"/>
-			</rt_info>
+		<layer id="2545" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Transpose_1" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>77</dim>
+					<dim>8</dim>
+					<dim>160</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>1280</dim>
-					<dim>1280</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.127">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Transpose_1_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>8</dim>
+					<dim>77</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2673" name="Constant_107820" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
+		<layer id="2546" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="169859066" size="24" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Constant_3_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2674" name="onnx::Gather_4566" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4566, onnx::Gather_4569, onnx::Gather_4572"/>
-			</rt_info>
+		<layer id="2547" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Reshape_3" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>8</dim>
+					<dim>77</dim>
+					<dim>160</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_4566,onnx::Gather_4569,onnx::Gather_4572">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Reshape_3_output_0">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2675" name="onnx::Gather_4573" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4573"/>
-			</rt_info>
+		<layer id="2548" name="Constant_87070_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1" offset="158057400" size="2" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Gather_4573"/>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2676" name="Constant_17404" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
+		<layer id="2549" name="Constant_87070" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_17404"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2677" name="onnx::Div_4574" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_17404, onnx::Div_4574, onnx::Gather_4573"/>
-			</rt_info>
+		<layer id="2550" name="Multiply_86209" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>160</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_4574"/>
+				<port id="2" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>160</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2678" name="onnx::Div_4575" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_4575"/>
-			</rt_info>
+		<layer id="2551" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Mul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>160</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>160</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_4575"/>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Add_output_0,/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Mul_output_0">
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>77</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2679" name="onnx::Cast_4576" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_4576, onnx::Cast_4577, onnx::Div_4575, onnx::Unsqueeze_4578"/>
-			</rt_info>
+		<layer id="2552" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Softmax" type="SoftMax" version="opset8">
+			<data axis="-1" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>77</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_4576,onnx::Cast_4577,onnx::Unsqueeze_4578"/>
+				<port id="1" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Cast_output_0,/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Softmax_output_0">
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>77</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2680" name="onnx::Unsqueeze_4586" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4586"/>
-			</rt_info>
+		<layer id="2553" name="Constant_85617_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 768" offset="1309855442" size="1966080" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4586">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2681" name="onnx::Concat_4587" type="Unsqueeze" version="opset1">
+		<layer id="2554" name="Constant_85617" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4587, onnx::Unsqueeze_4586"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>768</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_4587">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2682" name="onnx::Reshape_4588" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_4588"/>
-			</rt_info>
+		<layer id="2555" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/to_v/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>77</dim>
+					<dim>768</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>768</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/to_v/MatMul_output_0">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2556" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Constant_4" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="169859034" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Constant_4_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2683" name="onnx::Transpose_4589" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_4589"/>
-			</rt_info>
+		<layer id="2557" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Reshape_4" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
+					<dim>77</dim>
 					<dim>1280</dim>
 				</port>
 				<port id="1" precision="I64">
@@ -45981,33 +42200,27 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_4589">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Reshape_4_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
+					<dim>77</dim>
 					<dim>8</dim>
 					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2684" name="Constant_17517" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_17517"/>
-			</rt_info>
+		<layer id="2558" name="Constant_24179" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2685" name="onnx::Reshape_4590" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4590"/>
-			</rt_info>
+		<layer id="2559" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Transpose_2" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
+					<dim>77</dim>
 					<dim>8</dim>
 					<dim>160</dim>
 				</port>
@@ -46016,249 +42229,224 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_4590">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Transpose_2_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
-					<dim>256</dim>
+					<dim>77</dim>
 					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2686" name="onnx::Gather_4567" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4567"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_4567"/>
-			</output>
-		</layer>
-		<layer id="2687" name="Constant_17396" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_17396"/>
-			</rt_info>
+		<layer id="2560" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="169859066" size="24" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Constant_5_output_0">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2688" name="onnx::Unsqueeze_4568" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_17396, onnx::Gather_4567, onnx::Unsqueeze_4568"/>
-			</rt_info>
+		<layer id="2561" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Reshape_5" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>77</dim>
+					<dim>160</dim>
+				</port>
+				<port id="1" precision="I64">
 					<dim>3</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_4568"/>
-			</output>
-		</layer>
-		<layer id="2689" name="onnx::Mul_4591" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4591"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_4591"/>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Reshape_5_output_0">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>160</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2690" name="onnx::Unsqueeze_4592" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4591, onnx::Unsqueeze_4592"/>
-			</rt_info>
+		<layer id="2562" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/MatMul_1" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="false" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>77</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>160</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_4592"/>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/MatMul_1_output_0">
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>160</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2691" name="onnx::Unsqueeze_4597" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4597"/>
-			</rt_info>
+		<layer id="2563" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Constant_8" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="161334202" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4597">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Constant_8_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2692" name="onnx::Concat_4598" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4598, onnx::Unsqueeze_4597"/>
-			</rt_info>
+		<layer id="2564" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Reshape_6" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>160</dim>
+				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_4598">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Reshape_6_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2693" name="Constant_88896" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_17400, onnx::Concat_4600, onnx::Gather_4570, onnx::Unsqueeze_4571, onnx::Unsqueeze_4599"/>
-			</rt_info>
+		<layer id="2565" name="Constant_24205" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2694" name="Constant_17400" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_17400"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="2695" name="onnx::Unsqueeze_4571" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_17400, onnx::Concat_4600, onnx::Gather_4570, onnx::Unsqueeze_4571, onnx::Unsqueeze_4599"/>
-			</rt_info>
+		<layer id="2566" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Transpose_4" type="Transpose" version="opset1">
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_4600">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Transpose_4_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>8</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2696" name="onnx::Div_4593" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_4593"/>
-			</rt_info>
+		<layer id="2567" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Constant_9" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="151498600" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_4593"/>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Constant_9_output_0">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2697" name="onnx::Cast_4594" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_4594, onnx::Cast_4595, onnx::Div_4593, onnx::Unsqueeze_4596"/>
-			</rt_info>
+		<layer id="2568" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Reshape_7" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>8</dim>
+					<dim>160</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>3</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_4594,onnx::Cast_4595,onnx::Unsqueeze_4596"/>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/Reshape_7_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2698" name="onnx::Unsqueeze_4601" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4601"/>
-			</rt_info>
+		<layer id="2569" name="Constant_85624_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="1311821522" size="3276800" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4601">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2699" name="onnx::Concat_4602" type="Unsqueeze" version="opset1">
+		<layer id="2570" name="Constant_85624" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4602, onnx::Unsqueeze_4601"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_4602">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2700" name="onnx::Reshape_4603" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4603"/>
-			</rt_info>
+		<layer id="2571" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/to_out.0/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_4603">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/to_out.0/MatMul_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2701" name="q.131" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="q.131"/>
-			</rt_info>
+		<layer id="2572" name="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/to_out.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+				<port id="1" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>256</dim>
-					<dim>160</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.131">
-					<dim>16</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/attn2/to_out.0/Add_output_0">
+					<dim>2</dim>
 					<dim>256</dim>
-					<dim>160</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2702" name="Constant_148055" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="2589552944" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.127, onnx::MatMul_8881"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2703" name="k.127" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.127, onnx::MatMul_8881"/>
-			</rt_info>
+		<layer id="2573" name="/up_blocks.1/attentions.1/transformer_blocks.0/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -46266,316 +42454,276 @@
 					<dim>1280</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1280</dim>
+					<dim>2</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.127">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/Add_1_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2704" name="Constant_107889" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
+		<layer id="2574" name="Constant_24217" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>2</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2705" name="onnx::Gather_4605" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4605, onnx::Gather_4608, onnx::Gather_4611"/>
-			</rt_info>
+		<layer id="2575" name="/up_blocks.1/attentions.1/transformer_blocks.0/norm3/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
+				<port id="1" precision="I64">
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_4605,onnx::Gather_4608,onnx::Gather_4611">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/norm3/Div_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2706" name="onnx::Gather_4612" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4612"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_4612"/>
-			</output>
-		</layer>
-		<layer id="2707" name="Constant_17645" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_17645"/>
-			</rt_info>
+		<layer id="2576" name="Constant_87072_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="1315098322" size="2560" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2708" name="onnx::Div_4613" type="Gather" version="opset8">
-			<data batch_dims="0"/>
+		<layer id="2577" name="Constant_87072" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_17645, onnx::Div_4613, onnx::Gather_4612"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_4613"/>
-			</output>
-		</layer>
-		<layer id="2709" name="onnx::Div_4614" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_4614"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_4614"/>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2710" name="onnx::Cast_4615" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_4615, onnx::Cast_4616, onnx::Div_4614, onnx::Unsqueeze_4617"/>
-			</rt_info>
+		<layer id="2578" name="/up_blocks.1/attentions.1/transformer_blocks.0/norm3/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_4615,onnx::Cast_4616,onnx::Unsqueeze_4617"/>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/norm3/Mul_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2711" name="onnx::Unsqueeze_4625" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4625"/>
-			</rt_info>
+		<layer id="2579" name="Constant_87073_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="1315100882" size="2560" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4625">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2712" name="onnx::Concat_4626" type="Unsqueeze" version="opset1">
+		<layer id="2580" name="Constant_87073" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4626, onnx::Unsqueeze_4625"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_4626">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2713" name="onnx::Reshape_4627" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_4627"/>
-			</rt_info>
+		<layer id="2581" name="/up_blocks.1/attentions.1/transformer_blocks.0/norm3/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64">
-					<dim>4</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/norm3/Add_1_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2582" name="Constant_85632_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="10240, 1280" offset="1315103442" size="26214400" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>10240</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2714" name="onnx::Transpose_4628" type="Reshape" version="opset1">
-			<data special_zero="true"/>
+		<layer id="2583" name="Constant_85632" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_4628"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>10240</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>10240</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2584" name="/up_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/proj/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>10240</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_4628">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/proj/MatMul_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
-					<dim>8</dim>
-					<dim>160</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2715" name="Constant_17758" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_17758"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2716" name="onnx::Reshape_4629" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4629"/>
-			</rt_info>
+		<layer id="2585" name="/up_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/proj/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>10240</dim>
+				</port>
+				<port id="1" precision="FP32">
 					<dim>2</dim>
 					<dim>256</dim>
-					<dim>8</dim>
-					<dim>160</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>10240</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_4629">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/proj/Add_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>256</dim>
-					<dim>160</dim>
+					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2717" name="onnx::Gather_4606" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4606"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_4606"/>
-			</output>
-		</layer>
-		<layer id="2718" name="Constant_17637" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_17637"/>
-			</rt_info>
+		<layer id="2586" name="Constant_77816" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="2719" name="onnx::Unsqueeze_4607" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_17637, onnx::Gather_4606, onnx::Unsqueeze_4607"/>
-			</rt_info>
-			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_4607"/>
-			</output>
-		</layer>
-		<layer id="2720" name="onnx::Mul_4630" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4630"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_4630"/>
 			</output>
 		</layer>
-		<layer id="2721" name="onnx::Unsqueeze_4631" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4630, onnx::Unsqueeze_4631"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
+		<layer id="2587" name="Constant_77817" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_4631"/>
+				<port id="0" precision="I64">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2722" name="onnx::Unsqueeze_4636" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4636"/>
-			</rt_info>
+		<layer id="2588" name="Constant_77813" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4636">
+				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2723" name="onnx::Concat_4637" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4637, onnx::Unsqueeze_4636"/>
-			</rt_info>
+		<layer id="2589" name="/up_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>10240</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_4637">
-					<dim>1</dim>
+				<port id="1" precision="I64" names="/up_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Shape_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2724" name="Constant_88923" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_17641, onnx::Concat_4639, onnx::Gather_4609, onnx::Unsqueeze_4610, onnx::Unsqueeze_4638"/>
-			</rt_info>
+		<layer id="2590" name="/up_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
-				<port id="0" precision="I64">
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Constant_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2725" name="Constant_17641" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_17641"/>
-			</rt_info>
+		<layer id="2591" name="Constant_24234" type="Const" version="opset1">
+			<data element_type="i64" shape="" offset="2143392" size="8" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64" />
 			</output>
 		</layer>
-		<layer id="2726" name="onnx::Unsqueeze_4610" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_17641, onnx::Concat_4639, onnx::Gather_4609, onnx::Unsqueeze_4610, onnx::Unsqueeze_4638"/>
-			</rt_info>
+		<layer id="2592" name="/up_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Gather" type="Gather" version="opset8">
+			<data batch_dims="0" />
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
@@ -46583,71 +42731,74 @@
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
+				<port id="2" precision="I64" />
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_4639">
+				<port id="3" precision="I64" names="/up_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Gather_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2727" name="onnx::Div_4632" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_4632"/>
-			</rt_info>
+		<layer id="2593" name="/up_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="2143400" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_4632"/>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Constant_2_output_0">
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2728" name="onnx::Cast_4633" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_4633, onnx::Cast_4634, onnx::Div_4632, onnx::Unsqueeze_4635"/>
-			</rt_info>
+		<layer id="2594" name="/up_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="I64">
+					<dim>1</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_4633,onnx::Cast_4634,onnx::Unsqueeze_4635"/>
+				<port id="2" precision="I64" names="/up_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Add_output_0">
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2729" name="onnx::Unsqueeze_4640" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4640"/>
-			</rt_info>
+		<layer id="2595" name="/up_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4640">
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Constant_3_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2730" name="onnx::Concat_4641" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4641, onnx::Unsqueeze_4640"/>
-			</rt_info>
+		<layer id="2596" name="/up_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Div" type="Divide" version="opset1">
+			<data auto_broadcast="numpy" m_pythondiv="true" />
 			<input>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64">
+					<dim>1</dim>
+				</port>
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_4641">
+				<port id="2" precision="I64" names="/up_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Div_output_0,/up_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Mul_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2731" name="onnx::Reshape_4642" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4642"/>
-			</rt_info>
+		<layer id="2597" name="Constant_77812" type="Const" version="opset1">
+			<data element_type="i32" shape="1" offset="12970946" size="4" />
+			<output>
+				<port id="0" precision="I32">
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2598" name="ScatterUpdate_77818" type="ScatterUpdate" version="opset3">
 			<input>
 				<port id="0" precision="I64">
-					<dim>1</dim>
+					<dim>3</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>1</dim>
@@ -46655,1044 +42806,1029 @@
 				<port id="2" precision="I64">
 					<dim>1</dim>
 				</port>
+				<port id="3" precision="I32">
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_4642">
+				<port id="4" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2732" name="k.131" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.131"/>
-			</rt_info>
+		<layer id="2599" name="Constant_77821" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970950" size="24" />
+			<output>
+				<port id="0" precision="I64">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2600" name="/up_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Slice" type="StridedSlice" version="opset1">
+			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask="" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>256</dim>
-					<dim>160</dim>
+					<dim>10240</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>3</dim>
 				</port>
+				<port id="2" precision="I64">
+					<dim>3</dim>
+				</port>
+				<port id="3" precision="I64">
+					<dim>3</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.131">
-					<dim>16</dim>
+				<port id="4" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Slice_output_0">
+					<dim>2</dim>
 					<dim>256</dim>
-					<dim>160</dim>
+					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2733" name="onnx::Mul_4683" type="Einsum" version="opset7">
-			<data equation="bid,bjd-&gt;bij"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4683"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
-				</port>
-			</input>
+		<layer id="2601" name="Constant_77885" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_4683">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>256</dim>
+				<port id="0" precision="I64">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2734" name="Constant_150476" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1" offset="316114004" size="4"/>
+		<layer id="2602" name="Constant_77884" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
+				<port id="0" precision="I64">
 					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2603" name="Constant_77883" type="Const" version="opset1">
+			<data element_type="i32" shape="1" offset="12970946" size="4" />
+			<output>
+				<port id="0" precision="I32">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2735" name="onnx::Softmax_4685" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Softmax_4685"/>
-			</rt_info>
+		<layer id="2604" name="ScatterUpdate_77886" type="ScatterUpdate" version="opset3">
 			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>256</dim>
+				<port id="0" precision="I64">
+					<dim>3</dim>
 				</port>
-				<port id="1" precision="FP32">
+				<port id="1" precision="I64">
 					<dim>1</dim>
+				</port>
+				<port id="2" precision="I64">
 					<dim>1</dim>
+				</port>
+				<port id="3" precision="I32">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Softmax_4685">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>256</dim>
+				<port id="4" precision="I64">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2736" name="attn.63" type="SoftMax" version="opset8">
-			<data axis="-1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="attn.63"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>256</dim>
+		<layer id="2605" name="Constant_77887" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
+			<output>
+				<port id="0" precision="I64">
+					<dim>3</dim>
 				</port>
-			</input>
+			</output>
+		</layer>
+		<layer id="2606" name="/up_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="1" precision="FP32" names="attn.63">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>256</dim>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Constant_5_output_0">
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2737" name="Constant_148062" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="2596106544" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8882, v.127"/>
-			</rt_info>
+		<layer id="2607" name="/up_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Mul_1" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
+				<port id="0" precision="I64">
+					<dim>1</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>1</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1280</dim>
-					<dim>1280</dim>
+				<port id="2" precision="I64" names="/up_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Mul_1_output_0">
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2738" name="v.127" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8882, v.127"/>
-			</rt_info>
+		<layer id="2608" name="ScatterUpdate_77888" type="ScatterUpdate" version="opset3">
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+				<port id="0" precision="I64">
+					<dim>3</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>1280</dim>
-					<dim>1280</dim>
+				<port id="1" precision="I64">
+					<dim>1</dim>
+				</port>
+				<port id="2" precision="I64">
+					<dim>1</dim>
+				</port>
+				<port id="3" precision="I32">
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.127">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+				<port id="4" precision="I64">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2739" name="Constant_107958" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
+		<layer id="2609" name="Constant_77891" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970950" size="24" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>2</dim>
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2740" name="onnx::Gather_4644" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4644, onnx::Gather_4647, onnx::Gather_4650"/>
-			</rt_info>
+		<layer id="2610" name="/up_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Slice_1" type="StridedSlice" version="opset1">
+			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask="" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>10240</dim>
 				</port>
-			</input>
-			<output>
-				<port id="1" precision="I64" names="onnx::Gather_4644,onnx::Gather_4647,onnx::Gather_4650">
+				<port id="1" precision="I64">
 					<dim>3</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="2741" name="onnx::Gather_4651" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4651"/>
-			</rt_info>
+				<port id="2" precision="I64">
+					<dim>3</dim>
+				</port>
+				<port id="3" precision="I64">
+					<dim>3</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64" names="onnx::Gather_4651"/>
+				<port id="4" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Slice_1_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>5120</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2742" name="Constant_17886" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_17886"/>
-			</rt_info>
+		<layer id="2611" name="/up_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Mul_3" type="Gelu" version="opset7">
+			<data approximation_mode="ERF" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>5120</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="1" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Mul_3_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>5120</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2743" name="onnx::Div_4652" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_17886, onnx::Div_4652, onnx::Gather_4651"/>
-			</rt_info>
+		<layer id="2612" name="/up_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Mul_4" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>5120</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>5120</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_4652"/>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/ff/net.0/Mul_4_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>5120</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2744" name="onnx::Div_4653" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_4653"/>
-			</rt_info>
+		<layer id="2613" name="Constant_85640_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 5120" offset="1341317842" size="13107200" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_4653"/>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>5120</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2745" name="onnx::Cast_4654" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
+		<layer id="2614" name="Constant_85640" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_4654, onnx::Cast_4655, onnx::Div_4653, onnx::Unsqueeze_4656"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>5120</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_4654,onnx::Cast_4655,onnx::Unsqueeze_4656"/>
-			</output>
-		</layer>
-		<layer id="2746" name="onnx::Unsqueeze_4664" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4664"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4664">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2747" name="onnx::Concat_4665" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4665, onnx::Unsqueeze_4664"/>
-			</rt_info>
+		<layer id="2615" name="/up_blocks.1/attentions.1/transformer_blocks.0/ff/net.2/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>5120</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>5120</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_4665">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/ff/net.2/MatMul_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2748" name="onnx::Reshape_4666" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_4666"/>
-			</rt_info>
+		<layer id="2616" name="/up_blocks.1/attentions.1/transformer_blocks.0/ff/net.2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64">
-					<dim>4</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/ff/net.2/Add_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2749" name="onnx::Transpose_4667" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_4667"/>
-			</rt_info>
+		<layer id="2617" name="/up_blocks.1/attentions.1/transformer_blocks.0/Add_2" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_4667">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/transformer_blocks.0/Add_2_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
-					<dim>8</dim>
-					<dim>160</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2750" name="Constant_17999" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_17999"/>
-			</rt_info>
+		<layer id="2618" name="/up_blocks.1/attentions.1/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="214428690" size="32" />
 			<output>
-				<port id="0" precision="I64">
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.1/Constant_1_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2751" name="onnx::Reshape_4668" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4668"/>
-			</rt_info>
+		<layer id="2619" name="/up_blocks.1/attentions.1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>256</dim>
-					<dim>8</dim>
-					<dim>160</dim>
+					<dim>1280</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_4668">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/Reshape_1_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+					<dim>16</dim>
+					<dim>16</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2752" name="onnx::Gather_4645" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4645"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_4645"/>
-			</output>
-		</layer>
-		<layer id="2753" name="Constant_17878" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_17878"/>
-			</rt_info>
+		<layer id="2620" name="Constant_24399" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="13790206" size="32" />
 			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="2754" name="onnx::Unsqueeze_4646" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_17878, onnx::Gather_4645, onnx::Unsqueeze_4646"/>
-			</rt_info>
-			<input>
 				<port id="0" precision="I64">
-					<dim>3</dim>
+					<dim>4</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_4646"/>
 			</output>
 		</layer>
-		<layer id="2755" name="onnx::Mul_4669" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4669"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_4669"/>
-			</output>
-		</layer>
-		<layer id="2756" name="onnx::Unsqueeze_4670" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4669, onnx::Unsqueeze_4670"/>
-			</rt_info>
+		<layer id="2621" name="/up_blocks.1/attentions.1/Transpose_1" type="Transpose" version="opset1">
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>16</dim>
+					<dim>16</dim>
+					<dim>1280</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_4670"/>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/Transpose_1_output_0">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2757" name="onnx::Unsqueeze_4675" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4675"/>
-			</rt_info>
+		<layer id="2622" name="up_blocks.1.attentions.1.proj_out.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280, 1, 1" offset="1354425042" size="3276800" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4675">
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2758" name="onnx::Concat_4676" type="Unsqueeze" version="opset1">
+		<layer id="2623" name="up_blocks.1.attentions.1.proj_out.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4676, onnx::Unsqueeze_4675"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_4676">
+				<port id="1" precision="FP32" names="up_blocks.1.attentions.1.proj_out.weight">
+					<dim>1280</dim>
+					<dim>1280</dim>
 					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2759" name="Constant_88950" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_17882, onnx::Concat_4678, onnx::Gather_4648, onnx::Unsqueeze_4649, onnx::Unsqueeze_4677"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2760" name="Constant_17882" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_17882"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="2761" name="onnx::Unsqueeze_4649" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_17882, onnx::Concat_4678, onnx::Gather_4648, onnx::Unsqueeze_4649, onnx::Unsqueeze_4677"/>
-			</rt_info>
+		<layer id="2624" name="/up_blocks.1/attentions.1/proj_out/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_4678">
 					<dim>1</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="2762" name="onnx::Div_4671" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_4671"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_4671"/>
-			</output>
-		</layer>
-		<layer id="2763" name="onnx::Cast_4672" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_4672, onnx::Cast_4673, onnx::Div_4671, onnx::Unsqueeze_4674"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_4672,onnx::Cast_4673,onnx::Unsqueeze_4674"/>
+				<port id="2" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2764" name="onnx::Unsqueeze_4679" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4679"/>
-			</rt_info>
+		<layer id="2625" name="Reshape_24421_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="1357701842" size="2560" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4679">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2765" name="onnx::Concat_4680" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4680, onnx::Unsqueeze_4679"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+					<dim>1280</dim>
 					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_4680">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2766" name="onnx::Reshape_4681" type="Concat" version="opset1">
-			<data axis="0"/>
+		<layer id="2626" name="Reshape_24421" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4681"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
+					<dim>1280</dim>
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_4681">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2767" name="v.131" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="v.131"/>
-			</rt_info>
+		<layer id="2627" name="/up_blocks.1/attentions.1/proj_out/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.131">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/proj_out/Conv_output_0">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
 					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2768" name="out.63" type="Einsum" version="opset7">
-			<data equation="bij,bjd-&gt;bid"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="out.63"/>
-			</rt_info>
+		<layer id="2628" name="/up_blocks.1/attentions.1/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
 					<dim>16</dim>
-					<dim>256</dim>
-					<dim>256</dim>
 				</port>
 				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
 					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="out.63">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.1/Add_output_0">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
 					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2769" name="onnx::Gather_4688" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4688, onnx::Gather_4691, onnx::Gather_4694"/>
-			</rt_info>
+		<layer id="2629" name="/up_blocks.1/Concat_2" type="Concat" version="opset1">
+			<data axis="1" />
 			<input>
 				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>16</dim>
 					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_4688,onnx::Gather_4691,onnx::Gather_4694">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/Concat_2_output_0">
+					<dim>2</dim>
+					<dim>1920</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2770" name="onnx::Gather_4689" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4689"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_4689"/>
-			</output>
-		</layer>
-		<layer id="2771" name="Constant_18124" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_18124"/>
-			</rt_info>
+		<layer id="2630" name="up_blocks.1.resnets.2.conv_shortcut.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1920, 1, 1" offset="1357704402" size="4915200" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1920</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2772" name="onnx::Div_4690" type="Gather" version="opset8">
-			<data batch_dims="0"/>
+		<layer id="2631" name="up_blocks.1.resnets.2.conv_shortcut.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_18124, onnx::Div_4690, onnx::Gather_4689"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1920</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_4690"/>
-			</output>
-		</layer>
-		<layer id="2773" name="onnx::Div_4697" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_4697"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_4697"/>
+				<port id="1" precision="FP32" names="up_blocks.1.resnets.2.conv_shortcut.weight">
+					<dim>1280</dim>
+					<dim>1920</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2774" name="onnx::Cast_4698" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_4698, onnx::Cast_4699, onnx::Div_4697, onnx::Unsqueeze_4700"/>
-			</rt_info>
+		<layer id="2632" name="/up_blocks.1/resnets.2/conv_shortcut/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1920</dim>
+					<dim>16</dim>
+					<dim>16</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1920</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_4698,onnx::Cast_4699,onnx::Unsqueeze_4700"/>
+				<port id="2" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2775" name="onnx::Unsqueeze_4702" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4702"/>
-			</rt_info>
+		<layer id="2633" name="Reshape_24806_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="1362619602" size="2560" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4702">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2776" name="onnx::Concat_4703" type="Unsqueeze" version="opset1">
+		<layer id="2634" name="Reshape_24806" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4703, onnx::Unsqueeze_4702"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_4703">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2777" name="Constant_90744" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="19464604" size="16"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_4710"/>
-			</rt_info>
+		<layer id="2635" name="/up_blocks.1/resnets.2/conv_shortcut/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.2/conv_shortcut/Conv_output_0">
 					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2778" name="Constant_90745" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
+		<layer id="2636" name="/up_blocks.1/resnets.2/norm1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64" names="/up_blocks.1/resnets.2/norm1/Constant_output_0">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2779" name="Gather_90746" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_4710"/>
-			</rt_info>
+		<layer id="2637" name="/up_blocks.1/resnets.2/norm1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1920</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>2</dim>
+					<dim>3</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.2/norm1/Reshape_output_0">
 					<dim>2</dim>
+					<dim>32</dim>
+					<dim>15360</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2780" name="onnx::Reshape_4710" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_4710"/>
-			</rt_info>
-			<input>
+		<layer id="2638" name="Constant_24460" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
+			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>2</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_4710">
-					<dim>4</dim>
-				</port>
 			</output>
 		</layer>
-		<layer id="2781" name="onnx::Transpose_4711" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_4711"/>
-			</rt_info>
+		<layer id="2639" name="MVN_24461" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>15360</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_4711">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.2/norm1/InstanceNormalization_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+					<dim>32</dim>
+					<dim>15360</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2782" name="Constant_18245" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_18245"/>
-			</rt_info>
+		<layer id="2640" name="/up_blocks.1/resnets.2/norm1/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1920</dim>
+					<dim>16</dim>
+					<dim>16</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64">
+				<port id="1" precision="I64" names="/up_blocks.1/resnets.2/norm1/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2783" name="onnx::Reshape_4712" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4712"/>
-			</rt_info>
+		<layer id="2641" name="/up_blocks.1/resnets.2/norm1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+					<dim>32</dim>
+					<dim>15360</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_4712">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.2/norm1/Reshape_1_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>8</dim>
-					<dim>160</dim>
+					<dim>1920</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2784" name="onnx::Div_4713" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_4713"/>
-			</rt_info>
+		<layer id="2642" name="Constant_87076_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1920, 1, 1" offset="1362622162" size="3840" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_4713"/>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1920</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2785" name="onnx::Cast_4714" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
+		<layer id="2643" name="Constant_87076" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_4714, onnx::Cast_4715, onnx::Div_4713, onnx::Unsqueeze_4716"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1920</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_4714,onnx::Cast_4715,onnx::Unsqueeze_4716"/>
-			</output>
-		</layer>
-		<layer id="2786" name="onnx::Unsqueeze_4719" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4719"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4719">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1920</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2787" name="onnx::Concat_4720" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4720, onnx::Unsqueeze_4719"/>
-			</rt_info>
+		<layer id="2644" name="/up_blocks.1/resnets.2/norm1/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1920</dim>
+					<dim>16</dim>
+					<dim>16</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1920</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_4720">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.2/norm1/Mul_output_0">
+					<dim>2</dim>
+					<dim>1920</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2788" name="Constant_88977" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_18128, onnx::Concat_4722, onnx::Gather_4692, onnx::Unsqueeze_4693, onnx::Unsqueeze_4721"/>
-			</rt_info>
+		<layer id="2645" name="Constant_87077_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1920, 1, 1" offset="1362626002" size="3840" />
 			<output>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1920</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2789" name="Constant_18128" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
+		<layer id="2646" name="Constant_87077" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_18128"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1920</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1920</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2790" name="onnx::Unsqueeze_4693" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_18128, onnx::Concat_4722, onnx::Gather_4692, onnx::Unsqueeze_4693, onnx::Unsqueeze_4721"/>
-			</rt_info>
+		<layer id="2647" name="/up_blocks.1/resnets.2/norm1/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1920</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1920</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_4722">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.2/norm1/Add_output_0">
+					<dim>2</dim>
+					<dim>1920</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2791" name="onnx::Gather_4695" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4695"/>
-			</rt_info>
+		<layer id="2648" name="/up_blocks.1/resnets.2/nonlinearity/Mul" type="Swish" version="opset4">
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1920</dim>
+					<dim>16</dim>
+					<dim>16</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64" names="onnx::Gather_4695"/>
+				<port id="1" precision="FP32" names="/up_blocks.1/resnets.2/nonlinearity/Mul_output_0">
+					<dim>2</dim>
+					<dim>1920</dim>
+					<dim>16</dim>
+					<dim>16</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2792" name="Constant_18132" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_18132"/>
-			</rt_info>
+		<layer id="2649" name="up_blocks.1.resnets.2.conv1.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1920, 3, 3" offset="1362629842" size="44236800" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1920</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2793" name="onnx::Unsqueeze_4696" type="Gather" version="opset8">
-			<data batch_dims="0"/>
+		<layer id="2650" name="up_blocks.1.resnets.2.conv1.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_18132, onnx::Gather_4695, onnx::Unsqueeze_4696"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1920</dim>
+					<dim>3</dim>
 					<dim>3</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_4696"/>
-			</output>
-		</layer>
-		<layer id="2794" name="onnx::Mul_4717" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4717"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_4717"/>
+				<port id="1" precision="FP32" names="up_blocks.1.resnets.2.conv1.weight">
+					<dim>1280</dim>
+					<dim>1920</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2795" name="onnx::Unsqueeze_4718" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4717, onnx::Unsqueeze_4718"/>
-			</rt_info>
+		<layer id="2651" name="/up_blocks.1/resnets.2/conv1/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1920</dim>
+					<dim>16</dim>
+					<dim>16</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1920</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_4718"/>
+				<port id="2" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2796" name="onnx::Unsqueeze_4723" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4723"/>
-			</rt_info>
+		<layer id="2652" name="Reshape_24585_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="1406866642" size="2560" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4723">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2797" name="onnx::Concat_4724" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4724, onnx::Unsqueeze_4723"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+					<dim>1280</dim>
 					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_4724">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2798" name="onnx::Reshape_4725" type="Concat" version="opset1">
-			<data axis="0"/>
+		<layer id="2653" name="Reshape_24585" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4725"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
+					<dim>1280</dim>
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_4725">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2799" name="onnx::MatMul_4726" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_4726"/>
-			</rt_info>
+		<layer id="2654" name="/up_blocks.1/resnets.2/conv1/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>8</dim>
-					<dim>160</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_4726">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.2/conv1/Conv_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2800" name="Constant_148069" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="2602660144" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4728, onnx::MatMul_8887"/>
-			</rt_info>
+		<layer id="2655" name="up_blocks.1.resnets.2.time_emb_proj.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="1406869202" size="3276800" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2801" name="onnx::Add_4728" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="2656" name="up_blocks.1.resnets.2.time_emb_proj.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4728, onnx::MatMul_8887"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="up_blocks.1.resnets.2.time_emb_proj.weight">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2657" name="/up_blocks.1/resnets.2/time_emb_proj/Gemm/WithoutBiases" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 				<port id="1" precision="FP32">
@@ -47701,83 +43837,70 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_4728">
+				<port id="2" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2802" name="input.684" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2658" name="Constant_87078_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280" offset="1410146002" size="2560" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2659" name="Constant_87078" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.684"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1280</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.684">
-					<dim>2</dim>
-					<dim>256</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2803" name="input.688" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.688"/>
-			</rt_info>
+		<layer id="2660" name="/up_blocks.1/resnets.2/time_emb_proj/Gemm" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
+					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.688">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.2/time_emb_proj/Gemm_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2804" name="Constant_18366" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_18366"/>
-			</rt_info>
+		<layer id="2661" name="/up_blocks.1/resnets.2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64">
+				<port id="0" precision="I64" names="/up_blocks.1/resnets.2/Constant_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2805" name="onnx::Mul_4739" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4735, onnx::Div_4738, onnx::Mul_4739, onnx::Pow_4732, onnx::ReduceMean_4734, onnx::Sqrt_4737, onnx::Sub_4731"/>
-			</rt_info>
+		<layer id="2662" name="/up_blocks.1/resnets.2/Unsqueeze" type="Unsqueeze" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 				<port id="1" precision="I64">
@@ -47785,1293 +43908,1072 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_4739">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.2/Unsqueeze_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2806" name="Constant_150478" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="2609213744" size="5120"/>
+		<layer id="2663" name="/up_blocks.1/resnets.2/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="7064752" size="8" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="I64" names="/up_blocks.1/resnets.2/Constant_1_output_0">
 					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2807" name="onnx::Add_4740" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4740"/>
-			</rt_info>
+		<layer id="2664" name="/up_blocks.1/resnets.2/Unsqueeze_1" type="Unsqueeze" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
-				</port>
-				<port id="1" precision="FP32">
 					<dim>1</dim>
+				</port>
+				<port id="1" precision="I64">
 					<dim>1</dim>
-					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_4740">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.2/Unsqueeze_1_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2808" name="Constant_150479" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="2609218864" size="5120"/>
-			<output>
-				<port id="0" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2809" name="onnx::MatMul_4741" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_4741"/>
-			</rt_info>
+		<layer id="2665" name="/up_blocks.1/resnets.2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_4741">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.2/Add_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2810" name="Constant_148077" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="2609223984" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8888, q.135"/>
-			</rt_info>
+		<layer id="2666" name="/up_blocks.1/resnets.2/norm2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1280</dim>
-					<dim>1280</dim>
+				<port id="0" precision="I64" names="/up_blocks.1/resnets.2/norm2/Constant_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2811" name="q.135" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8888, q.135"/>
-			</rt_info>
+		<layer id="2667" name="/up_blocks.1/resnets.2/norm2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>1280</dim>
-					<dim>1280</dim>
+				<port id="1" precision="I64">
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.135">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.2/norm2/Reshape_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>32</dim>
+					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2812" name="Constant_108027" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
+		<layer id="2668" name="Constant_24633" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>2</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2813" name="onnx::Gather_4748" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4748, onnx::Gather_4751, onnx::Gather_4754"/>
-			</rt_info>
+		<layer id="2669" name="MVN_24634" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>32</dim>
+					<dim>10240</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_4748,onnx::Gather_4751,onnx::Gather_4754">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.2/norm2/InstanceNormalization_output_0">
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2814" name="onnx::Gather_4755" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4755"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_4755"/>
-			</output>
-		</layer>
-		<layer id="2815" name="Constant_18392" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_18392"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="2816" name="onnx::Div_4756" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_18392, onnx::Div_4756, onnx::Gather_4755"/>
-			</rt_info>
+		<layer id="2670" name="/up_blocks.1/resnets.2/norm2/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_4756"/>
-			</output>
-		</layer>
-		<layer id="2817" name="onnx::Div_4757" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_4757"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_4757"/>
+				<port id="1" precision="I64" names="/up_blocks.1/resnets.2/norm2/Shape_output_0">
+					<dim>4</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2818" name="onnx::Cast_4758" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_4758, onnx::Cast_4759, onnx::Div_4757, onnx::Unsqueeze_4760"/>
-			</rt_info>
+		<layer id="2671" name="/up_blocks.1/resnets.2/norm2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>10240</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_4758,onnx::Cast_4759,onnx::Unsqueeze_4760"/>
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.2/norm2/Reshape_1_output_0">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2819" name="onnx::Unsqueeze_4768" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4768"/>
-			</rt_info>
+		<layer id="2672" name="Constant_87079_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="1410148562" size="2560" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4768">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2820" name="onnx::Concat_4769" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4769, onnx::Unsqueeze_4768"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+					<dim>1280</dim>
 					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_4769">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2821" name="onnx::Reshape_4770" type="Concat" version="opset1">
-			<data axis="0"/>
+		<layer id="2673" name="Constant_87079" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_4770"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2822" name="onnx::Transpose_4771" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_4771"/>
-			</rt_info>
+		<layer id="2674" name="/up_blocks.1/resnets.2/norm2/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_4771">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.2/norm2/Mul_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>8</dim>
-					<dim>160</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2823" name="Constant_18505" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_18505"/>
-			</rt_info>
+		<layer id="2675" name="Constant_87080_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="1410151122" size="2560" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2824" name="onnx::Reshape_4772" type="Transpose" version="opset1">
+		<layer id="2676" name="Constant_87080" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4772"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>8</dim>
-					<dim>160</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_4772">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2825" name="onnx::Gather_4749" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4749"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_4749"/>
-			</output>
-		</layer>
-		<layer id="2826" name="Constant_18384" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_18384"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="2827" name="onnx::Unsqueeze_4750" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_18384, onnx::Gather_4749, onnx::Unsqueeze_4750"/>
-			</rt_info>
+		<layer id="2677" name="/up_blocks.1/resnets.2/norm2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_4750"/>
-			</output>
-		</layer>
-		<layer id="2828" name="onnx::Mul_4773" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4773"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_4773"/>
-			</output>
-		</layer>
-		<layer id="2829" name="onnx::Unsqueeze_4774" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4773, onnx::Unsqueeze_4774"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_4774"/>
-			</output>
-		</layer>
-		<layer id="2830" name="onnx::Unsqueeze_4779" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4779"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4779">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.2/norm2/Add_output_0">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2831" name="onnx::Concat_4780" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4780, onnx::Unsqueeze_4779"/>
-			</rt_info>
+		<layer id="2678" name="/up_blocks.1/resnets.2/nonlinearity_1/Mul" type="Swish" version="opset4">
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_4780">
-					<dim>1</dim>
+				<port id="1" precision="FP32" names="/up_blocks.1/resnets.2/nonlinearity_1/Mul_output_0">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2832" name="Constant_89004" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_18388, onnx::Concat_4782, onnx::Gather_4752, onnx::Unsqueeze_4753, onnx::Unsqueeze_4781"/>
-			</rt_info>
+		<layer id="2679" name="up_blocks.1.resnets.2.conv2.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280, 3, 3" offset="1410153682" size="29491200" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2833" name="Constant_18388" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_18388"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="2834" name="onnx::Unsqueeze_4753" type="Gather" version="opset8">
-			<data batch_dims="0"/>
+		<layer id="2680" name="up_blocks.1.resnets.2.conv2.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_18388, onnx::Concat_4782, onnx::Gather_4752, onnx::Unsqueeze_4753, onnx::Unsqueeze_4781"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
 					<dim>3</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_4782">
-					<dim>1</dim>
+				<port id="1" precision="FP32" names="up_blocks.1.resnets.2.conv2.weight">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2835" name="onnx::Div_4775" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_4775"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_4775"/>
-			</output>
-		</layer>
-		<layer id="2836" name="onnx::Cast_4776" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_4776, onnx::Cast_4777, onnx::Div_4775, onnx::Unsqueeze_4778"/>
-			</rt_info>
+		<layer id="2681" name="/up_blocks.1/resnets.2/conv2/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_4776,onnx::Cast_4777,onnx::Unsqueeze_4778"/>
+				<port id="2" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2837" name="onnx::Unsqueeze_4783" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4783"/>
-			</rt_info>
+		<layer id="2682" name="Reshape_24758_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="1439644882" size="2560" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4783">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2838" name="onnx::Concat_4784" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4784, onnx::Unsqueeze_4783"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+					<dim>1280</dim>
 					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_4784">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2839" name="onnx::Reshape_4785" type="Concat" version="opset1">
-			<data axis="0"/>
+		<layer id="2683" name="Reshape_24758" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4785"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
+					<dim>1280</dim>
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_4785">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2840" name="q.139" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="q.139"/>
-			</rt_info>
+		<layer id="2684" name="/up_blocks.1/resnets.2/conv2/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.139">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2841" name="Constant_148084" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 768" offset="2615777584" size="3932160"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.135, onnx::MatMul_8889"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.2/conv2/Conv_output_0">
+					<dim>2</dim>
 					<dim>1280</dim>
-					<dim>768</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2842" name="k.135" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.135, onnx::MatMul_8889"/>
-			</rt_info>
+		<layer id="2685" name="/up_blocks.1/resnets.2/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>77</dim>
-					<dim>768</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 				<port id="1" precision="FP32">
+					<dim>2</dim>
 					<dim>1280</dim>
-					<dim>768</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.135">
+				<port id="2" precision="FP32" names="/up_blocks.1/resnets.2/Add_1_output_0,/up_blocks.1/resnets.2/Div_output_0">
 					<dim>2</dim>
-					<dim>77</dim>
 					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2843" name="onnx::Transpose_4800" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_4800"/>
-			</rt_info>
+		<layer id="2686" name="/up_blocks.1/attentions.2/norm/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.2/norm/Constant_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2687" name="/up_blocks.1/attentions.2/norm/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>77</dim>
 					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_4800">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/norm/Reshape_output_0">
 					<dim>2</dim>
-					<dim>77</dim>
-					<dim>8</dim>
-					<dim>160</dim>
+					<dim>32</dim>
+					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2844" name="Constant_18626" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_18626"/>
-			</rt_info>
+		<layer id="2688" name="Constant_24846" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2845" name="onnx::Reshape_4801" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4801"/>
-			</rt_info>
+		<layer id="2689" name="MVN_24847" type="MVN" version="opset6">
+			<data eps="9.9999999747524271e-07" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>77</dim>
-					<dim>8</dim>
-					<dim>160</dim>
+					<dim>32</dim>
+					<dim>10240</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_4801">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/norm/InstanceNormalization_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>77</dim>
-					<dim>160</dim>
+					<dim>32</dim>
+					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2846" name="k.139" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.139"/>
-			</rt_info>
+		<layer id="2690" name="/up_blocks.1/attentions.2/norm/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>77</dim>
-					<dim>160</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.139">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>160</dim>
+				<port id="1" precision="I64" names="/up_blocks.1/attentions.2/norm/Shape_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2847" name="onnx::Mul_4837" type="Einsum" version="opset7">
-			<data equation="bid,bjd-&gt;bij"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4837"/>
-			</rt_info>
+		<layer id="2691" name="/up_blocks.1/attentions.2/norm/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>10240</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>160</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_4837">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/norm/Reshape_1_output_0">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
 					<dim>16</dim>
-					<dim>256</dim>
-					<dim>77</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2848" name="Constant_150480" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1" offset="316114004" size="4"/>
+		<layer id="2692" name="Constant_87081_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="1439647442" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>1280</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2849" name="onnx::Softmax_4839" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2693" name="Constant_87081" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Softmax_4839"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>77</dim>
-				</port>
-				<port id="1" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>1280</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Softmax_4839">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>77</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2850" name="attn.67" type="SoftMax" version="opset8">
-			<data axis="-1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="attn.67"/>
-			</rt_info>
+		<layer id="2694" name="/up_blocks.1/attentions.2/norm/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
 					<dim>16</dim>
-					<dim>256</dim>
-					<dim>77</dim>
+					<dim>16</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="attn.67">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/norm/Mul_output_0">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
 					<dim>16</dim>
-					<dim>256</dim>
-					<dim>77</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2851" name="Constant_148091" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 768" offset="2619709744" size="3932160"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8890, v.135"/>
-			</rt_info>
+		<layer id="2695" name="Constant_87082_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="1439650002" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>1280</dim>
-					<dim>768</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2852" name="v.135" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="2696" name="Constant_87082" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8890, v.135"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>77</dim>
-					<dim>768</dim>
-				</port>
-				<port id="1" precision="FP32">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>1280</dim>
-					<dim>768</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.135">
-					<dim>2</dim>
-					<dim>77</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2853" name="onnx::Transpose_4825" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_4825"/>
-			</rt_info>
+		<layer id="2697" name="/up_blocks.1/attentions.2/norm/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>77</dim>
 					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_4825">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/norm/Add_output_0">
 					<dim>2</dim>
-					<dim>77</dim>
-					<dim>8</dim>
-					<dim>160</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2854" name="Constant_18634" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_18634"/>
-			</rt_info>
+		<layer id="2698" name="up_blocks.1.attentions.2.proj_in.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280, 1, 1" offset="1439652562" size="3276800" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2855" name="onnx::Reshape_4826" type="Transpose" version="opset1">
+		<layer id="2699" name="up_blocks.1.attentions.2.proj_in.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4826"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>77</dim>
-					<dim>8</dim>
-					<dim>160</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_4826">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>77</dim>
-					<dim>160</dim>
+				<port id="1" precision="FP32" names="up_blocks.1.attentions.2.proj_in.weight">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2856" name="v.139" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="v.139"/>
-			</rt_info>
+		<layer id="2700" name="/up_blocks.1/attentions.2/proj_in/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>77</dim>
-					<dim>160</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="v.139">
+					<dim>1280</dim>
 					<dim>16</dim>
-					<dim>77</dim>
-					<dim>160</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2857" name="out.67" type="Einsum" version="opset7">
-			<data equation="bij,bjd-&gt;bid"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="out.67"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
 					<dim>16</dim>
-					<dim>256</dim>
-					<dim>77</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>160</dim>
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="out.67">
+				<port id="2" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
 					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2858" name="onnx::Gather_4842" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4842, onnx::Gather_4845, onnx::Gather_4848"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
 					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="I64" names="onnx::Gather_4842,onnx::Gather_4845,onnx::Gather_4848">
-					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2859" name="onnx::Gather_4843" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4843"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_4843"/>
-			</output>
-		</layer>
-		<layer id="2860" name="Constant_18646" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_18646"/>
-			</rt_info>
+		<layer id="2701" name="Reshape_24969_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="1442929362" size="2560" />
 			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="2861" name="onnx::Div_4844" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_18646, onnx::Div_4844, onnx::Gather_4843"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Div_4844"/>
-			</output>
-		</layer>
-		<layer id="2862" name="onnx::Div_4851" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_4851"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_4851"/>
 			</output>
 		</layer>
-		<layer id="2863" name="onnx::Cast_4852" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
+		<layer id="2702" name="Reshape_24969" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_4852, onnx::Cast_4853, onnx::Div_4851, onnx::Unsqueeze_4854"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_4852,onnx::Cast_4853,onnx::Unsqueeze_4854"/>
-			</output>
-		</layer>
-		<layer id="2864" name="onnx::Unsqueeze_4856" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4856"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4856">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2865" name="onnx::Concat_4857" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4857, onnx::Unsqueeze_4856"/>
-			</rt_info>
+		<layer id="2703" name="/up_blocks.1/attentions.2/proj_in/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_4857">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/proj_in/Conv_output_0">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2866" name="Constant_90754" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="19464604" size="16"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_4864"/>
-			</rt_info>
+		<layer id="2704" name="Constant_24997" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9116600" size="32" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>2</dim>
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2867" name="Constant_90755" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="2868" name="Gather_90756" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_4864"/>
-			</rt_info>
+		<layer id="2705" name="/up_blocks.1/attentions.2/Transpose" type="Transpose" version="opset1">
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>2</dim>
+					<dim>4</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/Transpose_output_0">
 					<dim>2</dim>
+					<dim>16</dim>
+					<dim>16</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2869" name="onnx::Reshape_4864" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_4864"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>2</dim>
-				</port>
-			</input>
+		<layer id="2706" name="/up_blocks.1/attentions.2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="151498600" size="24" />
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_4864">
-					<dim>4</dim>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.2/Constant_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2870" name="onnx::Transpose_4865" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_4865"/>
-			</rt_info>
+		<layer id="2707" name="/up_blocks.1/attentions.2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
+					<dim>2</dim>
 					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+					<dim>16</dim>
+					<dim>1280</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_4865">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/Reshape_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>256</dim>
-					<dim>160</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2871" name="Constant_18767" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_18767"/>
-			</rt_info>
+		<layer id="2708" name="Constant_25006" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2872" name="onnx::Reshape_4866" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4866"/>
-			</rt_info>
+		<layer id="2709" name="/up_blocks.1/attentions.2/transformer_blocks.0/norm1/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>256</dim>
-					<dim>160</dim>
+					<dim>1280</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_4866">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/norm1/Div_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
-					<dim>8</dim>
-					<dim>160</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2873" name="onnx::Div_4867" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_4867"/>
-			</rt_info>
+		<layer id="2710" name="Constant_87083_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="1442931922" size="2560" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_4867"/>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2874" name="onnx::Cast_4868" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
+		<layer id="2711" name="Constant_87083" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_4868, onnx::Cast_4869, onnx::Div_4867, onnx::Unsqueeze_4870"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_4868,onnx::Cast_4869,onnx::Unsqueeze_4870"/>
-			</output>
-		</layer>
-		<layer id="2875" name="onnx::Unsqueeze_4873" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4873"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4873">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2876" name="onnx::Concat_4874" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4874, onnx::Unsqueeze_4873"/>
-			</rt_info>
+		<layer id="2712" name="/up_blocks.1/attentions.2/transformer_blocks.0/norm1/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
+				</port>
+				<port id="1" precision="FP32">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_4874">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/norm1/Mul_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2877" name="Constant_89031" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_18650, onnx::Concat_4876, onnx::Gather_4846, onnx::Unsqueeze_4847, onnx::Unsqueeze_4875"/>
-			</rt_info>
+		<layer id="2713" name="Constant_87084_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="1442934482" size="2560" />
 			<output>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2878" name="Constant_18650" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
+		<layer id="2714" name="Constant_87084" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_18650"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2879" name="onnx::Unsqueeze_4847" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_18650, onnx::Concat_4876, onnx::Gather_4846, onnx::Unsqueeze_4847, onnx::Unsqueeze_4875"/>
-			</rt_info>
+		<layer id="2715" name="/up_blocks.1/attentions.2/transformer_blocks.0/norm1/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_4876">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/norm1/Add_1_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2880" name="onnx::Gather_4849" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4849"/>
-			</rt_info>
+		<layer id="2716" name="Constant_85650_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="1442937042" size="3276800" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Gather_4849"/>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2881" name="Constant_18654" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
+		<layer id="2717" name="Constant_85650" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_18654"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2882" name="onnx::Unsqueeze_4850" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_18654, onnx::Gather_4849, onnx::Unsqueeze_4850"/>
-			</rt_info>
+		<layer id="2718" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/to_q/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_4850"/>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/to_q/MatMul_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2883" name="onnx::Mul_4871" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4871"/>
-			</rt_info>
+		<layer id="2719" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="154780544" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Mul_4871"/>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Constant_output_0">
+					<dim>4</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2884" name="onnx::Unsqueeze_4872" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4871, onnx::Unsqueeze_4872"/>
-			</rt_info>
+		<layer id="2720" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_4872"/>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Reshape_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>8</dim>
+					<dim>160</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2885" name="onnx::Unsqueeze_4877" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4877"/>
-			</rt_info>
+		<layer id="2721" name="Constant_25029" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4877">
-					<dim>1</dim>
+				<port id="0" precision="I64">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2886" name="onnx::Concat_4878" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4878, onnx::Unsqueeze_4877"/>
-			</rt_info>
+		<layer id="2722" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Transpose" type="Transpose" version="opset1">
 			<input>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>8</dim>
+					<dim>160</dim>
+				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_4878">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Transpose_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2887" name="onnx::Reshape_4879" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4879"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
+		<layer id="2723" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="154780576" size="24" />
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_4879">
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Constant_1_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2888" name="onnx::MatMul_4880" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_4880"/>
-			</rt_info>
+		<layer id="2724" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
 					<dim>8</dim>
+					<dim>256</dim>
 					<dim>160</dim>
 				</port>
 				<port id="1" precision="I64">
@@ -49079,763 +44981,635 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_4880">
-					<dim>2</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Reshape_1_output_0">
+					<dim>16</dim>
 					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2889" name="Constant_148098" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="2623641904" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4882, onnx::MatMul_8911"/>
-			</rt_info>
+		<layer id="2725" name="Constant_85657_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="1446213842" size="3276800" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2890" name="onnx::Add_4882" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="2726" name="Constant_85657" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4882, onnx::MatMul_8911"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
-				</port>
-				<port id="1" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_4882">
-					<dim>2</dim>
-					<dim>256</dim>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2891" name="input.692" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.692"/>
-			</rt_info>
+		<layer id="2727" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/to_k/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>2</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
+					<dim>1280</dim>
 					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.692">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/to_k/MatMul_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2892" name="input.696" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.696"/>
-			</rt_info>
+		<layer id="2728" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="154780544" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Constant_2_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2729" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Reshape_2" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.696">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Reshape_2_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>8</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2893" name="Constant_18888" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_18888"/>
-			</rt_info>
+		<layer id="2730" name="Constant_25045" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2894" name="onnx::Mul_4893" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4889, onnx::Div_4892, onnx::Mul_4893, onnx::Pow_4886, onnx::ReduceMean_4888, onnx::Sqrt_4891, onnx::Sub_4885"/>
-			</rt_info>
+		<layer id="2731" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Transpose_1" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>8</dim>
+					<dim>160</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_4893">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Transpose_1_output_0">
 					<dim>2</dim>
+					<dim>8</dim>
 					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2895" name="Constant_150482" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="2630195504" size="5120"/>
+		<layer id="2732" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="154780576" size="24" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1280</dim>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Constant_3_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2896" name="onnx::Add_4894" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4894"/>
-			</rt_info>
+		<layer id="2733" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Reshape_3" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>8</dim>
 					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>160</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1280</dim>
+				<port id="1" precision="I64">
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_4894">
-					<dim>2</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Reshape_3_output_0">
+					<dim>16</dim>
 					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2897" name="Constant_150483" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="2630200624" size="5120"/>
+		<layer id="2734" name="Constant_87085_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1" offset="158057400" size="2" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2898" name="onnx::MatMul_4895" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2735" name="Constant_87085" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_4895"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
-				</port>
-				<port id="1" precision="FP32">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_4895">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2899" name="Constant_148106" type="Const" version="opset1">
-			<data element_type="f32" shape="10240, 1280" offset="2630205744" size="52428800"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4897, onnx::MatMul_8912"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>10240</dim>
-					<dim>1280</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2900" name="onnx::Add_4897" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4897, onnx::MatMul_8912"/>
-			</rt_info>
+		<layer id="2736" name="Multiply_86211" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>2</dim>
+					<dim>16</dim>
 					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>160</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>10240</dim>
-					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_4897">
-					<dim>2</dim>
+				<port id="2" precision="FP32">
+					<dim>16</dim>
 					<dim>256</dim>
-					<dim>10240</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2901" name="onnx::Shape_4898" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Shape_4898"/>
-			</rt_info>
+		<layer id="2737" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Mul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>10240</dim>
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>2</dim>
+					<dim>16</dim>
 					<dim>256</dim>
-					<dim>10240</dim>
+					<dim>160</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Shape_4898">
-					<dim>2</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Add_output_0,/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Mul_output_0">
+					<dim>16</dim>
+					<dim>256</dim>
 					<dim>256</dim>
-					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2902" name="Constant_127275" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_18916, onnx::Gather_4900, onnx::Mul_4909"/>
-			</rt_info>
+		<layer id="2738" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Softmax" type="SoftMax" version="opset8">
+			<data axis="-1" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>256</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="1" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Cast_output_0,/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Softmax_output_0">
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>256</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2903" name="Constant_127276" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_18916, onnx::Gather_4900, onnx::Mul_4909"/>
-			</rt_info>
+		<layer id="2739" name="Constant_85664_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="1449490642" size="3276800" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2904" name="Constant_127272" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
+		<layer id="2740" name="Constant_85664" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_18916, onnx::Gather_4900, onnx::Mul_4909"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2905" name="onnx::Gather_4899" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4899"/>
-			</rt_info>
+		<layer id="2741" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/to_v/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>256</dim>
-					<dim>10240</dim>
+					<dim>1280</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_4899">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/to_v/MatMul_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2906" name="onnx::Gather_4900" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4900"/>
-			</rt_info>
+		<layer id="2742" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Constant_4" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="154780544" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Gather_4900">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Constant_4_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2907" name="Constant_18905" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_18905"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="2908" name="onnx::Add_4901" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_18905, onnx::Add_4901, onnx::Gather_4900"/>
-			</rt_info>
+		<layer id="2743" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Reshape_4" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Add_4901">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Reshape_4_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>8</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2909" name="onnx::Add_4903" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4903"/>
-			</rt_info>
+		<layer id="2744" name="Constant_25061" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Add_4903">
-					<dim>1</dim>
+				<port id="0" precision="I64">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2910" name="onnx::Div_4904" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4903, onnx::Div_4904"/>
-			</rt_info>
+		<layer id="2745" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Transpose_2" type="Transpose" version="opset1">
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>8</dim>
+					<dim>160</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Div_4904">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Transpose_2_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2911" name="onnx::Div_4905" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_4905"/>
-			</rt_info>
+		<layer id="2746" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="154780576" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_4905">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Constant_5_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2912" name="onnx::Mul_4906" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_4905, onnx::Mul_4906, onnx::Mul_4907, onnx::Slice_4908"/>
-			</rt_info>
+		<layer id="2747" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Reshape_5" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Mul_4906,onnx::Slice_4908">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2913" name="Constant_127271" type="Const" version="opset1">
-			<data element_type="i32" shape="1" offset="25941500" size="4"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_18916, onnx::Gather_4900, onnx::Mul_4909"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I32">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Reshape_5_output_0">
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2914" name="ScatterUpdate_127277" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_18916, onnx::Gather_4900, onnx::Mul_4909"/>
-			</rt_info>
+		<layer id="2748" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/MatMul_1" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="false" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>256</dim>
 				</port>
-				<port id="3" precision="I32">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="I64">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/MatMul_1_output_0">
+					<dim>16</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2915" name="Constant_127280" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941504" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_18916, onnx::Gather_4900, onnx::Mul_4909"/>
-			</rt_info>
+		<layer id="2749" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Constant_8" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="161334202" size="32" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Constant_8_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2916" name="onnx::Mul_4909" type="StridedSlice" version="opset1">
-			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask=""/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_18916, onnx::Gather_4900, onnx::Mul_4909"/>
-			</rt_info>
+		<layer id="2750" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Reshape_6" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>2</dim>
+					<dim>16</dim>
 					<dim>256</dim>
-					<dim>10240</dim>
+					<dim>160</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="3" precision="I64">
-					<dim>3</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="FP32" names="onnx::Mul_4909">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Reshape_6_output_0">
 					<dim>2</dim>
+					<dim>8</dim>
 					<dim>256</dim>
-					<dim>5120</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2917" name="Constant_127344" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_18985, onnx::Div_4912, onnx::Gather_4900"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2918" name="Constant_127343" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_18985, onnx::Div_4912, onnx::Gather_4900"/>
-			</rt_info>
+		<layer id="2751" name="Constant_25087" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2919" name="Constant_127342" type="Const" version="opset1">
-			<data element_type="i32" shape="1" offset="25941500" size="4"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_18985, onnx::Div_4912, onnx::Gather_4900"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I32">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2920" name="ScatterUpdate_127345" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_18985, onnx::Div_4912, onnx::Gather_4900"/>
-			</rt_info>
+		<layer id="2752" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Transpose_4" type="Transpose" version="opset1">
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="3" precision="I32">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="I64">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Transpose_4_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>8</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2921" name="Constant_127346" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_18985, onnx::Div_4912, onnx::Gather_4900"/>
-			</rt_info>
+		<layer id="2753" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Constant_9" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="151498600" size="24" />
 			<output>
-				<port id="0" precision="I64">
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Constant_9_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2922" name="onnx::Mul_4910" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4910"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_4910">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2923" name="onnx::Slice_4911" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4910, onnx::Slice_4911"/>
-			</rt_info>
+		<layer id="2754" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Reshape_7" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>8</dim>
+					<dim>160</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Slice_4911">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/Reshape_7_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2924" name="ScatterUpdate_127347" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_18985, onnx::Div_4912, onnx::Gather_4900"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="3" precision="I32">
-					<dim>1</dim>
-				</port>
-			</input>
+		<layer id="2755" name="Constant_85671_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="1452767442" size="3276800" />
 			<output>
-				<port id="4" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2925" name="Constant_127350" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941504" size="24"/>
+		<layer id="2756" name="Constant_85671" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_18985, onnx::Div_4912, onnx::Gather_4900"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2926" name="onnx::Div_4912" type="StridedSlice" version="opset1">
-			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask=""/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_18985, onnx::Div_4912, onnx::Gather_4900"/>
-			</rt_info>
+		<layer id="2757" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/to_out.0/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>256</dim>
-					<dim>10240</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>3</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="3" precision="I64">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="FP32" names="onnx::Div_4912">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/to_out.0/MatMul_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
-					<dim>5120</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2927" name="onnx::Mul_4920" type="Gelu" version="opset7">
-			<data approximation_mode="ERF"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4915, onnx::Erf_4914, onnx::Mul_4917, onnx::Mul_4918, onnx::Mul_4920"/>
-			</rt_info>
+		<layer id="2758" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/to_out.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+				<port id="1" precision="FP32">
 					<dim>2</dim>
 					<dim>256</dim>
-					<dim>5120</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="onnx::Mul_4920">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn1/to_out.0/Add_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
-					<dim>5120</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2928" name="input.700" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.700"/>
-			</rt_info>
+		<layer id="2759" name="/up_blocks.1/attentions.2/transformer_blocks.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>256</dim>
-					<dim>5120</dim>
+					<dim>1280</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>2</dim>
 					<dim>256</dim>
-					<dim>5120</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.700">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/Add_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
-					<dim>5120</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2929" name="Constant_148114" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 5120" offset="2682634544" size="26214400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4923, onnx::MatMul_8913"/>
-			</rt_info>
+		<layer id="2760" name="Constant_25099" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1280</dim>
-					<dim>5120</dim>
+				<port id="0" precision="I64">
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2930" name="onnx::Add_4923" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4923, onnx::MatMul_8913"/>
-			</rt_info>
+		<layer id="2761" name="/up_blocks.1/attentions.2/transformer_blocks.0/norm2/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>256</dim>
-					<dim>5120</dim>
-				</port>
-				<port id="1" precision="FP32">
 					<dim>1280</dim>
-					<dim>5120</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_4923">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/norm2/Div_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2931" name="onnx::Add_4924" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2762" name="Constant_87087_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="1456044242" size="2560" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2763" name="Constant_87087" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4924"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
-				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_4924">
-					<dim>2</dim>
-					<dim>256</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2932" name="onnx::Reshape_4925" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4925"/>
-			</rt_info>
+		<layer id="2764" name="/up_blocks.1/attentions.2/transformer_blocks.0/norm2/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_4925">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/norm2/Mul_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2933" name="Constant_90767" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18233080" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4934"/>
-			</rt_info>
+		<layer id="2765" name="Constant_87088_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="1456046802" size="2560" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2934" name="Constant_90768" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="2935" name="Gather_90769" type="Gather" version="opset8">
-			<data batch_dims="0"/>
+		<layer id="2766" name="Constant_87088" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4934"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_4934">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2936" name="onnx::Transpose_4935" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_4935"/>
-			</rt_info>
+		<layer id="2767" name="/up_blocks.1/attentions.2/transformer_blocks.0/norm2/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_4935">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/norm2/Add_1_output_0">
 					<dim>2</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2937" name="Constant_19150" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="27579960" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_19150"/>
-			</rt_info>
+		<layer id="2768" name="Constant_85679_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="1456049362" size="3276800" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2938" name="input.704" type="Transpose" version="opset1">
+		<layer id="2769" name="Constant_85679" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.704"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="input.704">
-					<dim>2</dim>
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="2939" name="m.up_blocks.1.attentions.1.proj_out.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280, 1, 1" offset="2708848944" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.1.attentions.1.proj_out.weight"/>
-			</rt_info>
+			</input>
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.1.attentions.1.proj_out.weight">
+				<port id="1" precision="FP32">
 					<dim>1280</dim>
 					<dim>1280</dim>
-					<dim>1</dim>
-					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2940" name="Convolution_19152" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_19152"/>
-			</rt_info>
+		<layer id="2770" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/to_q/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>1280</dim>
 					<dim>1280</dim>
-					<dim>1</dim>
-					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/to_q/MatMul_output_0">
 					<dim>2</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2941" name="Reshape_19172" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="2715402544" size="5120"/>
+		<layer id="2771" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="154780544" size="32" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1280</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Constant_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2942" name="onnx::Add_4937" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_19171, Reshape_19172, onnx::Add_4937"/>
-			</rt_info>
+		<layer id="2772" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1280</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_4937">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Reshape_output_0">
 					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+					<dim>256</dim>
+					<dim>8</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2943" name="onnx::Concat_4938" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_4938"/>
-			</rt_info>
+		<layer id="2773" name="Constant_25122" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
+			<output>
+				<port id="0" precision="I64">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2774" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Transpose" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+					<dim>256</dim>
+					<dim>8</dim>
+					<dim>160</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Concat_4938">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Transpose_output_0">
 					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+					<dim>8</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2944" name="onnx::Cast_4939" type="Concat" version="opset1">
-			<data axis="1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.708, onnx::Cast_4939"/>
-			</rt_info>
+		<layer id="2775" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="154780576" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Constant_1_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2776" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+					<dim>8</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+				<port id="1" precision="I64">
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.708,onnx::Cast_4939">
-					<dim>2</dim>
-					<dim>1920</dim>
-					<dim>16</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Reshape_1_output_0">
 					<dim>16</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2945" name="m.up_blocks.1.resnets.2.conv_shortcut.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1920, 1, 1" offset="2715407664" size="9830400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.1.resnets.2.conv_shortcut.weight"/>
-			</rt_info>
+		<layer id="2777" name="Constant_85686_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 768" offset="1459326162" size="1966080" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.1.resnets.2.conv_shortcut.weight">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
-					<dim>1920</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2946" name="Convolution_19542" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
+		<layer id="2778" name="Constant_85686" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_19542"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1920</dim>
-					<dim>16</dim>
-					<dim>16</dim>
-				</port>
-				<port id="1" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
-					<dim>1920</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>768</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2947" name="Reshape_19562" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="2725238064" size="5120"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
 					<dim>1280</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2948" name="onnx::Add_4984" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_19561, Reshape_19562, onnx::Add_4984"/>
-			</rt_info>
+		<layer id="2779" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/to_k/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+					<dim>77</dim>
+					<dim>768</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
 					<dim>1280</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>768</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_4984">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/to_k/MatMul_output_0">
 					<dim>2</dim>
+					<dim>77</dim>
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2949" name="onnx::Reshape_4941" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4941"/>
-			</rt_info>
+		<layer id="2780" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="169859034" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_4941">
-					<dim>3</dim>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Constant_2_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2950" name="onnx::InstanceNormalization_4942" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_4942"/>
-			</rt_info>
+		<layer id="2781" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Reshape_2" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1920</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+					<dim>77</dim>
+					<dim>1280</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>3</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_4942">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Reshape_2_output_0">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>15360</dim>
+					<dim>77</dim>
+					<dim>8</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2951" name="Constant_19212" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_19212"/>
-			</rt_info>
+		<layer id="2782" name="Constant_25138" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2952" name="MVN_19213" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_19232, Concat_19277, MVN_19213, Multiply_19260, Reshape_19233, Reshape_19278, onnx::Reshape_4945"/>
-			</rt_info>
+		<layer id="2783" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Transpose_1" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>15360</dim>
+					<dim>77</dim>
+					<dim>8</dim>
+					<dim>160</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_4945">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Transpose_1_output_0">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>15360</dim>
-				</port>
+					<dim>8</dim>
+					<dim>77</dim>
+					<dim>160</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="2953" name="onnx::Reshape_4946" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4946"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1920</dim>
-					<dim>16</dim>
-					<dim>16</dim>
-				</port>
-			</input>
+		<layer id="2784" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="169859066" size="24" />
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_4946">
-					<dim>4</dim>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Constant_3_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2954" name="onnx::Mul_4947" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4947"/>
-			</rt_info>
+		<layer id="2785" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Reshape_3" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>15360</dim>
+					<dim>8</dim>
+					<dim>77</dim>
+					<dim>160</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_4947">
-					<dim>2</dim>
-					<dim>1920</dim>
-					<dim>16</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Reshape_3_output_0">
 					<dim>16</dim>
+					<dim>77</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2955" name="Constant_150486" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1920, 1, 1" offset="2725243184" size="7680"/>
+		<layer id="2786" name="Constant_87089_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1" offset="158057400" size="2" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-					<dim>1920</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2956" name="onnx::Add_4950" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2787" name="Constant_87089" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4950"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1920</dim>
-					<dim>16</dim>
-					<dim>16</dim>
-				</port>
-				<port id="1" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-					<dim>1920</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_4950">
-					<dim>2</dim>
-					<dim>1920</dim>
-					<dim>16</dim>
-					<dim>16</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2957" name="Constant_150487" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1920, 1, 1" offset="2725250864" size="7680"/>
-			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
-					<dim>1920</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2958" name="onnx::Cast_4953" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.712, onnx::Cast_4953"/>
-			</rt_info>
+		<layer id="2788" name="Multiply_86213" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1920</dim>
-					<dim>16</dim>
 					<dim>16</dim>
+					<dim>77</dim>
+					<dim>160</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>1</dim>
-					<dim>1920</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.712,onnx::Cast_4953">
-					<dim>2</dim>
-					<dim>1920</dim>
-					<dim>16</dim>
+				<port id="2" precision="FP32">
 					<dim>16</dim>
+					<dim>77</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2959" name="input.716" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.716, onnx::Mul_4955"/>
-			</rt_info>
+		<layer id="2789" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Mul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1920</dim>
 					<dim>16</dim>
+					<dim>256</dim>
+					<dim>160</dim>
+				</port>
+				<port id="1" precision="FP32">
 					<dim>16</dim>
+					<dim>77</dim>
+					<dim>160</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="input.716">
-					<dim>2</dim>
-					<dim>1920</dim>
-					<dim>16</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Add_output_0,/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Mul_output_0">
 					<dim>16</dim>
+					<dim>256</dim>
+					<dim>77</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2960" name="m.up_blocks.1.resnets.2.conv1.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1920, 3, 3" offset="2725258544" size="88473600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.1.resnets.2.conv1.weight"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.1.resnets.2.conv1.weight">
-					<dim>1280</dim>
-					<dim>1920</dim>
-					<dim>3</dim>
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2961" name="Convolution_19318" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_19318"/>
-			</rt_info>
+		<layer id="2790" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Softmax" type="SoftMax" version="opset8">
+			<data axis="-1" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1920</dim>
 					<dim>16</dim>
-					<dim>16</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1280</dim>
-					<dim>1920</dim>
-					<dim>3</dim>
-					<dim>3</dim>
+					<dim>256</dim>
+					<dim>77</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
+				<port id="1" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Cast_output_0,/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Softmax_output_0">
 					<dim>16</dim>
-					<dim>16</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2962" name="Reshape_19338" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="2813732144" size="5120"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1280</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>256</dim>
+					<dim>77</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2963" name="onnx::Add_4957" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_19337, Reshape_19338, onnx::Add_4957"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1280</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
+		<layer id="2791" name="Constant_85693_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 768" offset="1461292242" size="1966080" />
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_4957">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2964" name="onnx::Gemm_4959" type="Swish" version="opset4">
+		<layer id="2792" name="Constant_85693" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gemm_4959, onnx::Mul_4958"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
+					<dim>768</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="onnx::Gemm_4959">
-					<dim>2</dim>
-					<dim>1280</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2965" name="m.up_blocks.1.resnets.2.time_emb_proj.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="2813737264" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.1.resnets.2.time_emb_proj.weight"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.1.resnets.2.time_emb_proj.weight">
-					<dim>1280</dim>
+				<port id="1" precision="FP32">
 					<dim>1280</dim>
+					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2966" name="MatMul_19370" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="MatMul_19370"/>
-			</rt_info>
+		<layer id="2793" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/to_v/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1280</dim>
+					<dim>77</dim>
+					<dim>768</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>1280</dim>
-					<dim>1280</dim>
+					<dim>768</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/to_v/MatMul_output_0">
 					<dim>2</dim>
+					<dim>77</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2967" name="Constant_150488" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280" offset="2820290864" size="5120"/>
+		<layer id="2794" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Constant_4" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="169859034" size="32" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1280</dim>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Constant_4_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2968" name="onnx::Unsqueeze_4960" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Multiply_19371, onnx::Unsqueeze_4960"/>
-			</rt_info>
+		<layer id="2795" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Reshape_4" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>77</dim>
 					<dim>1280</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1280</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_4960">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Reshape_4_output_0">
 					<dim>2</dim>
-					<dim>1280</dim>
+					<dim>77</dim>
+					<dim>8</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2969" name="onnx::Unsqueeze_4961" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4961"/>
-			</rt_info>
+		<layer id="2796" name="Constant_25154" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4961">
-					<dim>1</dim>
+				<port id="0" precision="I64">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2970" name="onnx::Unsqueeze_4962" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4962"/>
-			</rt_info>
+		<layer id="2797" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Transpose_2" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1280</dim>
+					<dim>77</dim>
+					<dim>8</dim>
+					<dim>160</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_4962">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Transpose_2_output_0">
 					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>1</dim>
+					<dim>8</dim>
+					<dim>77</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2971" name="onnx::Unsqueeze_4963" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="14129392" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_4963"/>
-			</rt_info>
+		<layer id="2798" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="169859066" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_4963">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Constant_5_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2972" name="onnx::Add_4964" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4964"/>
-			</rt_info>
+		<layer id="2799" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Reshape_5" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>1</dim>
+					<dim>8</dim>
+					<dim>77</dim>
+					<dim>160</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_4964">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Reshape_5_output_0">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2973" name="onnx::Cast_4965" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.720, onnx::Cast_4965"/>
-			</rt_info>
+		<layer id="2800" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/MatMul_1" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="false" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
 					<dim>16</dim>
+					<dim>256</dim>
+					<dim>77</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>160</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.720,onnx::Cast_4965">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/MatMul_1_output_0">
 					<dim>16</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2974" name="onnx::Reshape_4967" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4967"/>
-			</rt_info>
+		<layer id="2801" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Constant_8" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="161334202" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_4967">
-					<dim>3</dim>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Constant_8_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2975" name="onnx::InstanceNormalization_4968" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_4968"/>
-			</rt_info>
+		<layer id="2802" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Reshape_6" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
 					<dim>16</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>3</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_4968">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Reshape_6_output_0">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>10240</dim>
+					<dim>8</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2976" name="Constant_19388" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_19388"/>
-			</rt_info>
+		<layer id="2803" name="Constant_25180" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2977" name="MVN_19389" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_19408, Concat_19453, MVN_19389, Multiply_19436, Reshape_19409, Reshape_19454, onnx::Reshape_4971"/>
-			</rt_info>
+		<layer id="2804" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Transpose_4" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>10240</dim>
+					<dim>8</dim>
+					<dim>256</dim>
+					<dim>160</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_4971">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Transpose_4_output_0">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>10240</dim>
+					<dim>256</dim>
+					<dim>8</dim>
+					<dim>160</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2978" name="onnx::Reshape_4972" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_4972"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
-				</port>
-			</input>
+		<layer id="2805" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Constant_9" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="151498600" size="24" />
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_4972">
-					<dim>4</dim>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Constant_9_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2979" name="onnx::Mul_4973" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_4973"/>
-			</rt_info>
+		<layer id="2806" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Reshape_7" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>10240</dim>
+					<dim>256</dim>
+					<dim>8</dim>
+					<dim>160</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_4973">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/Reshape_7_output_0">
 					<dim>2</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2980" name="Constant_150489" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="2820295984" size="5120"/>
+		<layer id="2807" name="Constant_85700_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280" offset="1463258322" size="3276800" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
 					<dim>1280</dim>
-					<dim>1</dim>
-					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2981" name="onnx::Add_4976" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2808" name="Constant_85700" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_4976"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2809" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/to_out.0/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
 					<dim>1280</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_4976">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/to_out.0/MatMul_output_0">
 					<dim>2</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2982" name="Constant_150490" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="2820301104" size="5120"/>
-			<output>
+		<layer id="2810" name="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/to_out.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
 				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1280</dim>
 					<dim>1</dim>
 					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2983" name="onnx::Cast_4979" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.724, onnx::Cast_4979"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
+					<dim>2</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
-					<dim>1</dim>
-					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.724,onnx::Cast_4979">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/attn2/to_out.0/Add_output_0">
 					<dim>2</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2984" name="input.728" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.728, onnx::Mul_4981"/>
-			</rt_info>
+		<layer id="2811" name="/up_blocks.1/attentions.2/transformer_blocks.0/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="input.728">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/Add_1_output_0">
 					<dim>2</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2985" name="m.up_blocks.1.resnets.2.conv2.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280, 3, 3" offset="2820306224" size="58982400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.1.resnets.2.conv2.weight"/>
-			</rt_info>
+		<layer id="2812" name="Constant_25192" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.1.resnets.2.conv2.weight">
-					<dim>1280</dim>
-					<dim>1280</dim>
-					<dim>3</dim>
-					<dim>3</dim>
+				<port id="0" precision="I64">
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2986" name="Convolution_19494" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_19494"/>
-			</rt_info>
+		<layer id="2813" name="/up_blocks.1/attentions.2/transformer_blocks.0/norm3/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>1280</dim>
-					<dim>1280</dim>
-					<dim>3</dim>
-					<dim>3</dim>
+				<port id="1" precision="I64">
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/norm3/Div_output_0">
 					<dim>2</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2987" name="Reshape_19514" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="2879288624" size="5120"/>
+		<layer id="2814" name="Constant_87091_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="1466535122" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1280</dim>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2988" name="onnx::Add_4983" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2815" name="Constant_87091" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_19513, Reshape_19514, onnx::Add_4983"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1280</dim>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_4983">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2989" name="onnx::Div_4985" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.732, onnx::Div_4985"/>
-			</rt_info>
+		<layer id="2816" name="/up_blocks.1/attentions.2/transformer_blocks.0/norm3/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>2</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.732,onnx::Div_4985">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/norm3/Mul_output_0">
 					<dim>2</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2990" name="onnx::Reshape_5000" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5000"/>
-			</rt_info>
+		<layer id="2817" name="Constant_87092_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1280" offset="1466537682" size="2560" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_5000">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2991" name="onnx::InstanceNormalization_5001" type="Reshape" version="opset1">
-			<data special_zero="true"/>
+		<layer id="2818" name="Constant_87092" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_5001"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_5001">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>10240</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2992" name="Constant_19618" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_19618"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2993" name="MVN_19619" type="MVN" version="opset6">
-			<data eps="9.9999999747524271e-07" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_19638, Concat_19683, MVN_19619, Multiply_19666, Reshape_19639, Reshape_19684, onnx::Reshape_5004"/>
-			</rt_info>
+		<layer id="2819" name="/up_blocks.1/attentions.2/transformer_blocks.0/norm3/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>10240</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_5004">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/norm3/Add_1_output_0">
 					<dim>2</dim>
-					<dim>32</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2820" name="Constant_85708_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="10240, 1280" offset="1466540242" size="26214400" />
+			<output>
+				<port id="0" precision="FP16">
 					<dim>10240</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2994" name="onnx::Reshape_5005" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
+		<layer id="2821" name="Constant_85708" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4988, onnx::Gather_4991, onnx::Gather_4994, onnx::Gather_4997, onnx::Reshape_5005"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>10240</dim>
 					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_4988,onnx::Gather_4991,onnx::Gather_4994,onnx::Gather_4997,onnx::Reshape_5005">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>10240</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2995" name="onnx::Mul_5006" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5006"/>
-			</rt_info>
+		<layer id="2822" name="/up_blocks.1/attentions.2/transformer_blocks.0/ff/net.0/proj/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>10240</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>10240</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_5006">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/ff/net.0/proj/MatMul_output_0">
 					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+					<dim>256</dim>
+					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2996" name="Constant_150491" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="2879293744" size="5120"/>
-			<output>
+		<layer id="2823" name="/up_blocks.1/attentions.2/transformer_blocks.0/ff/net.0/proj/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
 				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1280</dim>
 					<dim>1</dim>
 					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="2997" name="onnx::Add_5009" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5009"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+					<dim>10240</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1280</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>10240</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_5009">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/ff/net.0/proj/Add_output_0">
 					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+					<dim>256</dim>
+					<dim>10240</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2998" name="Constant_150492" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="2879298864" size="5120"/>
+		<layer id="2824" name="Constant_78017" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1280</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="0" precision="I64">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="2999" name="input.736" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.736"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1280</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
+		<layer id="2825" name="Constant_78018" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
-				<port id="2" precision="FP32" names="input.736">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+				<port id="0" precision="I64">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3000" name="m.up_blocks.1.attentions.2.proj_in.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280, 1, 1" offset="2879303984" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.1.attentions.2.proj_in.weight"/>
-			</rt_info>
+		<layer id="2826" name="Constant_78014" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.1.attentions.2.proj_in.weight">
-					<dim>1280</dim>
-					<dim>1280</dim>
-					<dim>1</dim>
+				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3001" name="Convolution_19721" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_19721"/>
-			</rt_info>
+		<layer id="2827" name="/up_blocks.1/attentions.2/transformer_blocks.0/ff/net.0/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1280</dim>
-					<dim>1280</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>256</dim>
+					<dim>10240</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+				<port id="1" precision="I64" names="/up_blocks.1/attentions.2/transformer_blocks.0/ff/net.0/Shape_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3002" name="Reshape_19741" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="2885857584" size="5120"/>
+		<layer id="2828" name="/up_blocks.1/attentions.2/transformer_blocks.0/ff/net.0/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1280</dim>
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.2/transformer_blocks.0/ff/net.0/Constant_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3003" name="onnx::Transpose_5013" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_19740, Reshape_19741, onnx::Transpose_5013"/>
-			</rt_info>
+		<layer id="2829" name="Constant_25209" type="Const" version="opset1">
+			<data element_type="i64" shape="" offset="2143392" size="8" />
+			<output>
+				<port id="0" precision="I64" />
+			</output>
+		</layer>
+		<layer id="2830" name="/up_blocks.1/attentions.2/transformer_blocks.0/ff/net.0/Gather" type="Gather" version="opset8">
+			<data batch_dims="0" />
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+				<port id="0" precision="I64">
+					<dim>3</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1280</dim>
-					<dim>1</dim>
+				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
+				<port id="2" precision="I64" />
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_5013">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+				<port id="3" precision="I64" names="/up_blocks.1/attentions.2/transformer_blocks.0/ff/net.0/Gather_output_0">
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3004" name="Constant_19769" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18233080" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_19769"/>
-			</rt_info>
+		<layer id="2831" name="/up_blocks.1/attentions.2/transformer_blocks.0/ff/net.0/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="2143400" size="8" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.2/transformer_blocks.0/ff/net.0/Constant_2_output_0">
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3005" name="onnx::Reshape_5014" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5014"/>
-			</rt_info>
+		<layer id="2832" name="/up_blocks.1/attentions.2/transformer_blocks.0/ff/net.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+				<port id="0" precision="I64">
+					<dim>1</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_5014">
-					<dim>2</dim>
-					<dim>16</dim>
-					<dim>16</dim>
-					<dim>1280</dim>
+				<port id="2" precision="I64" names="/up_blocks.1/attentions.2/transformer_blocks.0/ff/net.0/Add_output_0">
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3006" name="Constant_89076" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_19595, onnx::Concat_5017, onnx::Gather_4989, onnx::Unsqueeze_4990, onnx::Unsqueeze_5016"/>
-			</rt_info>
+		<layer id="2833" name="/up_blocks.1/attentions.2/transformer_blocks.0/ff/net.0/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64">
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.2/transformer_blocks.0/ff/net.0/Constant_3_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3007" name="Constant_19595" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_19595"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3008" name="onnx::Unsqueeze_4990" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_19595, onnx::Concat_5017, onnx::Gather_4989, onnx::Unsqueeze_4990, onnx::Unsqueeze_5016"/>
-			</rt_info>
+		<layer id="2834" name="/up_blocks.1/attentions.2/transformer_blocks.0/ff/net.0/Div" type="Divide" version="opset1">
+			<data auto_broadcast="numpy" m_pythondiv="true" />
 			<input>
 				<port id="0" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_5017">
+				<port id="2" precision="I64" names="/up_blocks.1/attentions.2/transformer_blocks.0/ff/net.0/Div_output_0,/up_blocks.1/attentions.2/transformer_blocks.0/ff/net.0/Mul_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3009" name="onnx::Gather_4995" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4995"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_4995"/>
-			</output>
-		</layer>
-		<layer id="3010" name="Constant_19603" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_19603"/>
-			</rt_info>
+		<layer id="2835" name="Constant_78013" type="Const" version="opset1">
+			<data element_type="i32" shape="1" offset="12970946" size="4" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I32">
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3011" name="onnx::Mul_4996" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_19603, onnx::Gather_4995, onnx::Mul_4996"/>
-			</rt_info>
+		<layer id="2836" name="ScatterUpdate_78019" type="ScatterUpdate" version="opset3">
 			<input>
 				<port id="0" precision="I64">
-					<dim>4</dim>
+					<dim>3</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>1</dim>
+				</port>
+				<port id="2" precision="I64">
+					<dim>1</dim>
+				</port>
+				<port id="3" precision="I32">
+					<dim>1</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Mul_4996"/>
+				<port id="4" precision="I64">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3012" name="onnx::Gather_4998" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="14129392" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_4998"/>
-			</rt_info>
+		<layer id="2837" name="Constant_78022" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970950" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Gather_4998"/>
+				<port id="0" precision="I64">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3013" name="Constant_19607" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_19607"/>
-			</rt_info>
+		<layer id="2838" name="/up_blocks.1/attentions.2/transformer_blocks.0/ff/net.0/Slice" type="StridedSlice" version="opset1">
+			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask="" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>10240</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>3</dim>
+				</port>
+				<port id="2" precision="I64">
+					<dim>3</dim>
+				</port>
+				<port id="3" precision="I64">
+					<dim>3</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="4" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/ff/net.0/Slice_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>5120</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3014" name="onnx::Mul_4999" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_19607, onnx::Gather_4998, onnx::Mul_4999"/>
-			</rt_info>
-			<input>
+		<layer id="2839" name="Constant_78086" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
+			<output>
 				<port id="0" precision="I64">
-					<dim>4</dim>
+					<dim>3</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Mul_4999"/>
 			</output>
 		</layer>
-		<layer id="3015" name="onnx::Unsqueeze_5015" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5015"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
+		<layer id="2840" name="Constant_78085" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_5015"/>
+				<port id="0" precision="I64">
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3016" name="onnx::Unsqueeze_5018" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5018"/>
-			</rt_info>
+		<layer id="2841" name="Constant_78084" type="Const" version="opset1">
+			<data element_type="i32" shape="1" offset="12970946" size="4" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5018">
+				<port id="0" precision="I32">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3017" name="onnx::Concat_5019" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5019, onnx::Unsqueeze_5018"/>
-			</rt_info>
+		<layer id="2842" name="ScatterUpdate_78087" type="ScatterUpdate" version="opset3">
 			<input>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64">
+					<dim>3</dim>
+				</port>
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
+				<port id="2" precision="I64">
+					<dim>1</dim>
+				</port>
+				<port id="3" precision="I32">
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5019">
-					<dim>1</dim>
+				<port id="4" precision="I64">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3018" name="Constant_89085" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_19599, onnx::Concat_5021, onnx::Gather_4992, onnx::Unsqueeze_4993, onnx::Unsqueeze_5020"/>
-			</rt_info>
+		<layer id="2843" name="Constant_78088" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>1</dim>
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3019" name="Constant_19599" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_19599"/>
-			</rt_info>
+		<layer id="2844" name="/up_blocks.1/attentions.2/transformer_blocks.0/ff/net.0/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.2/transformer_blocks.0/ff/net.0/Constant_5_output_0">
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3020" name="onnx::Unsqueeze_4993" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_19599, onnx::Concat_5021, onnx::Gather_4992, onnx::Unsqueeze_4993, onnx::Unsqueeze_5020"/>
-			</rt_info>
+		<layer id="2845" name="/up_blocks.1/attentions.2/transformer_blocks.0/ff/net.0/Mul_1" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_5021">
+				<port id="2" precision="I64" names="/up_blocks.1/attentions.2/transformer_blocks.0/ff/net.0/Mul_1_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3021" name="onnx::Reshape_5022" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5022"/>
-			</rt_info>
+		<layer id="2846" name="ScatterUpdate_78089" type="ScatterUpdate" version="opset3">
 			<input>
 				<port id="0" precision="I64">
-					<dim>1</dim>
+					<dim>3</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>1</dim>
@@ -51467,121 +46850,161 @@
 				<port id="2" precision="I64">
 					<dim>1</dim>
 				</port>
+				<port id="3" precision="I32">
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_5022">
+				<port id="4" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3022" name="input.740" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.740"/>
-			</rt_info>
+		<layer id="2847" name="Constant_78092" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970950" size="24" />
+			<output>
+				<port id="0" precision="I64">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2848" name="/up_blocks.1/attentions.2/transformer_blocks.0/ff/net.0/Slice_1" type="StridedSlice" version="opset1">
+			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask="" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>16</dim>
-					<dim>16</dim>
-					<dim>1280</dim>
+					<dim>256</dim>
+					<dim>10240</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>3</dim>
 				</port>
+				<port id="2" precision="I64">
+					<dim>3</dim>
+				</port>
+				<port id="3" precision="I64">
+					<dim>3</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.740">
+				<port id="4" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/ff/net.0/Slice_1_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3023" name="Constant_19854" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_19854"/>
-			</rt_info>
+		<layer id="2849" name="/up_blocks.1/attentions.2/transformer_blocks.0/ff/net.0/Mul_3" type="Gelu" version="opset7">
+			<data approximation_mode="ERF" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>5120</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="1" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/ff/net.0/Mul_3_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3024" name="onnx::Mul_5032" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5028, onnx::Div_5031, onnx::Mul_5032, onnx::Pow_5025, onnx::ReduceMean_5027, onnx::Sqrt_5030, onnx::Sub_5024"/>
-			</rt_info>
+		<layer id="2850" name="/up_blocks.1/attentions.2/transformer_blocks.0/ff/net.0/Mul_4" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>5120</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>5120</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_5032">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/ff/net.0/Mul_4_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3025" name="Constant_150493" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="2885862704" size="5120"/>
+		<layer id="2851" name="Constant_85716_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 5120" offset="1492754642" size="13107200" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="0" precision="FP16">
 					<dim>1280</dim>
+					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3026" name="onnx::Add_5033" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2852" name="Constant_85716" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5033"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>5120</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>5120</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2853" name="/up_blocks.1/attentions.2/transformer_blocks.0/ff/net.2/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>5120</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
 					<dim>1280</dim>
+					<dim>5120</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_5033">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/ff/net.2/MatMul_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3027" name="Constant_150494" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="2885867824" size="5120"/>
-			<output>
+		<layer id="2854" name="/up_blocks.1/attentions.2/transformer_blocks.0/ff/net.2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
 				<port id="0" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>1280</dim>
 				</port>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/ff/net.2/Add_output_0">
+					<dim>2</dim>
+					<dim>256</dim>
+					<dim>1280</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3028" name="onnx::MatMul_5034" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_5034"/>
-			</rt_info>
+		<layer id="2855" name="/up_blocks.1/attentions.2/transformer_blocks.0/Add_2" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -51589,3634 +47012,3749 @@
 					<dim>1280</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>2</dim>
+					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_5034">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/transformer_blocks.0/Add_2_output_0">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3029" name="Constant_148125" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="2885872944" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8920, q.143"/>
-			</rt_info>
+		<layer id="2856" name="/up_blocks.1/attentions.2/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="214428690" size="32" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1280</dim>
-					<dim>1280</dim>
+				<port id="0" precision="I64" names="/up_blocks.1/attentions.2/Constant_1_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3030" name="q.143" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8920, q.143"/>
-			</rt_info>
+		<layer id="2857" name="/up_blocks.1/attentions.2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>1280</dim>
-					<dim>1280</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.143">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/Reshape_1_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3031" name="Constant_108096" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
+		<layer id="2858" name="Constant_25374" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="13790206" size="32" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>2</dim>
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3032" name="onnx::Gather_5041" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5041, onnx::Gather_5044, onnx::Gather_5047"/>
-			</rt_info>
+		<layer id="2859" name="/up_blocks.1/attentions.2/Transpose_1" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 					<dim>1280</dim>
 				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_5041,onnx::Gather_5044,onnx::Gather_5047">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/Transpose_1_output_0">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3033" name="onnx::Gather_5048" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5048"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_5048"/>
-			</output>
-		</layer>
-		<layer id="3034" name="Constant_19880" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_19880"/>
-			</rt_info>
+		<layer id="2860" name="up_blocks.1.attentions.2.proj_out.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280, 1, 1" offset="1505861842" size="3276800" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3035" name="onnx::Div_5049" type="Gather" version="opset8">
-			<data batch_dims="0"/>
+		<layer id="2861" name="up_blocks.1.attentions.2.proj_out.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_19880, onnx::Div_5049, onnx::Gather_5048"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_5049"/>
-			</output>
-		</layer>
-		<layer id="3036" name="onnx::Div_5050" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_5050"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_5050"/>
+				<port id="1" precision="FP32" names="up_blocks.1.attentions.2.proj_out.weight">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3037" name="onnx::Cast_5051" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_5051, onnx::Cast_5052, onnx::Div_5050, onnx::Unsqueeze_5053"/>
-			</rt_info>
+		<layer id="2862" name="/up_blocks.1/attentions.2/proj_out/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_5051,onnx::Cast_5052,onnx::Unsqueeze_5053"/>
+				<port id="2" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3038" name="onnx::Unsqueeze_5061" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5061"/>
-			</rt_info>
+		<layer id="2863" name="Reshape_25396_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="1509138642" size="2560" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5061">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3039" name="onnx::Concat_5062" type="Unsqueeze" version="opset1">
+		<layer id="2864" name="Reshape_25396" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5062, onnx::Unsqueeze_5061"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5062">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3040" name="onnx::Reshape_5063" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_5063"/>
-			</rt_info>
+		<layer id="2865" name="/up_blocks.1/attentions.2/proj_out/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64">
-					<dim>4</dim>
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/proj_out/Conv_output_0">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3041" name="onnx::Transpose_5064" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_5064"/>
-			</rt_info>
+		<layer id="2866" name="/up_blocks.1/attentions.2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_5064">
+				<port id="2" precision="FP32" names="/up_blocks.1/attentions.2/Add_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>8</dim>
-					<dim>160</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3042" name="Constant_19993" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_19993"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3043" name="onnx::Reshape_5065" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5065"/>
-			</rt_info>
+		<layer id="2867" name="ShapeOf_25427" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>8</dim>
-					<dim>160</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_5065">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3044" name="onnx::Gather_5042" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5042"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_5042"/>
-			</output>
-		</layer>
-		<layer id="3045" name="Constant_19872" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_19872"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3046" name="onnx::Unsqueeze_5043" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_19872, onnx::Gather_5042, onnx::Unsqueeze_5043"/>
-			</rt_info>
+		<layer id="2868" name="Convert_25428" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<input>
 				<port id="0" precision="I64">
-					<dim>3</dim>
+					<dim>4</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_5043"/>
+				<port id="1" precision="FP32">
+					<dim>4</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3047" name="onnx::Mul_5066" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5066"/>
-			</rt_info>
+		<layer id="2869" name="/up_blocks.1/upsamplers.0/Constant" type="Const" version="opset1">
+			<data element_type="f32" shape="4" offset="992563906" size="16" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Mul_5066"/>
+				<port id="0" precision="FP32" names="/up_blocks.1/upsamplers.0/Constant_output_0">
+					<dim>4</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3048" name="onnx::Unsqueeze_5067" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5066, onnx::Unsqueeze_5067"/>
-			</rt_info>
+		<layer id="2870" name="Multiply_25429" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>4</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>4</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_5067"/>
-			</output>
-		</layer>
-		<layer id="3049" name="onnx::Unsqueeze_5072" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5072"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5072">
-					<dim>1</dim>
+				<port id="2" precision="FP32">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3050" name="onnx::Concat_5073" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5073, onnx::Unsqueeze_5072"/>
-			</rt_info>
+		<layer id="2871" name="Convert_25430" type="Convert" version="opset1">
+			<data destination_type="i64" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5073">
-					<dim>1</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3051" name="Constant_89112" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_19876, onnx::Concat_5075, onnx::Gather_5045, onnx::Unsqueeze_5046, onnx::Unsqueeze_5074"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3052" name="Constant_19876" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_19876"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3053" name="onnx::Unsqueeze_5046" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_19876, onnx::Concat_5075, onnx::Gather_5045, onnx::Unsqueeze_5046, onnx::Unsqueeze_5074"/>
-			</rt_info>
+		<layer id="2872" name="/up_blocks.1/upsamplers.0/Resize" type="Interpolate" version="opset4">
+			<data mode="nearest" shape_calculation_mode="scales" coordinate_transformation_mode="asymmetric" nearest_mode="floor" antialias="false" pads_begin="0, 0, 0, 0" pads_end="0, 0, 0, 0" cube_coeff="-0.75" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>16</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
+				</port>
+				<port id="2" precision="FP32">
+					<dim>4</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_5075">
-					<dim>1</dim>
+				<port id="3" precision="FP32" names="/up_blocks.1/upsamplers.0/Resize_output_0">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3054" name="onnx::Div_5068" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_5068"/>
-			</rt_info>
+		<layer id="2873" name="up_blocks.1.upsamplers.0.conv.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1280, 1280, 3, 3" offset="1509141202" size="29491200" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_5068"/>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3055" name="onnx::Cast_5069" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
+		<layer id="2874" name="up_blocks.1.upsamplers.0.conv.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_5069, onnx::Cast_5070, onnx::Div_5068, onnx::Unsqueeze_5071"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_5069,onnx::Cast_5070,onnx::Unsqueeze_5071"/>
-			</output>
-		</layer>
-		<layer id="3056" name="onnx::Unsqueeze_5076" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5076"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5076">
-					<dim>1</dim>
+				<port id="1" precision="FP32" names="up_blocks.1.upsamplers.0.conv.weight">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3057" name="onnx::Concat_5077" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5077, onnx::Unsqueeze_5076"/>
-			</rt_info>
+		<layer id="2875" name="/up_blocks.1/upsamplers.0/conv/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1280</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5077">
-					<dim>1</dim>
+				<port id="2" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3058" name="onnx::Reshape_5078" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5078"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
+		<layer id="2876" name="Reshape_25452_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="1538632402" size="2560" />
+			<output>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
+					<dim>1280</dim>
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
 				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_5078">
-					<dim>3</dim>
-				</port>
 			</output>
 		</layer>
-		<layer id="3059" name="q.147" type="Reshape" version="opset1">
-			<data special_zero="true"/>
+		<layer id="2877" name="Reshape_25452" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="q.147"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>256</dim>
-					<dim>160</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.147">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3060" name="Constant_148132" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="2892426544" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.143, onnx::MatMul_8921"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1280</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3061" name="k.143" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.143, onnx::MatMul_8921"/>
-			</rt_info>
+		<layer id="2878" name="/up_blocks.1/upsamplers.0/conv/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>1280</dim>
-					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.143">
+				<port id="2" precision="FP32" names="/up_blocks.1/upsamplers.0/conv/Conv_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3062" name="Constant_108165" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
-			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3063" name="onnx::Gather_5080" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5080, onnx::Gather_5083, onnx::Gather_5086"/>
-			</rt_info>
+		<layer id="2879" name="/up_blocks.2/Concat" type="Concat" version="opset1">
+			<data axis="1" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_5080,onnx::Gather_5083,onnx::Gather_5086">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/Concat_output_0">
+					<dim>2</dim>
+					<dim>1920</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3064" name="onnx::Gather_5087" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5087"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_5087"/>
-			</output>
-		</layer>
-		<layer id="3065" name="Constant_20121" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_20121"/>
-			</rt_info>
+		<layer id="2880" name="up_blocks.2.resnets.0.conv_shortcut.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 1920, 1, 1" offset="1538634962" size="2457600" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>1920</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3066" name="onnx::Div_5088" type="Gather" version="opset8">
-			<data batch_dims="0"/>
+		<layer id="2881" name="up_blocks.2.resnets.0.conv_shortcut.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_20121, onnx::Div_5088, onnx::Gather_5087"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>1920</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_5088"/>
-			</output>
-		</layer>
-		<layer id="3067" name="onnx::Div_5089" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_5089"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_5089"/>
+				<port id="1" precision="FP32" names="up_blocks.2.resnets.0.conv_shortcut.weight">
+					<dim>640</dim>
+					<dim>1920</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3068" name="onnx::Cast_5090" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_5090, onnx::Cast_5091, onnx::Div_5089, onnx::Unsqueeze_5092"/>
-			</rt_info>
+		<layer id="2882" name="/up_blocks.2/resnets.0/conv_shortcut/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1920</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>1920</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_5090,onnx::Cast_5091,onnx::Unsqueeze_5092"/>
+				<port id="2" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3069" name="onnx::Unsqueeze_5100" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5100"/>
-			</rt_info>
+		<layer id="2883" name="Reshape_25836_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="1541092562" size="1280" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5100">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3070" name="onnx::Concat_5101" type="Unsqueeze" version="opset1">
+		<layer id="2884" name="Reshape_25836" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5101, onnx::Unsqueeze_5100"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5101">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3071" name="onnx::Reshape_5102" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_5102"/>
-			</rt_info>
+		<layer id="2885" name="/up_blocks.2/resnets.0/conv_shortcut/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64">
-					<dim>4</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.0/conv_shortcut/Conv_output_0">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3072" name="onnx::Transpose_5103" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_5103"/>
-			</rt_info>
+		<layer id="2886" name="/up_blocks.2/resnets.0/norm1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.2/resnets.0/norm1/Constant_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2887" name="/up_blocks.2/resnets.0/norm1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>1920</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_5103">
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.0/norm1/Reshape_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>8</dim>
-					<dim>160</dim>
+					<dim>32</dim>
+					<dim>61440</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3073" name="Constant_20234" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_20234"/>
-			</rt_info>
+		<layer id="2888" name="Constant_25490" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3074" name="onnx::Reshape_5104" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5104"/>
-			</rt_info>
+		<layer id="2889" name="MVN_25491" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>8</dim>
-					<dim>160</dim>
+					<dim>32</dim>
+					<dim>61440</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_5104">
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.0/norm1/InstanceNormalization_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+					<dim>32</dim>
+					<dim>61440</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3075" name="onnx::Gather_5081" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5081"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_5081"/>
-			</output>
-		</layer>
-		<layer id="3076" name="Constant_20113" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_20113"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3077" name="onnx::Unsqueeze_5082" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_20113, onnx::Gather_5081, onnx::Unsqueeze_5082"/>
-			</rt_info>
+		<layer id="2890" name="/up_blocks.2/resnets.0/norm1/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1920</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_5082"/>
-			</output>
-		</layer>
-		<layer id="3078" name="onnx::Mul_5105" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5105"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_5105"/>
+				<port id="1" precision="I64" names="/up_blocks.2/resnets.0/norm1/Shape_output_0">
+					<dim>4</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3079" name="onnx::Unsqueeze_5106" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5105, onnx::Unsqueeze_5106"/>
-			</rt_info>
+		<layer id="2891" name="/up_blocks.2/resnets.0/norm1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>61440</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_5106"/>
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.0/norm1/Reshape_1_output_0">
+					<dim>2</dim>
+					<dim>1920</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3080" name="onnx::Unsqueeze_5111" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5111"/>
-			</rt_info>
+		<layer id="2892" name="Constant_87095_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1920, 1, 1" offset="1541093842" size="3840" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5111">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1920</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3081" name="onnx::Concat_5112" type="Unsqueeze" version="opset1">
+		<layer id="2893" name="Constant_87095" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5112, onnx::Unsqueeze_5111"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1920</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5112">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1920</dim>
 					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3082" name="Constant_89139" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_20117, onnx::Concat_5114, onnx::Gather_5084, onnx::Unsqueeze_5085, onnx::Unsqueeze_5113"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3083" name="Constant_20117" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_20117"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3084" name="onnx::Unsqueeze_5085" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_20117, onnx::Concat_5114, onnx::Gather_5084, onnx::Unsqueeze_5085, onnx::Unsqueeze_5113"/>
-			</rt_info>
+		<layer id="2894" name="/up_blocks.2/resnets.0/norm1/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1920</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1920</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_5114">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.0/norm1/Mul_output_0">
+					<dim>2</dim>
+					<dim>1920</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3085" name="onnx::Div_5107" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_5107"/>
-			</rt_info>
+		<layer id="2895" name="Constant_87096_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1920, 1, 1" offset="1541097682" size="3840" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_5107"/>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1920</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3086" name="onnx::Cast_5108" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
+		<layer id="2896" name="Constant_87096" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_5108, onnx::Cast_5109, onnx::Div_5107, onnx::Unsqueeze_5110"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1920</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_5108,onnx::Cast_5109,onnx::Unsqueeze_5110"/>
-			</output>
-		</layer>
-		<layer id="3087" name="onnx::Unsqueeze_5115" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5115"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5115">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1920</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3088" name="onnx::Concat_5116" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5116, onnx::Unsqueeze_5115"/>
-			</rt_info>
+		<layer id="2897" name="/up_blocks.2/resnets.0/norm1/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1920</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1920</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5116">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.0/norm1/Add_output_0">
+					<dim>2</dim>
+					<dim>1920</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3089" name="onnx::Reshape_5117" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5117"/>
-			</rt_info>
+		<layer id="2898" name="/up_blocks.2/resnets.0/nonlinearity/Mul" type="Swish" version="opset4">
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1920</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_5117">
+				<port id="1" precision="FP32" names="/up_blocks.2/resnets.0/nonlinearity/Mul_output_0">
+					<dim>2</dim>
+					<dim>1920</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2899" name="up_blocks.2.resnets.0.conv1.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 1920, 3, 3" offset="1541101522" size="22118400" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>1920</dim>
+					<dim>3</dim>
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3090" name="k.147" type="Reshape" version="opset1">
-			<data special_zero="true"/>
+		<layer id="2900" name="up_blocks.2.resnets.0.conv1.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="k.147"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>256</dim>
-					<dim>160</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>1920</dim>
+					<dim>3</dim>
 					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.147">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+				<port id="1" precision="FP32" names="up_blocks.2.resnets.0.conv1.weight">
+					<dim>640</dim>
+					<dim>1920</dim>
+					<dim>3</dim>
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3091" name="onnx::Mul_5158" type="Einsum" version="opset7">
-			<data equation="bid,bjd-&gt;bij"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5158"/>
-			</rt_info>
+		<layer id="2901" name="/up_blocks.2/resnets.0/conv1/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+					<dim>2</dim>
+					<dim>1920</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+					<dim>640</dim>
+					<dim>1920</dim>
+					<dim>3</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_5158">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>256</dim>
+				<port id="2" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3092" name="Constant_150495" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1" offset="316114004" size="4"/>
+		<layer id="2902" name="Reshape_25615_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="1563219922" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>640</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3093" name="onnx::Softmax_5160" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2903" name="Reshape_25615" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Softmax_5160"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>256</dim>
-				</port>
-				<port id="1" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>640</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Softmax_5160">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>256</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3094" name="attn.71" type="SoftMax" version="opset8">
-			<data axis="-1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="attn.71"/>
-			</rt_info>
+		<layer id="2904" name="/up_blocks.2/resnets.0/conv1/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>256</dim>
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="attn.71">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>256</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.0/conv1/Conv_output_0">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3095" name="Constant_148139" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="2898980144" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8922, v.143"/>
-			</rt_info>
+		<layer id="2905" name="up_blocks.2.resnets.0.time_emb_proj.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 1280" offset="1563221202" size="1638400" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1280</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3096" name="v.143" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="2906" name="up_blocks.2.resnets.0.time_emb_proj.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8922, v.143"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="up_blocks.2.resnets.0.time_emb_proj.weight">
+					<dim>640</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2907" name="/up_blocks.2/resnets.0/time_emb_proj/Gemm/WithoutBiases" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
 					<dim>1280</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1280</dim>
+					<dim>640</dim>
 					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.143">
+				<port id="2" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3097" name="Constant_108234" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
+		<layer id="2908" name="Constant_87097_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640" offset="1564859602" size="1280" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3098" name="onnx::Gather_5119" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
+		<layer id="2909" name="Constant_87097" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5119, onnx::Gather_5122, onnx::Gather_5125"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_5119,onnx::Gather_5122,onnx::Gather_5125">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3099" name="onnx::Gather_5126" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5126"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_5126"/>
-			</output>
-		</layer>
-		<layer id="3100" name="Constant_20362" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_20362"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3101" name="onnx::Div_5127" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_20362, onnx::Div_5127, onnx::Gather_5126"/>
-			</rt_info>
+		<layer id="2910" name="/up_blocks.2/resnets.0/time_emb_proj/Gemm" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_5127"/>
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.0/time_emb_proj/Gemm_output_0">
+					<dim>2</dim>
+					<dim>640</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3102" name="onnx::Div_5128" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_5128"/>
-			</rt_info>
+		<layer id="2911" name="/up_blocks.2/resnets.0/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_5128"/>
+				<port id="0" precision="I64" names="/up_blocks.2/resnets.0/Constant_output_0">
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3103" name="onnx::Cast_5129" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_5129, onnx::Cast_5130, onnx::Div_5128, onnx::Unsqueeze_5131"/>
-			</rt_info>
+		<layer id="2912" name="/up_blocks.2/resnets.0/Unsqueeze" type="Unsqueeze" version="opset1">
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_5129,onnx::Cast_5130,onnx::Unsqueeze_5131"/>
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.0/Unsqueeze_output_0">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3104" name="onnx::Unsqueeze_5139" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5139"/>
-			</rt_info>
+		<layer id="2913" name="/up_blocks.2/resnets.0/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="7064752" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5139">
+				<port id="0" precision="I64" names="/up_blocks.2/resnets.0/Constant_1_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3105" name="onnx::Concat_5140" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5140, onnx::Unsqueeze_5139"/>
-			</rt_info>
+		<layer id="2914" name="/up_blocks.2/resnets.0/Unsqueeze_1" type="Unsqueeze" version="opset1">
 			<input>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+				</port>
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5140">
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.0/Unsqueeze_1_output_0">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3106" name="onnx::Reshape_5141" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_5141"/>
-			</rt_info>
+		<layer id="2915" name="/up_blocks.2/resnets.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64">
-					<dim>4</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.0/Add_output_0">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3107" name="onnx::Transpose_5142" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_5142"/>
-			</rt_info>
+		<layer id="2916" name="/up_blocks.2/resnets.0/norm2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.2/resnets.0/norm2/Constant_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2917" name="/up_blocks.2/resnets.0/norm2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_5142">
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.0/norm2/Reshape_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>8</dim>
-					<dim>160</dim>
+					<dim>32</dim>
+					<dim>20480</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3108" name="Constant_20475" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_20475"/>
-			</rt_info>
+		<layer id="2918" name="Constant_25663" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3109" name="onnx::Reshape_5143" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5143"/>
-			</rt_info>
+		<layer id="2919" name="MVN_25664" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>8</dim>
-					<dim>160</dim>
+					<dim>32</dim>
+					<dim>20480</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_5143">
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.0/norm2/InstanceNormalization_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+					<dim>32</dim>
+					<dim>20480</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3110" name="onnx::Gather_5120" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5120"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_5120"/>
-			</output>
-		</layer>
-		<layer id="3111" name="Constant_20354" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_20354"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3112" name="onnx::Unsqueeze_5121" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_20354, onnx::Gather_5120, onnx::Unsqueeze_5121"/>
-			</rt_info>
+		<layer id="2920" name="/up_blocks.2/resnets.0/norm2/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_5121"/>
-			</output>
-		</layer>
-		<layer id="3113" name="onnx::Mul_5144" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5144"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_5144"/>
+				<port id="1" precision="I64" names="/up_blocks.2/resnets.0/norm2/Shape_output_0">
+					<dim>4</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3114" name="onnx::Unsqueeze_5145" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5144, onnx::Unsqueeze_5145"/>
-			</rt_info>
+		<layer id="2921" name="/up_blocks.2/resnets.0/norm2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>20480</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_5145"/>
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.0/norm2/Reshape_1_output_0">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3115" name="onnx::Unsqueeze_5150" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5150"/>
-			</rt_info>
+		<layer id="2922" name="Constant_87098_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="1564860882" size="1280" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5150">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3116" name="onnx::Concat_5151" type="Unsqueeze" version="opset1">
+		<layer id="2923" name="Constant_87098" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5151, onnx::Unsqueeze_5150"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5151">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
 					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3117" name="Constant_89166" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_20358, onnx::Concat_5153, onnx::Gather_5123, onnx::Unsqueeze_5124, onnx::Unsqueeze_5152"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3118" name="Constant_20358" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_20358"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3119" name="onnx::Unsqueeze_5124" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_20358, onnx::Concat_5153, onnx::Gather_5123, onnx::Unsqueeze_5124, onnx::Unsqueeze_5152"/>
-			</rt_info>
+		<layer id="2924" name="/up_blocks.2/resnets.0/norm2/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_5153">
 					<dim>1</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="3120" name="onnx::Div_5146" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_5146"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_5146"/>
-			</output>
-		</layer>
-		<layer id="3121" name="onnx::Cast_5147" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_5147, onnx::Cast_5148, onnx::Div_5146, onnx::Unsqueeze_5149"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_5147,onnx::Cast_5148,onnx::Unsqueeze_5149"/>
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.0/norm2/Mul_output_0">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3122" name="onnx::Unsqueeze_5154" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5154"/>
-			</rt_info>
+		<layer id="2925" name="Constant_87099_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="1564862162" size="1280" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5154">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3123" name="onnx::Concat_5155" type="Unsqueeze" version="opset1">
+		<layer id="2926" name="Constant_87099" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5155, onnx::Unsqueeze_5154"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5155">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3124" name="onnx::Reshape_5156" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5156"/>
-			</rt_info>
+		<layer id="2927" name="/up_blocks.2/resnets.0/norm2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_5156">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.0/norm2/Add_output_0">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3125" name="v.147" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="v.147"/>
-			</rt_info>
+		<layer id="2928" name="/up_blocks.2/resnets.0/nonlinearity_1/Mul" type="Swish" version="opset4">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>256</dim>
-					<dim>160</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.147">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+				<port id="1" precision="FP32" names="/up_blocks.2/resnets.0/nonlinearity_1/Mul_output_0">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3126" name="out.71" type="Einsum" version="opset7">
-			<data equation="bij,bjd-&gt;bid"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="out.71"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>256</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
-				</port>
-			</input>
+		<layer id="2929" name="up_blocks.2.resnets.0.conv2.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640, 3, 3" offset="1564863442" size="7372800" />
 			<output>
-				<port id="2" precision="FP32" names="out.71">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
+					<dim>3</dim>
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3127" name="onnx::Gather_5163" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
+		<layer id="2930" name="up_blocks.2.resnets.0.conv2.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5163, onnx::Gather_5166, onnx::Gather_5169"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
+					<dim>3</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_5163,onnx::Gather_5166,onnx::Gather_5169">
+				<port id="1" precision="FP32" names="up_blocks.2.resnets.0.conv2.weight">
+					<dim>640</dim>
+					<dim>640</dim>
+					<dim>3</dim>
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3128" name="onnx::Gather_5164" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5164"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_5164"/>
-			</output>
-		</layer>
-		<layer id="3129" name="Constant_20600" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_20600"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3130" name="onnx::Div_5165" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_20600, onnx::Div_5165, onnx::Gather_5164"/>
-			</rt_info>
+		<layer id="2931" name="/up_blocks.2/resnets.0/conv2/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
+					<dim>3</dim>
 					<dim>3</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_5165"/>
+				<port id="2" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3131" name="onnx::Div_5172" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_5172"/>
-			</rt_info>
+		<layer id="2932" name="Reshape_25788_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="1572236242" size="1280" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_5172"/>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3132" name="onnx::Cast_5173" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
+		<layer id="2933" name="Reshape_25788" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_5173, onnx::Cast_5174, onnx::Div_5172, onnx::Unsqueeze_5175"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_5173,onnx::Cast_5174,onnx::Unsqueeze_5175"/>
-			</output>
-		</layer>
-		<layer id="3133" name="onnx::Unsqueeze_5177" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5177"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5177">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3134" name="onnx::Concat_5178" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5178, onnx::Unsqueeze_5177"/>
-			</rt_info>
+		<layer id="2934" name="/up_blocks.2/resnets.0/conv2/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5178">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.0/conv2/Conv_output_0">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3135" name="Constant_90786" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="19464604" size="16"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_5185"/>
-			</rt_info>
+		<layer id="2935" name="/up_blocks.2/resnets.0/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64">
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.0/Add_1_output_0,/up_blocks.2/resnets.0/Div_output_0">
 					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3136" name="Constant_90787" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
+		<layer id="2936" name="/up_blocks.2/attentions.0/norm/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.0/norm/Constant_output_0">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3137" name="Gather_90788" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_5185"/>
-			</rt_info>
+		<layer id="2937" name="/up_blocks.2/attentions.0/norm/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>2</dim>
+					<dim>3</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/norm/Reshape_output_0">
 					<dim>2</dim>
+					<dim>32</dim>
+					<dim>20480</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3138" name="onnx::Reshape_5185" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_5185"/>
-			</rt_info>
-			<input>
+		<layer id="2938" name="Constant_25876" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
+			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>2</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_5185">
-					<dim>4</dim>
-				</port>
 			</output>
 		</layer>
-		<layer id="3139" name="onnx::Transpose_5186" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_5186"/>
-			</rt_info>
+		<layer id="2939" name="MVN_25877" type="MVN" version="opset6">
+			<data eps="9.9999999747524271e-07" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>20480</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_5186">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/norm/InstanceNormalization_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+					<dim>32</dim>
+					<dim>20480</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3140" name="Constant_20721" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_20721"/>
-			</rt_info>
+		<layer id="2940" name="/up_blocks.2/attentions.0/norm/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64">
+				<port id="1" precision="I64" names="/up_blocks.2/attentions.0/norm/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3141" name="onnx::Reshape_5187" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5187"/>
-			</rt_info>
+		<layer id="2941" name="/up_blocks.2/attentions.0/norm/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+					<dim>32</dim>
+					<dim>20480</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_5187">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/norm/Reshape_1_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>8</dim>
-					<dim>160</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3142" name="onnx::Div_5188" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_5188"/>
-			</rt_info>
+		<layer id="2942" name="Constant_87100_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="1572237522" size="1280" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_5188"/>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3143" name="onnx::Cast_5189" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
+		<layer id="2943" name="Constant_87100" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_5189, onnx::Cast_5190, onnx::Div_5188, onnx::Unsqueeze_5191"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_5189,onnx::Cast_5190,onnx::Unsqueeze_5191"/>
-			</output>
-		</layer>
-		<layer id="3144" name="onnx::Unsqueeze_5194" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5194"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5194">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3145" name="onnx::Concat_5195" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5195, onnx::Unsqueeze_5194"/>
-			</rt_info>
+		<layer id="2944" name="/up_blocks.2/attentions.0/norm/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5195">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/norm/Mul_output_0">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3146" name="Constant_89193" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_20604, onnx::Concat_5197, onnx::Gather_5167, onnx::Unsqueeze_5168, onnx::Unsqueeze_5196"/>
-			</rt_info>
+		<layer id="2945" name="Constant_87101_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="1572238802" size="1280" />
 			<output>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3147" name="Constant_20604" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
+		<layer id="2946" name="Constant_87101" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_20604"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3148" name="onnx::Unsqueeze_5168" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_20604, onnx::Concat_5197, onnx::Gather_5167, onnx::Unsqueeze_5168, onnx::Unsqueeze_5196"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_5197">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3149" name="onnx::Gather_5170" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5170"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_5170"/>
-			</output>
-		</layer>
-		<layer id="3150" name="Constant_20608" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_20608"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3151" name="onnx::Unsqueeze_5171" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_20608, onnx::Gather_5170, onnx::Unsqueeze_5171"/>
-			</rt_info>
+		<layer id="2947" name="/up_blocks.2/attentions.0/norm/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_5171"/>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/norm/Add_output_0">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3152" name="onnx::Mul_5192" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5192"/>
-			</rt_info>
+		<layer id="2948" name="up_blocks.2.attentions.0.proj_in.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640, 1, 1" offset="1572240082" size="819200" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Mul_5192"/>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3153" name="onnx::Unsqueeze_5193" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2949" name="up_blocks.2.attentions.0.proj_in.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5192, onnx::Unsqueeze_5193"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_5193"/>
-			</output>
-		</layer>
-		<layer id="3154" name="onnx::Unsqueeze_5198" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5198"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5198">
+				<port id="1" precision="FP32" names="up_blocks.2.attentions.0.proj_in.weight">
+					<dim>640</dim>
+					<dim>640</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3155" name="onnx::Concat_5199" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5199, onnx::Unsqueeze_5198"/>
-			</rt_info>
+		<layer id="2950" name="/up_blocks.2/attentions.0/proj_in/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5199">
+				<port id="2" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2951" name="Reshape_25999_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="1573059282" size="1280" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3156" name="onnx::Reshape_5200" type="Concat" version="opset1">
-			<data axis="0"/>
+		<layer id="2952" name="Reshape_25999" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5200"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
+					<dim>640</dim>
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_5200">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3157" name="onnx::MatMul_5201" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_5201"/>
-			</rt_info>
+		<layer id="2953" name="/up_blocks.2/attentions.0/proj_in/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>8</dim>
-					<dim>160</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_5201">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/proj_in/Conv_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3158" name="Constant_148146" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="2905533744" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5203, onnx::MatMul_8927"/>
-			</rt_info>
+		<layer id="2954" name="Constant_26027" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9116600" size="32" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1280</dim>
-					<dim>1280</dim>
+				<port id="0" precision="I64">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3159" name="onnx::Add_5203" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5203, onnx::MatMul_8927"/>
-			</rt_info>
+		<layer id="2955" name="/up_blocks.2/attentions.0/Transpose" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>1280</dim>
-					<dim>1280</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_5203">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/Transpose_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3160" name="input.744" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.744"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1280</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
-				</port>
-			</input>
+		<layer id="2956" name="/up_blocks.2/attentions.0/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="39374238" size="24" />
 			<output>
-				<port id="2" precision="FP32" names="input.744">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.0/Constant_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3161" name="input.748" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.748"/>
-			</rt_info>
+		<layer id="2957" name="/up_blocks.2/attentions.0/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+					<dim>640</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+				<port id="1" precision="I64">
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.748">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/Reshape_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3162" name="Constant_20842" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_20842"/>
-			</rt_info>
+		<layer id="2958" name="Constant_26036" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3163" name="onnx::Mul_5214" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5210, onnx::Div_5213, onnx::Mul_5214, onnx::Pow_5207, onnx::ReduceMean_5209, onnx::Sqrt_5212, onnx::Sub_5206"/>
-			</rt_info>
+		<layer id="2959" name="/up_blocks.2/attentions.0/transformer_blocks.0/norm1/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_5214">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/norm1/Div_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3164" name="Constant_150497" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="2912087344" size="5120"/>
+		<layer id="2960" name="Constant_87102_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="1573060562" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>1280</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3165" name="onnx::Add_5215" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2961" name="Constant_87102" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5215"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2962" name="/up_blocks.2/attentions.0/transformer_blocks.0/norm1/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>1280</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_5215">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/norm1/Mul_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3166" name="Constant_150498" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="2912092464" size="5120"/>
+		<layer id="2963" name="Constant_87103_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="1573061842" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>1280</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3167" name="onnx::MatMul_5216" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="2964" name="Constant_87103" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_5216"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2965" name="/up_blocks.2/attentions.0/transformer_blocks.0/norm1/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>1280</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_5216">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/norm1/Add_1_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3168" name="Constant_148154" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="2912097584" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8928, q.151"/>
-			</rt_info>
+		<layer id="2966" name="Constant_85726_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640" offset="1573063122" size="819200" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1280</dim>
-					<dim>1280</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3169" name="q.151" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="2967" name="Constant_85726" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8928, q.151"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2968" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/to_q/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1280</dim>
-					<dim>1280</dim>
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.151">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/to_q/MatMul_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3170" name="Constant_108303" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
+		<layer id="2969" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="40196022" size="32" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Constant_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3171" name="onnx::Gather_5223" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5223, onnx::Gather_5226, onnx::Gather_5229"/>
-			</rt_info>
+		<layer id="2970" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_5223,onnx::Gather_5226,onnx::Gather_5229">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Reshape_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>8</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3172" name="onnx::Gather_5230" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5230"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_5230"/>
-			</output>
-		</layer>
-		<layer id="3173" name="Constant_20868" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_20868"/>
-			</rt_info>
+		<layer id="2971" name="Constant_26059" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64">
+					<dim>4</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3174" name="onnx::Div_5231" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_20868, onnx::Div_5231, onnx::Gather_5230"/>
-			</rt_info>
+		<layer id="2972" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Transpose" type="Transpose" version="opset1">
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>8</dim>
+					<dim>80</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_5231"/>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Transpose_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3175" name="onnx::Div_5232" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_5232"/>
-			</rt_info>
+		<layer id="2973" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="40196054" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_5232"/>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Constant_1_output_0">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3176" name="onnx::Cast_5233" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_5233, onnx::Cast_5234, onnx::Div_5232, onnx::Unsqueeze_5235"/>
-			</rt_info>
+		<layer id="2974" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>3</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_5233,onnx::Cast_5234,onnx::Unsqueeze_5235"/>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Reshape_1_output_0">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3177" name="onnx::Unsqueeze_5243" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5243"/>
-			</rt_info>
+		<layer id="2975" name="Constant_85733_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640" offset="1573882322" size="819200" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5243">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3178" name="onnx::Concat_5244" type="Unsqueeze" version="opset1">
+		<layer id="2976" name="Constant_85733" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5244, onnx::Unsqueeze_5243"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5244">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3179" name="onnx::Reshape_5245" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_5245"/>
-			</rt_info>
+		<layer id="2977" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/to_k/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/to_k/MatMul_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2978" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="40196022" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Constant_2_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3180" name="onnx::Transpose_5246" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_5246"/>
-			</rt_info>
+		<layer id="2979" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Reshape_2" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_5246">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Reshape_2_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
+					<dim>1024</dim>
 					<dim>8</dim>
-					<dim>160</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3181" name="Constant_20981" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_20981"/>
-			</rt_info>
+		<layer id="2980" name="Constant_26075" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3182" name="onnx::Reshape_5247" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5247"/>
-			</rt_info>
+		<layer id="2981" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Transpose_1" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
+					<dim>1024</dim>
 					<dim>8</dim>
-					<dim>160</dim>
+					<dim>80</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_5247">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Transpose_1_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3183" name="onnx::Gather_5224" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5224"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_5224"/>
-			</output>
-		</layer>
-		<layer id="3184" name="Constant_20860" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_20860"/>
-			</rt_info>
+		<layer id="2982" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="40196054" size="24" />
 			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3185" name="onnx::Unsqueeze_5225" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_20860, onnx::Gather_5224, onnx::Unsqueeze_5225"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Constant_3_output_0">
 					<dim>3</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_5225"/>
-			</output>
-		</layer>
-		<layer id="3186" name="onnx::Mul_5248" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5248"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_5248"/>
 			</output>
 		</layer>
-		<layer id="3187" name="onnx::Unsqueeze_5249" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5248, onnx::Unsqueeze_5249"/>
-			</rt_info>
+		<layer id="2983" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Reshape_3" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>3</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_5249"/>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Reshape_3_output_0">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3188" name="onnx::Unsqueeze_5254" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5254"/>
-			</rt_info>
+		<layer id="2984" name="Constant_87104_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1" offset="41015278" size="2" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5254">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3189" name="onnx::Concat_5255" type="Unsqueeze" version="opset1">
+		<layer id="2985" name="Constant_87104" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5255, onnx::Unsqueeze_5254"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5255">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3190" name="Constant_89220" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_20864, onnx::Concat_5257, onnx::Gather_5227, onnx::Unsqueeze_5228, onnx::Unsqueeze_5256"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3191" name="Constant_20864" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_20864"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3192" name="onnx::Unsqueeze_5228" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_20864, onnx::Concat_5257, onnx::Gather_5227, onnx::Unsqueeze_5228, onnx::Unsqueeze_5256"/>
-			</rt_info>
+		<layer id="2986" name="Multiply_86215" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_5257">
-					<dim>1</dim>
+				<port id="2" precision="FP32">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3193" name="onnx::Div_5250" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_5250"/>
-			</rt_info>
+		<layer id="2987" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Mul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_5250"/>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Add_output_0,/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Mul_output_0">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>1024</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3194" name="onnx::Cast_5251" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_5251, onnx::Cast_5252, onnx::Div_5250, onnx::Unsqueeze_5253"/>
-			</rt_info>
+		<layer id="2988" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Softmax" type="SoftMax" version="opset8">
+			<data axis="-1" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>1024</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_5251,onnx::Cast_5252,onnx::Unsqueeze_5253"/>
+				<port id="1" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Cast_output_0,/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Softmax_output_0">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>1024</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3195" name="onnx::Unsqueeze_5258" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5258"/>
-			</rt_info>
+		<layer id="2989" name="Constant_85740_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640" offset="1574701522" size="819200" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5258">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3196" name="onnx::Concat_5259" type="Unsqueeze" version="opset1">
+		<layer id="2990" name="Constant_85740" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5259, onnx::Unsqueeze_5258"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5259">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3197" name="onnx::Reshape_5260" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5260"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_5260">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3198" name="q.155" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="q.155"/>
-			</rt_info>
+		<layer id="2991" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/to_v/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.155">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3199" name="Constant_148161" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 768" offset="2918651184" size="3932160"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.151, onnx::MatMul_8929"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1280</dim>
-					<dim>768</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/to_v/MatMul_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3200" name="k.151" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.151, onnx::MatMul_8929"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>77</dim>
-					<dim>768</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1280</dim>
-					<dim>768</dim>
-				</port>
-			</input>
+		<layer id="2992" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Constant_4" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="40196022" size="32" />
 			<output>
-				<port id="2" precision="FP32" names="k.151">
-					<dim>2</dim>
-					<dim>77</dim>
-					<dim>1280</dim>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Constant_4_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3201" name="onnx::Transpose_5275" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_5275"/>
-			</rt_info>
+		<layer id="2993" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Reshape_4" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>77</dim>
-					<dim>1280</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_5275">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Reshape_4_output_0">
 					<dim>2</dim>
-					<dim>77</dim>
+					<dim>1024</dim>
 					<dim>8</dim>
-					<dim>160</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3202" name="Constant_21102" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_21102"/>
-			</rt_info>
+		<layer id="2994" name="Constant_26091" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3203" name="onnx::Reshape_5276" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5276"/>
-			</rt_info>
+		<layer id="2995" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Transpose_2" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>77</dim>
+					<dim>1024</dim>
 					<dim>8</dim>
-					<dim>160</dim>
+					<dim>80</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_5276">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Transpose_2_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
-					<dim>77</dim>
-					<dim>160</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3204" name="k.155" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.155"/>
-			</rt_info>
+		<layer id="2996" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="40196054" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Constant_5_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="2997" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Reshape_5" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>8</dim>
-					<dim>77</dim>
-					<dim>160</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.155">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Reshape_5_output_0">
 					<dim>16</dim>
-					<dim>77</dim>
-					<dim>160</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3205" name="onnx::Mul_5312" type="Einsum" version="opset7">
-			<data equation="bid,bjd-&gt;bij"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5312"/>
-			</rt_info>
+		<layer id="2998" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/MatMul_1" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="false" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+					<dim>1024</dim>
+					<dim>1024</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>16</dim>
-					<dim>77</dim>
-					<dim>160</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_5312">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/MatMul_1_output_0">
 					<dim>16</dim>
-					<dim>256</dim>
-					<dim>77</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3206" name="Constant_150499" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1" offset="316114004" size="4"/>
+		<layer id="2999" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Constant_8" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="41834480" size="32" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Constant_8_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3207" name="onnx::Softmax_5314" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Softmax_5314"/>
-			</rt_info>
+		<layer id="3000" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Reshape_6" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
-					<dim>256</dim>
-					<dim>77</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Softmax_5314">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>77</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="3208" name="attn.75" type="SoftMax" version="opset8">
-			<data axis="-1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="attn.75"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>77</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="attn.75">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>77</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Reshape_6_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3209" name="Constant_148168" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 768" offset="2922583344" size="3932160"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8930, v.151"/>
-			</rt_info>
+		<layer id="3001" name="Constant_26117" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1280</dim>
-					<dim>768</dim>
+				<port id="0" precision="I64">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3210" name="v.151" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8930, v.151"/>
-			</rt_info>
+		<layer id="3002" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Transpose_4" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>77</dim>
-					<dim>768</dim>
+					<dim>8</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>1280</dim>
-					<dim>768</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.151">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Transpose_4_output_0">
 					<dim>2</dim>
-					<dim>77</dim>
-					<dim>1280</dim>
+					<dim>1024</dim>
+					<dim>8</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3211" name="onnx::Transpose_5300" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_5300"/>
-			</rt_info>
+		<layer id="3003" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Constant_9" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="39374238" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Constant_9_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3004" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Reshape_7" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>77</dim>
-					<dim>1280</dim>
+					<dim>1024</dim>
+					<dim>8</dim>
+					<dim>80</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_5300">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/Reshape_7_output_0">
 					<dim>2</dim>
-					<dim>77</dim>
-					<dim>8</dim>
-					<dim>160</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3212" name="Constant_21110" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_21110"/>
-			</rt_info>
+		<layer id="3005" name="Constant_85747_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640" offset="1575520722" size="819200" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3213" name="onnx::Reshape_5301" type="Transpose" version="opset1">
+		<layer id="3006" name="Constant_85747" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5301"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>77</dim>
-					<dim>8</dim>
-					<dim>160</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_5301">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>77</dim>
-					<dim>160</dim>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3214" name="v.155" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="v.155"/>
-			</rt_info>
+		<layer id="3007" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/to_out.0/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>77</dim>
-					<dim>160</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.155">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>160</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/to_out.0/MatMul_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3215" name="out.75" type="Einsum" version="opset7">
-			<data equation="bij,bjd-&gt;bid"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="out.75"/>
-			</rt_info>
+		<layer id="3008" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/to_out.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>77</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>160</dim>
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="out.75">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn1/to_out.0/Add_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3216" name="onnx::Gather_5317" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5317, onnx::Gather_5320, onnx::Gather_5323"/>
-			</rt_info>
+		<layer id="3009" name="/up_blocks.2/attentions.0/transformer_blocks.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_5317,onnx::Gather_5320,onnx::Gather_5323">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/Add_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3217" name="onnx::Gather_5318" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5318"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_5318"/>
-			</output>
-		</layer>
-		<layer id="3218" name="Constant_21122" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_21122"/>
-			</rt_info>
+		<layer id="3010" name="Constant_26129" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64">
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3219" name="onnx::Div_5319" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_21122, onnx::Div_5319, onnx::Gather_5318"/>
-			</rt_info>
+		<layer id="3011" name="/up_blocks.2/attentions.0/transformer_blocks.0/norm2/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>1</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_5319"/>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/norm2/Div_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3220" name="onnx::Div_5326" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_5326"/>
-			</rt_info>
+		<layer id="3012" name="Constant_87106_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="1576339922" size="1280" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_5326"/>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3221" name="onnx::Cast_5327" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
+		<layer id="3013" name="Constant_87106" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_5327, onnx::Cast_5328, onnx::Div_5326, onnx::Unsqueeze_5329"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_5327,onnx::Cast_5328,onnx::Unsqueeze_5329"/>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3222" name="onnx::Unsqueeze_5331" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5331"/>
-			</rt_info>
+		<layer id="3014" name="/up_blocks.2/attentions.0/transformer_blocks.0/norm2/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</input>
+			<output>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/norm2/Mul_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3015" name="Constant_87107_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="1576341202" size="1280" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5331">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3223" name="onnx::Concat_5332" type="Unsqueeze" version="opset1">
+		<layer id="3016" name="Constant_87107" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5332, onnx::Unsqueeze_5331"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5332">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3224" name="Constant_90796" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="19464604" size="16"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_5339"/>
-			</rt_info>
+		<layer id="3017" name="/up_blocks.2/attentions.0/transformer_blocks.0/norm2/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/norm2/Add_1_output_0">
 					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3225" name="Constant_90797" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
+		<layer id="3018" name="Constant_85755_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640" offset="1576342482" size="819200" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3226" name="Gather_90798" type="Gather" version="opset8">
-			<data batch_dims="0"/>
+		<layer id="3019" name="Constant_85755" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_5339"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3227" name="onnx::Reshape_5339" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_5339"/>
-			</rt_info>
+		<layer id="3020" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/to_q/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
-				<port id="2" precision="I64">
+			</input>
+			<output>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/to_q/MatMul_output_0">
 					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
-			</input>
+			</output>
+		</layer>
+		<layer id="3021" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="40196022" size="32" />
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_5339">
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Constant_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3228" name="onnx::Transpose_5340" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_5340"/>
-			</rt_info>
+		<layer id="3022" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_5340">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Reshape_output_0">
 					<dim>2</dim>
+					<dim>1024</dim>
 					<dim>8</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3229" name="Constant_21243" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_21243"/>
-			</rt_info>
+		<layer id="3023" name="Constant_26152" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3230" name="onnx::Reshape_5341" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5341"/>
-			</rt_info>
+		<layer id="3024" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Transpose" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>1024</dim>
 					<dim>8</dim>
-					<dim>256</dim>
-					<dim>160</dim>
+					<dim>80</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_5341">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Transpose_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
 					<dim>8</dim>
-					<dim>160</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3231" name="onnx::Div_5342" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_5342"/>
-			</rt_info>
+		<layer id="3025" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="40196054" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_5342"/>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Constant_1_output_0">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3232" name="onnx::Cast_5343" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_5343, onnx::Cast_5344, onnx::Div_5342, onnx::Unsqueeze_5345"/>
-			</rt_info>
+		<layer id="3026" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>3</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_5343,onnx::Cast_5344,onnx::Unsqueeze_5345"/>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Reshape_1_output_0">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3233" name="onnx::Unsqueeze_5348" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5348"/>
-			</rt_info>
+		<layer id="3027" name="Constant_85762_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 768" offset="1577161682" size="983040" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5348">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3234" name="onnx::Concat_5349" type="Unsqueeze" version="opset1">
+		<layer id="3028" name="Constant_85762" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5349, onnx::Unsqueeze_5348"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>768</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5349">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3235" name="Constant_89247" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_21126, onnx::Concat_5351, onnx::Gather_5321, onnx::Unsqueeze_5322, onnx::Unsqueeze_5350"/>
-			</rt_info>
+		<layer id="3029" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/to_k/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>768</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>768</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/to_k/MatMul_output_0">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3236" name="Constant_21126" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_21126"/>
-			</rt_info>
+		<layer id="3030" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="44458512" size="32" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Constant_2_output_0">
+					<dim>4</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3237" name="onnx::Unsqueeze_5322" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_21126, onnx::Concat_5351, onnx::Gather_5321, onnx::Unsqueeze_5322, onnx::Unsqueeze_5350"/>
-			</rt_info>
+		<layer id="3031" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Reshape_2" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_5351">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Reshape_2_output_0">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>8</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3238" name="onnx::Gather_5324" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5324"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_5324"/>
-			</output>
-		</layer>
-		<layer id="3239" name="Constant_21130" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_21130"/>
-			</rt_info>
+		<layer id="3032" name="Constant_26168" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64">
+					<dim>4</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3240" name="onnx::Unsqueeze_5325" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_21130, onnx::Gather_5324, onnx::Unsqueeze_5325"/>
-			</rt_info>
+		<layer id="3033" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Transpose_1" type="Transpose" version="opset1">
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>8</dim>
+					<dim>80</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_5325"/>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Transpose_1_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>77</dim>
+					<dim>80</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3241" name="onnx::Mul_5346" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5346"/>
-			</rt_info>
+		<layer id="3034" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="44458544" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Mul_5346"/>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Constant_3_output_0">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3242" name="onnx::Unsqueeze_5347" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5346, onnx::Unsqueeze_5347"/>
-			</rt_info>
+		<layer id="3035" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Reshape_3" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>77</dim>
+					<dim>80</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>3</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_5347"/>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Reshape_3_output_0">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>80</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3243" name="onnx::Unsqueeze_5352" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5352"/>
-			</rt_info>
+		<layer id="3036" name="Constant_87108_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1" offset="41015278" size="2" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5352">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3244" name="onnx::Concat_5353" type="Unsqueeze" version="opset1">
+		<layer id="3037" name="Constant_87108" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5353, onnx::Unsqueeze_5352"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5353">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3038" name="Multiply_86217" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>80</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
+			</input>
+			<output>
+				<port id="2" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>80</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3039" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Mul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>80</dim>
+				</port>
+			</input>
+			<output>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Add_output_0,/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Mul_output_0">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>77</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3040" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Softmax" type="SoftMax" version="opset8">
+			<data axis="-1" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>77</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Cast_output_0,/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Softmax_output_0">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>77</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3245" name="onnx::Reshape_5354" type="Concat" version="opset1">
-			<data axis="0"/>
+		<layer id="3041" name="Constant_85769_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 768" offset="1578144722" size="983040" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>768</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3042" name="Constant_85769" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5354"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>768</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>768</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3043" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/to_v/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>768</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>768</dim>
+				</port>
+			</input>
+			<output>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/to_v/MatMul_output_0">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3044" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Constant_4" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="44458512" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Constant_4_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3045" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Reshape_4" type="Reshape" version="opset1">
+			<data special_zero="true" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
+			</input>
+			<output>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Reshape_4_output_0">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>8</dim>
+					<dim>80</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3046" name="Constant_26184" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
+			<output>
+				<port id="0" precision="I64">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3047" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Transpose_2" type="Transpose" version="opset1">
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>8</dim>
+					<dim>80</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_5354">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Transpose_2_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>77</dim>
+					<dim>80</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3048" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="44458544" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Constant_5_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3246" name="onnx::MatMul_5355" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_5355"/>
-			</rt_info>
+		<layer id="3049" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Reshape_5" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
 					<dim>8</dim>
-					<dim>160</dim>
+					<dim>77</dim>
+					<dim>80</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_5355">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Reshape_5_output_0">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>80</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3050" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/MatMul_1" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="false" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>77</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>80</dim>
+				</port>
+			</input>
+			<output>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/MatMul_1_output_0">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3051" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Constant_8" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="41834480" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Constant_8_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3052" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Reshape_6" type="Reshape" version="opset1">
+			<data special_zero="true" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
+			</input>
+			<output>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Reshape_6_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>8</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3247" name="Constant_148175" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280" offset="2926515504" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5357, onnx::MatMul_8951"/>
-			</rt_info>
+		<layer id="3053" name="Constant_26210" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
+				<port id="0" precision="I64">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3054" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Transpose_4" type="Transpose" version="opset1">
+			<input>
 				<port id="0" precision="FP32">
-					<dim>1280</dim>
-					<dim>1280</dim>
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
+			</input>
+			<output>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Transpose_4_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>8</dim>
+					<dim>80</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3055" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Constant_9" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="39374238" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Constant_9_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3056" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Reshape_7" type="Reshape" version="opset1">
+			<data special_zero="true" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>8</dim>
+					<dim>80</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/Reshape_7_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3057" name="Constant_85776_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640" offset="1579127762" size="819200" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3248" name="onnx::Add_5357" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="3058" name="Constant_85776" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5357, onnx::MatMul_8951"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3059" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/to_out.0/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1280</dim>
-					<dim>1280</dim>
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_5357">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/to_out.0/MatMul_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3249" name="input.752" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.752"/>
-			</rt_info>
+		<layer id="3060" name="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/to_out.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>1280</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.752">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/attn2/to_out.0/Add_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3250" name="input.756" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.756"/>
-			</rt_info>
+		<layer id="3061" name="/up_blocks.2/attentions.0/transformer_blocks.0/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.756">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/Add_1_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3251" name="Constant_21364" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_21364"/>
-			</rt_info>
+		<layer id="3062" name="Constant_26222" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3252" name="onnx::Mul_5368" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5364, onnx::Div_5367, onnx::Mul_5368, onnx::Pow_5361, onnx::ReduceMean_5363, onnx::Sqrt_5366, onnx::Sub_5360"/>
-			</rt_info>
+		<layer id="3063" name="/up_blocks.2/attentions.0/transformer_blocks.0/norm3/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_5368">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/norm3/Div_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3253" name="Constant_150501" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="2933069104" size="5120"/>
+		<layer id="3064" name="Constant_87110_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="1579946962" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>1280</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3254" name="onnx::Add_5369" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3065" name="Constant_87110" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5369"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3066" name="/up_blocks.2/attentions.0/transformer_blocks.0/norm3/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>1280</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_5369">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/norm3/Mul_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3255" name="Constant_150502" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1280" offset="2933074224" size="5120"/>
+		<layer id="3067" name="Constant_87111_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="1579948242" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>1280</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3256" name="onnx::MatMul_5370" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3068" name="Constant_87111" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_5370"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3069" name="/up_blocks.2/attentions.0/transformer_blocks.0/norm3/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>1280</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_5370">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/norm3/Add_1_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3257" name="Constant_148183" type="Const" version="opset1">
-			<data element_type="f32" shape="10240, 1280" offset="2933079344" size="52428800"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5372, onnx::MatMul_8952"/>
-			</rt_info>
+		<layer id="3070" name="Constant_85784_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="5120, 640" offset="1579949522" size="6553600" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>10240</dim>
-					<dim>1280</dim>
+				<port id="0" precision="FP16">
+					<dim>5120</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3258" name="onnx::Add_5372" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="3071" name="Constant_85784" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5372, onnx::MatMul_8952"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>5120</dim>
+					<dim>640</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>5120</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3072" name="/up_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/proj/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>10240</dim>
-					<dim>1280</dim>
+					<dim>5120</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_5372">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/proj/MatMul_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>10240</dim>
+					<dim>1024</dim>
+					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3259" name="onnx::Shape_5373" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Shape_5373"/>
-			</rt_info>
+		<layer id="3073" name="/up_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/proj/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>10240</dim>
+					<dim>5120</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>10240</dim>
+					<dim>1024</dim>
+					<dim>5120</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Shape_5373">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/proj/Add_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>10240</dim>
+					<dim>1024</dim>
+					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3260" name="Constant_127476" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_21392, onnx::Gather_5375, onnx::Mul_5384"/>
-			</rt_info>
+		<layer id="3074" name="Constant_78218" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3261" name="Constant_127477" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_21392, onnx::Gather_5375, onnx::Mul_5384"/>
-			</rt_info>
+		<layer id="3075" name="Constant_78219" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3262" name="Constant_127473" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_21392, onnx::Gather_5375, onnx::Mul_5384"/>
-			</rt_info>
+		<layer id="3076" name="Constant_78215" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3263" name="onnx::Gather_5374" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5374"/>
-			</rt_info>
+		<layer id="3077" name="/up_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>10240</dim>
+					<dim>1024</dim>
+					<dim>5120</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_5374">
+				<port id="1" precision="I64" names="/up_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Shape_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3264" name="onnx::Gather_5375" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5375"/>
-			</rt_info>
+		<layer id="3078" name="/up_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Gather_5375">
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Constant_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3265" name="Constant_21381" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_21381"/>
-			</rt_info>
+		<layer id="3079" name="Constant_26239" type="Const" version="opset1">
+			<data element_type="i64" shape="" offset="2143392" size="8" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64" />
 			</output>
 		</layer>
-		<layer id="3266" name="onnx::Add_5376" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_21381, onnx::Add_5376, onnx::Gather_5375"/>
-			</rt_info>
+		<layer id="3080" name="/up_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Gather" type="Gather" version="opset8">
+			<data batch_dims="0" />
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
@@ -55224,30 +50762,24 @@
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
+				<port id="2" precision="I64" />
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Add_5376">
+				<port id="3" precision="I64" names="/up_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Gather_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3267" name="onnx::Add_5378" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5378"/>
-			</rt_info>
+		<layer id="3081" name="/up_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="2143400" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Add_5378">
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Constant_2_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3268" name="onnx::Div_5379" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5378, onnx::Div_5379"/>
-			</rt_info>
+		<layer id="3082" name="/up_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="I64">
 					<dim>1</dim>
@@ -55257,27 +50789,21 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Div_5379">
+				<port id="2" precision="I64" names="/up_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Add_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3269" name="onnx::Div_5380" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_5380"/>
-			</rt_info>
+		<layer id="3083" name="/up_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_5380">
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Constant_3_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3270" name="onnx::Mul_5381" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_5380, onnx::Mul_5381, onnx::Mul_5382, onnx::Slice_5383"/>
-			</rt_info>
+		<layer id="3084" name="/up_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Div" type="Divide" version="opset1">
+			<data auto_broadcast="numpy" m_pythondiv="true" />
 			<input>
 				<port id="0" precision="I64">
 					<dim>1</dim>
@@ -55287,26 +50813,20 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Mul_5381,onnx::Slice_5383">
+				<port id="2" precision="I64" names="/up_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Div_output_0,/up_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Mul_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3271" name="Constant_127472" type="Const" version="opset1">
-			<data element_type="i32" shape="1" offset="25941500" size="4"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_21392, onnx::Gather_5375, onnx::Mul_5384"/>
-			</rt_info>
+		<layer id="3085" name="Constant_78214" type="Const" version="opset1">
+			<data element_type="i32" shape="1" offset="12970946" size="4" />
 			<output>
 				<port id="0" precision="I32">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3272" name="ScatterUpdate_127478" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_21392, onnx::Gather_5375, onnx::Mul_5384"/>
-			</rt_info>
+		<layer id="3086" name="ScatterUpdate_78220" type="ScatterUpdate" version="opset3">
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
@@ -55327,27 +50847,21 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3273" name="Constant_127481" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941504" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_21392, onnx::Gather_5375, onnx::Mul_5384"/>
-			</rt_info>
+		<layer id="3087" name="Constant_78223" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970950" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3274" name="onnx::Mul_5384" type="StridedSlice" version="opset1">
-			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask=""/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_21392, onnx::Gather_5375, onnx::Mul_5384"/>
-			</rt_info>
+		<layer id="3088" name="/up_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Slice" type="StridedSlice" version="opset1">
+			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask="" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>10240</dim>
+					<dim>1024</dim>
+					<dim>5120</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>3</dim>
@@ -55360,50 +50874,38 @@
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="FP32" names="onnx::Mul_5384">
+				<port id="4" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Slice_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>5120</dim>
+					<dim>1024</dim>
+					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3275" name="Constant_127545" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_21461, onnx::Div_5387, onnx::Gather_5375"/>
-			</rt_info>
+		<layer id="3089" name="Constant_78287" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3276" name="Constant_127544" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_21461, onnx::Div_5387, onnx::Gather_5375"/>
-			</rt_info>
+		<layer id="3090" name="Constant_78286" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3277" name="Constant_127543" type="Const" version="opset1">
-			<data element_type="i32" shape="1" offset="25941500" size="4"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_21461, onnx::Div_5387, onnx::Gather_5375"/>
-			</rt_info>
+		<layer id="3091" name="Constant_78285" type="Const" version="opset1">
+			<data element_type="i32" shape="1" offset="12970946" size="4" />
 			<output>
 				<port id="0" precision="I32">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3278" name="ScatterUpdate_127546" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_21461, onnx::Div_5387, onnx::Gather_5375"/>
-			</rt_info>
+		<layer id="3092" name="ScatterUpdate_78288" type="ScatterUpdate" version="opset3">
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
@@ -55424,33 +50926,24 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3279" name="Constant_127547" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_21461, onnx::Div_5387, onnx::Gather_5375"/>
-			</rt_info>
+		<layer id="3093" name="Constant_78289" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3280" name="onnx::Mul_5385" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5385"/>
-			</rt_info>
+		<layer id="3094" name="/up_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Mul_5385">
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Constant_5_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3281" name="onnx::Slice_5386" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5385, onnx::Slice_5386"/>
-			</rt_info>
+		<layer id="3095" name="/up_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Mul_1" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="I64">
 					<dim>1</dim>
@@ -55460,15 +50953,12 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Slice_5386">
+				<port id="2" precision="I64" names="/up_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Mul_1_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3282" name="ScatterUpdate_127548" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_21461, onnx::Div_5387, onnx::Gather_5375"/>
-			</rt_info>
+		<layer id="3096" name="ScatterUpdate_78290" type="ScatterUpdate" version="opset3">
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
@@ -55489,27 +50979,21 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3283" name="Constant_127551" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941504" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_21461, onnx::Div_5387, onnx::Gather_5375"/>
-			</rt_info>
+		<layer id="3097" name="Constant_78293" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970950" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3284" name="onnx::Div_5387" type="StridedSlice" version="opset1">
-			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask=""/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_21461, onnx::Div_5387, onnx::Gather_5375"/>
-			</rt_info>
+		<layer id="3098" name="/up_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Slice_1" type="StridedSlice" version="opset1">
+			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask="" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>10240</dim>
+					<dim>1024</dim>
+					<dim>5120</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>3</dim>
@@ -55522,482 +51006,361 @@
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="FP32" names="onnx::Div_5387">
+				<port id="4" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Slice_1_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>5120</dim>
+					<dim>1024</dim>
+					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3285" name="onnx::Mul_5395" type="Gelu" version="opset7">
-			<data approximation_mode="ERF"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5390, onnx::Erf_5389, onnx::Mul_5392, onnx::Mul_5393, onnx::Mul_5395"/>
-			</rt_info>
+		<layer id="3099" name="/up_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Mul_3" type="Gelu" version="opset7">
+			<data approximation_mode="ERF" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>5120</dim>
+					<dim>1024</dim>
+					<dim>2560</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="onnx::Mul_5395">
+				<port id="1" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Mul_3_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>5120</dim>
+					<dim>1024</dim>
+					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3286" name="input.760" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.760"/>
-			</rt_info>
+		<layer id="3100" name="/up_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Mul_4" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>5120</dim>
-				</port>
+					<dim>1024</dim>
+					<dim>2560</dim>
+				</port>
 				<port id="1" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>5120</dim>
+					<dim>1024</dim>
+					<dim>2560</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.760">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/ff/net.0/Mul_4_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>5120</dim>
+					<dim>1024</dim>
+					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3287" name="Constant_148191" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 5120" offset="2985508144" size="26214400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5398, onnx::MatMul_8953"/>
-			</rt_info>
+		<layer id="3101" name="Constant_85792_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 2560" offset="1586503122" size="3276800" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1280</dim>
-					<dim>5120</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3288" name="onnx::Add_5398" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="3102" name="Constant_85792" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5398, onnx::MatMul_8953"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>2560</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>2560</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3103" name="/up_blocks.2/attentions.0/transformer_blocks.0/ff/net.2/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>5120</dim>
+					<dim>1024</dim>
+					<dim>2560</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1280</dim>
-					<dim>5120</dim>
+					<dim>640</dim>
+					<dim>2560</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_5398">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/ff/net.2/MatMul_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3289" name="onnx::Add_5399" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5399"/>
-			</rt_info>
+		<layer id="3104" name="/up_blocks.2/attentions.0/transformer_blocks.0/ff/net.2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>1280</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_5399">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/ff/net.2/Add_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3290" name="onnx::Reshape_5400" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5400"/>
-			</rt_info>
+		<layer id="3105" name="/up_blocks.2/attentions.0/transformer_blocks.0/Add_2" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_5400">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/transformer_blocks.0/Add_2_output_0">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3291" name="Constant_90809" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18233080" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5409"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3292" name="Constant_90810" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3293" name="Gather_90811" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5409"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
+		<layer id="3106" name="/up_blocks.2/attentions.0/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="56093768" size="32" />
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_5409">
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.0/Constant_1_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3294" name="onnx::Transpose_5410" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_5410"/>
-			</rt_info>
+		<layer id="3107" name="/up_blocks.2/attentions.0/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>256</dim>
-					<dim>1280</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_5410">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/Reshape_1_output_0">
 					<dim>2</dim>
-					<dim>16</dim>
-					<dim>16</dim>
-					<dim>1280</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3295" name="Constant_21626" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="27579960" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_21626"/>
-			</rt_info>
+		<layer id="3108" name="Constant_26404" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="13790206" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3296" name="input.764" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.764"/>
-			</rt_info>
+		<layer id="3109" name="/up_blocks.2/attentions.0/Transpose_1" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>16</dim>
-					<dim>16</dim>
-					<dim>1280</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.764">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/Transpose_1_output_0">
 					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3297" name="m.up_blocks.1.attentions.2.proj_out.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280, 1, 1" offset="3011722544" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.1.attentions.2.proj_out.weight"/>
-			</rt_info>
+		<layer id="3110" name="up_blocks.2.attentions.0.proj_out.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640, 1, 1" offset="1589779922" size="819200" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.1.attentions.2.proj_out.weight">
-					<dim>1280</dim>
-					<dim>1280</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3298" name="Convolution_21628" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
+		<layer id="3111" name="up_blocks.2.attentions.0.proj_out.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_21628"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1280</dim>
-					<dim>1280</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3299" name="Reshape_21648" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="3018276144" size="5120"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1280</dim>
+				<port id="1" precision="FP32" names="up_blocks.2.attentions.0.proj_out.weight">
+					<dim>640</dim>
+					<dim>640</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3300" name="onnx::Add_5412" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_21647, Reshape_21648, onnx::Add_5412"/>
-			</rt_info>
+		<layer id="3112" name="/up_blocks.2/attentions.0/proj_out/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1280</dim>
+					<dim>640</dim>
+					<dim>640</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_5412">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3301" name="x.3" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="x.3"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="x.3">
+				<port id="2" precision="FP32">
 					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3302" name="ShapeOf_21678" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="ShapeOf_21678"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
-				</port>
-			</input>
+		<layer id="3113" name="Reshape_26426_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="1590599122" size="1280" />
 			<output>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3303" name="Convert_21679" type="Convert" version="opset1">
-			<data destination_type="f32"/>
+		<layer id="3114" name="Reshape_26426" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convert_21679"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
 				<port id="1" precision="FP32">
-					<dim>4</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3304" name="Multiply_21680" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Multiply_21680"/>
-			</rt_info>
+		<layer id="3115" name="/up_blocks.2/attentions.0/proj_out/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>4</dim>
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3305" name="Convert_21681" type="Convert" version="opset1">
-			<data destination_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Convert_21681"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>4</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/proj_out/Conv_output_0">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3306" name="input.768" type="Interpolate" version="opset4">
-			<data mode="nearest" shape_calculation_mode="scales" coordinate_transformation_mode="asymmetric" nearest_mode="floor" antialias="false" pads_begin="0, 0, 0, 0" pads_end="0, 0, 0, 0" cube_coeff="-0.75"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.768"/>
-			</rt_info>
+		<layer id="3116" name="/up_blocks.2/attentions.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>16</dim>
-					<dim>16</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
-				<port id="2" precision="FP32">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="FP32" names="input.768">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.0/Add_output_0">
 					<dim>2</dim>
-					<dim>1280</dim>
+					<dim>640</dim>
 					<dim>32</dim>
 					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3307" name="m.up_blocks.1.upsamplers.0.conv.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="1280, 1280, 3, 3" offset="3018281264" size="58982400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.1.upsamplers.0.conv.weight"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.1.upsamplers.0.conv.weight">
-					<dim>1280</dim>
-					<dim>1280</dim>
-					<dim>3</dim>
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3308" name="Convolution_21683" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_21683"/>
-			</rt_info>
+		<layer id="3117" name="/up_blocks.2/Concat_1" type="Concat" version="opset1">
+			<data axis="1" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1280</dim>
+					<dim>640</dim>
 					<dim>32</dim>
 					<dim>32</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1280</dim>
-					<dim>1280</dim>
-					<dim>3</dim>
-					<dim>3</dim>
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
+				<port id="2" precision="FP32" names="/up_blocks.2/Concat_1_output_0">
 					<dim>2</dim>
 					<dim>1280</dim>
 					<dim>32</dim>
@@ -56005,50 +51368,41 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3309" name="Reshape_21703" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="3077263664" size="5120"/>
+		<layer id="3118" name="up_blocks.2.resnets.1.conv_shortcut.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 1280, 1, 1" offset="1590600402" size="1638400" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3310" name="onnx::Concat_5419" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3119" name="up_blocks.2.resnets.1.conv_shortcut.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_21702, Reshape_21703, onnx::Concat_5419"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
 					<dim>1280</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Concat_5419">
-					<dim>2</dim>
+				<port id="1" precision="FP32" names="up_blocks.2.resnets.1.conv_shortcut.weight">
+					<dim>640</dim>
 					<dim>1280</dim>
-					<dim>32</dim>
-					<dim>32</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3311" name="onnx::Cast_5420" type="Concat" version="opset1">
-			<data axis="1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.772, onnx::Cast_5420"/>
-			</rt_info>
+		<layer id="3120" name="/up_blocks.2/resnets.1/conv_shortcut/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -56057,67 +51411,47 @@
 					<dim>32</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>2</dim>
 					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.772,onnx::Cast_5420">
+				<port id="2" precision="FP32">
 					<dim>2</dim>
-					<dim>1920</dim>
+					<dim>640</dim>
 					<dim>32</dim>
 					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3312" name="m.up_blocks.2.resnets.0.conv_shortcut.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 1920, 1, 1" offset="3077268784" size="4915200"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.2.resnets.0.conv_shortcut.weight"/>
-			</rt_info>
+		<layer id="3121" name="Reshape_26811_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="1592238802" size="1280" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.2.resnets.0.conv_shortcut.weight">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>640</dim>
-					<dim>1920</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3313" name="Convolution_22072" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
+		<layer id="3122" name="Reshape_26811" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_22072"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1920</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-				<port id="1" precision="FP32">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>640</dim>
-					<dim>1920</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3314" name="Reshape_22092" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="3082183984" size="2560"/>
-			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -56125,11 +51459,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3315" name="onnx::Add_5465" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_22091, Reshape_22092, onnx::Add_5465"/>
-			</rt_info>
+		<layer id="3123" name="/up_blocks.2/resnets.1/conv_shortcut/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -56145,7 +51476,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_5465">
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.1/conv_shortcut/Conv_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -56153,26 +51484,20 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3316" name="onnx::Reshape_5422" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5422"/>
-			</rt_info>
+		<layer id="3124" name="/up_blocks.2/resnets.1/norm1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_5422">
+				<port id="0" precision="I64" names="/up_blocks.2/resnets.1/norm1/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3317" name="onnx::InstanceNormalization_5423" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_5423"/>
-			</rt_info>
+		<layer id="3125" name="/up_blocks.2/resnets.1/norm1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1920</dim>
+					<dim>1280</dim>
 					<dim>32</dim>
 					<dim>32</dim>
 				</port>
@@ -56181,218 +51506,257 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_5423">
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.1/norm1/Reshape_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
-					<dim>61440</dim>
+					<dim>40960</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3318" name="Constant_21742" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_21742"/>
-			</rt_info>
+		<layer id="3126" name="Constant_26465" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3319" name="MVN_21743" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_21762, Concat_21807, MVN_21743, Multiply_21790, Reshape_21763, Reshape_21808, onnx::Reshape_5426"/>
-			</rt_info>
+		<layer id="3127" name="MVN_26466" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>32</dim>
-					<dim>61440</dim>
+					<dim>40960</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_5426">
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.1/norm1/InstanceNormalization_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
-					<dim>61440</dim>
+					<dim>40960</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3320" name="onnx::Reshape_5427" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5427"/>
-			</rt_info>
+		<layer id="3128" name="/up_blocks.2/resnets.1/norm1/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1920</dim>
+					<dim>1280</dim>
 					<dim>32</dim>
 					<dim>32</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_5427">
+				<port id="1" precision="I64" names="/up_blocks.2/resnets.1/norm1/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3321" name="onnx::Mul_5428" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5428"/>
-			</rt_info>
+		<layer id="3129" name="/up_blocks.2/resnets.1/norm1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>32</dim>
-					<dim>61440</dim>
+					<dim>40960</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_5428">
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.1/norm1/Reshape_1_output_0">
 					<dim>2</dim>
-					<dim>1920</dim>
+					<dim>1280</dim>
 					<dim>32</dim>
 					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3322" name="Constant_150505" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1920, 1, 1" offset="3082186544" size="7680"/>
+		<layer id="3130" name="Constant_87114_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="1592240082" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-					<dim>1920</dim>
+					<dim>1280</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3323" name="onnx::Add_5431" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3131" name="Constant_87114" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5431"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3132" name="/up_blocks.2/resnets.1/norm1/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1920</dim>
+					<dim>1280</dim>
 					<dim>32</dim>
 					<dim>32</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>1</dim>
-					<dim>1920</dim>
+					<dim>1280</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_5431">
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.1/norm1/Mul_output_0">
 					<dim>2</dim>
-					<dim>1920</dim>
+					<dim>1280</dim>
 					<dim>32</dim>
 					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3324" name="Constant_150506" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1920, 1, 1" offset="3082194224" size="7680"/>
+		<layer id="3133" name="Constant_87115_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1280, 1, 1" offset="1592242642" size="2560" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-					<dim>1920</dim>
+					<dim>1280</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3325" name="onnx::Cast_5434" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3134" name="Constant_87115" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.776, onnx::Cast_5434"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1280</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3135" name="/up_blocks.2/resnets.1/norm1/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1920</dim>
+					<dim>1280</dim>
 					<dim>32</dim>
 					<dim>32</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>1</dim>
-					<dim>1920</dim>
+					<dim>1280</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.776,onnx::Cast_5434">
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.1/norm1/Add_output_0">
 					<dim>2</dim>
-					<dim>1920</dim>
+					<dim>1280</dim>
 					<dim>32</dim>
 					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3326" name="input.780" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.780, onnx::Mul_5436"/>
-			</rt_info>
+		<layer id="3136" name="/up_blocks.2/resnets.1/nonlinearity/Mul" type="Swish" version="opset4">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1920</dim>
+					<dim>1280</dim>
 					<dim>32</dim>
 					<dim>32</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="input.780">
+				<port id="1" precision="FP32" names="/up_blocks.2/resnets.1/nonlinearity/Mul_output_0">
 					<dim>2</dim>
-					<dim>1920</dim>
+					<dim>1280</dim>
 					<dim>32</dim>
 					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3327" name="m.up_blocks.2.resnets.0.conv1.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 1920, 3, 3" offset="3082201904" size="44236800"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.2.resnets.0.conv1.weight"/>
-			</rt_info>
+		<layer id="3137" name="up_blocks.2.resnets.1.conv1.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 1280, 3, 3" offset="1592245202" size="14745600" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.2.resnets.0.conv1.weight">
+				<port id="0" precision="FP16">
 					<dim>640</dim>
-					<dim>1920</dim>
+					<dim>1280</dim>
 					<dim>3</dim>
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3328" name="Convolution_21848" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
+		<layer id="3138" name="up_blocks.2.resnets.1.conv1.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_21848"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="up_blocks.2.resnets.1.conv1.weight">
+					<dim>640</dim>
+					<dim>1280</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3139" name="/up_blocks.2/resnets.1/conv1/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1920</dim>
+					<dim>1280</dim>
 					<dim>32</dim>
 					<dim>32</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>640</dim>
-					<dim>1920</dim>
+					<dim>1280</dim>
 					<dim>3</dim>
 					<dim>3</dim>
 				</port>
@@ -56406,10 +51770,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3329" name="Reshape_21868" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="3126438704" size="2560"/>
+		<layer id="3140" name="Reshape_26590_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="1606990802" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -56417,11 +51781,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3330" name="onnx::Add_5438" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3141" name="Reshape_26590" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_21867, Reshape_21868, onnx::Add_5438"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3142" name="/up_blocks.2/resnets.1/conv1/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -56437,7 +51820,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_5438">
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.1/conv1/Conv_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -56445,40 +51828,35 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3331" name="onnx::Gemm_5440" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gemm_5440, onnx::Mul_5439"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-				</port>
-			</input>
+		<layer id="3143" name="up_blocks.2.resnets.1.time_emb_proj.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 1280" offset="1606992082" size="1638400" />
 			<output>
-				<port id="1" precision="FP32" names="onnx::Gemm_5440">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3332" name="m.up_blocks.2.resnets.0.time_emb_proj.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 1280" offset="3126441264" size="3276800"/>
+		<layer id="3144" name="up_blocks.2.resnets.1.time_emb_proj.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.2.resnets.0.time_emb_proj.weight"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.2.resnets.0.time_emb_proj.weight">
+				<port id="1" precision="FP32" names="up_blocks.2.resnets.1.time_emb_proj.weight">
 					<dim>640</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3333" name="MatMul_21900" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="MatMul_21900"/>
-			</rt_info>
+		<layer id="3145" name="/up_blocks.2/resnets.1/time_emb_proj/Gemm/WithoutBiases" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -56496,20 +51874,35 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3334" name="Constant_150507" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640" offset="3129718064" size="2560"/>
+		<layer id="3146" name="Constant_87116_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640" offset="1608630482" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3335" name="onnx::Unsqueeze_5441" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3147" name="Constant_87116" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Multiply_21901, onnx::Unsqueeze_5441"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3148" name="/up_blocks.2/resnets.1/time_emb_proj/Gemm" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -56521,27 +51914,21 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_5441">
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.1/time_emb_proj/Gemm_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3336" name="onnx::Unsqueeze_5442" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5442"/>
-			</rt_info>
+		<layer id="3149" name="/up_blocks.2/resnets.1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5442">
+				<port id="0" precision="I64" names="/up_blocks.2/resnets.1/Constant_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3337" name="onnx::Unsqueeze_5443" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5443"/>
-			</rt_info>
+		<layer id="3150" name="/up_blocks.2/resnets.1/Unsqueeze" type="Unsqueeze" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -56552,28 +51939,22 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_5443">
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.1/Unsqueeze_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3338" name="onnx::Unsqueeze_5444" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="14129392" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5444"/>
-			</rt_info>
+		<layer id="3151" name="/up_blocks.2/resnets.1/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="7064752" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5444">
+				<port id="0" precision="I64" names="/up_blocks.2/resnets.1/Constant_1_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3339" name="onnx::Add_5445" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5445"/>
-			</rt_info>
+		<layer id="3152" name="/up_blocks.2/resnets.1/Unsqueeze_1" type="Unsqueeze" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -56585,7 +51966,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_5445">
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.1/Unsqueeze_1_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -56593,11 +51974,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3340" name="onnx::Cast_5446" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.784, onnx::Cast_5446"/>
-			</rt_info>
+		<layer id="3153" name="/up_blocks.2/resnets.1/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -56613,7 +51991,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.784,onnx::Cast_5446">
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.1/Add_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -56621,22 +51999,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3341" name="onnx::Reshape_5448" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5448"/>
-			</rt_info>
+		<layer id="3154" name="/up_blocks.2/resnets.1/norm2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_5448">
+				<port id="0" precision="I64" names="/up_blocks.2/resnets.1/norm2/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3342" name="onnx::InstanceNormalization_5449" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_5449"/>
-			</rt_info>
+		<layer id="3155" name="/up_blocks.2/resnets.1/norm2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -56649,29 +52021,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_5449">
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.1/norm2/Reshape_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>20480</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3343" name="Constant_21918" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_21918"/>
-			</rt_info>
+		<layer id="3156" name="Constant_26638" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3344" name="MVN_21919" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_21938, Concat_21983, MVN_21919, Multiply_21966, Reshape_21939, Reshape_21984, onnx::Reshape_5452"/>
-			</rt_info>
+		<layer id="3157" name="MVN_26639" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -56683,18 +52049,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_5452">
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.1/norm2/InstanceNormalization_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>20480</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3345" name="onnx::Reshape_5453" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5453"/>
-			</rt_info>
+		<layer id="3158" name="/up_blocks.2/resnets.1/norm2/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -56704,16 +52067,13 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_5453">
+				<port id="1" precision="I64" names="/up_blocks.2/resnets.1/norm2/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3346" name="onnx::Mul_5454" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5454"/>
-			</rt_info>
+		<layer id="3159" name="/up_blocks.2/resnets.1/norm2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -56725,7 +52085,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_5454">
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.1/norm2/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -56733,10 +52093,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3347" name="Constant_150508" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="3129720624" size="2560"/>
+		<layer id="3160" name="Constant_87117_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="1608631762" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -56744,11 +52104,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3348" name="onnx::Add_5457" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3161" name="Constant_87117" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5457"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3162" name="/up_blocks.2/resnets.1/norm2/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -56764,7 +52143,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_5457">
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.1/norm2/Mul_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -56772,10 +52151,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3349" name="Constant_150509" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="3129723184" size="2560"/>
+		<layer id="3163" name="Constant_87118_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="1608633042" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -56783,11 +52162,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3350" name="onnx::Cast_5460" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3164" name="Constant_87118" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.788, onnx::Cast_5460"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3165" name="/up_blocks.2/resnets.1/norm2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -56803,7 +52201,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.788,onnx::Cast_5460">
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.1/norm2/Add_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -56811,10 +52209,7 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3351" name="input.792" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.792, onnx::Mul_5462"/>
-			</rt_info>
+		<layer id="3166" name="/up_blocks.2/resnets.1/nonlinearity_1/Mul" type="Swish" version="opset4">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -56824,7 +52219,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="input.792">
+				<port id="1" precision="FP32" names="/up_blocks.2/resnets.1/nonlinearity_1/Mul_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -56832,13 +52227,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3352" name="m.up_blocks.2.resnets.0.conv2.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640, 3, 3" offset="3129725744" size="14745600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.2.resnets.0.conv2.weight"/>
-			</rt_info>
+		<layer id="3167" name="up_blocks.2.resnets.1.conv2.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640, 3, 3" offset="1608634322" size="7372800" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.2.resnets.0.conv2.weight">
+				<port id="0" precision="FP16">
 					<dim>640</dim>
 					<dim>640</dim>
 					<dim>3</dim>
@@ -56846,11 +52238,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3353" name="Convolution_22024" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
+		<layer id="3168" name="up_blocks.2.resnets.1.conv2.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_22024"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="up_blocks.2.resnets.1.conv2.weight">
+					<dim>640</dim>
+					<dim>640</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3169" name="/up_blocks.2/resnets.1/conv2/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -56874,10 +52285,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3354" name="Reshape_22044" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="3144471344" size="2560"/>
+		<layer id="3170" name="Reshape_26763_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="1616007122" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -56885,11 +52296,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3355" name="onnx::Add_5464" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3171" name="Reshape_26763" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_22043, Reshape_22044, onnx::Add_5464"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3172" name="/up_blocks.2/resnets.1/conv2/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -56905,7 +52335,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_5464">
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.1/conv2/Conv_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -56913,11 +52343,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3356" name="onnx::Div_5466" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.796, onnx::Div_5466"/>
-			</rt_info>
+		<layer id="3173" name="/up_blocks.2/resnets.1/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -56933,7 +52360,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.796,onnx::Div_5466">
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.1/Add_1_output_0,/up_blocks.2/resnets.1/Div_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -56941,22 +52368,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3357" name="onnx::Reshape_5481" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5481"/>
-			</rt_info>
+		<layer id="3174" name="/up_blocks.2/attentions.1/norm/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_5481">
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.1/norm/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3358" name="onnx::InstanceNormalization_5482" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_5482"/>
-			</rt_info>
+		<layer id="3175" name="/up_blocks.2/attentions.1/norm/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -56969,29 +52390,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_5482">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/norm/Reshape_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>20480</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3359" name="Constant_22148" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_22148"/>
-			</rt_info>
+		<layer id="3176" name="Constant_26851" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3360" name="MVN_22149" type="MVN" version="opset6">
-			<data eps="9.9999999747524271e-07" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_22168, Concat_22213, MVN_22149, Multiply_22196, Reshape_22169, Reshape_22214, onnx::Reshape_5485"/>
-			</rt_info>
+		<layer id="3177" name="MVN_26852" type="MVN" version="opset6">
+			<data eps="9.9999999747524271e-07" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -57003,18 +52418,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_5485">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/norm/InstanceNormalization_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>20480</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3361" name="onnx::Reshape_5486" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5469, onnx::Gather_5472, onnx::Gather_5475, onnx::Gather_5478, onnx::Reshape_5486"/>
-			</rt_info>
+		<layer id="3178" name="/up_blocks.2/attentions.1/norm/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -57024,16 +52436,13 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_5469,onnx::Gather_5472,onnx::Gather_5475,onnx::Gather_5478,onnx::Reshape_5486">
+				<port id="1" precision="I64" names="/up_blocks.2/attentions.1/norm/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3362" name="onnx::Mul_5487" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5487"/>
-			</rt_info>
+		<layer id="3179" name="/up_blocks.2/attentions.1/norm/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -57045,7 +52454,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_5487">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/norm/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -57053,10 +52462,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3363" name="Constant_150510" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="3144473904" size="2560"/>
+		<layer id="3180" name="Constant_87119_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="1616008402" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -57064,11 +52473,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3364" name="onnx::Add_5490" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3181" name="Constant_87119" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5490"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3182" name="/up_blocks.2/attentions.1/norm/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -57084,7 +52512,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_5490">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/norm/Mul_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -57092,10 +52520,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3365" name="Constant_150511" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="3144476464" size="2560"/>
+		<layer id="3183" name="Constant_87120_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="1616009682" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -57103,11 +52531,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3366" name="input.800" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3184" name="Constant_87120" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.800"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3185" name="/up_blocks.2/attentions.1/norm/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -57123,7 +52570,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.800">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/norm/Add_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -57131,13 +52578,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3367" name="m.up_blocks.2.attentions.0.proj_in.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640, 1, 1" offset="3144479024" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.2.attentions.0.proj_in.weight"/>
-			</rt_info>
+		<layer id="3186" name="up_blocks.2.attentions.1.proj_in.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640, 1, 1" offset="1616010962" size="819200" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.2.attentions.0.proj_in.weight">
+				<port id="0" precision="FP16">
 					<dim>640</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -57145,11 +52589,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3368" name="Convolution_22251" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
+		<layer id="3187" name="up_blocks.2.attentions.1.proj_in.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_22251"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="up_blocks.2.attentions.1.proj_in.weight">
+					<dim>640</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3188" name="/up_blocks.2/attentions.1/proj_in/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -57173,10 +52636,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3369" name="Reshape_22271" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="3146117424" size="2560"/>
+		<layer id="3189" name="Reshape_26974_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="1616830162" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -57184,11 +52647,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3370" name="onnx::Transpose_5494" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3190" name="Reshape_26974" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_22270, Reshape_22271, onnx::Transpose_5494"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3191" name="/up_blocks.2/attentions.1/proj_in/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -57204,7 +52686,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_5494">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/proj_in/Conv_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -57212,21 +52694,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3371" name="Constant_22299" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18233080" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_22299"/>
-			</rt_info>
+		<layer id="3192" name="Constant_27002" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9116600" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3372" name="onnx::Reshape_5495" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5495"/>
-			</rt_info>
+		<layer id="3193" name="/up_blocks.2/attentions.1/Transpose" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -57239,7 +52715,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_5495">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/Transpose_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>32</dim>
@@ -57247,289 +52723,147 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3373" name="Constant_89292" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_22125, onnx::Concat_5498, onnx::Gather_5470, onnx::Unsqueeze_5471, onnx::Unsqueeze_5497"/>
-			</rt_info>
+		<layer id="3194" name="/up_blocks.2/attentions.1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="39374238" size="24" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.1/Constant_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3374" name="Constant_22125" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_22125"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3375" name="onnx::Unsqueeze_5471" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_22125, onnx::Concat_5498, onnx::Gather_5470, onnx::Unsqueeze_5471, onnx::Unsqueeze_5497"/>
-			</rt_info>
+		<layer id="3195" name="/up_blocks.2/attentions.1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>3</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_5498">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3376" name="onnx::Gather_5476" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5476"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_5476"/>
-			</output>
-		</layer>
-		<layer id="3377" name="Constant_22133" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_22133"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3378" name="onnx::Mul_5477" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_22133, onnx::Gather_5476, onnx::Mul_5477"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/Reshape_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Mul_5477"/>
-			</output>
-		</layer>
-		<layer id="3379" name="onnx::Gather_5479" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="14129392" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5479"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_5479"/>
 			</output>
 		</layer>
-		<layer id="3380" name="Constant_22137" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_22137"/>
-			</rt_info>
+		<layer id="3196" name="Constant_27011" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3381" name="onnx::Mul_5480" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_22137, onnx::Gather_5479, onnx::Mul_5480"/>
-			</rt_info>
-			<input>
 				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Mul_5480"/>
-			</output>
-		</layer>
-		<layer id="3382" name="onnx::Unsqueeze_5496" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5496"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_5496"/>
-			</output>
-		</layer>
-		<layer id="3383" name="onnx::Unsqueeze_5499" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5499"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5499">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3384" name="onnx::Concat_5500" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5500, onnx::Unsqueeze_5499"/>
-			</rt_info>
+		<layer id="3197" name="/up_blocks.2/attentions.1/transformer_blocks.0/norm1/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
+				</port>
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5500">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3385" name="Constant_89301" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_22129, onnx::Concat_5502, onnx::Gather_5473, onnx::Unsqueeze_5474, onnx::Unsqueeze_5501"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/norm1/Div_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3386" name="Constant_22129" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_22129"/>
-			</rt_info>
+		<layer id="3198" name="Constant_87121_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="1616831442" size="1280" />
 			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3387" name="onnx::Unsqueeze_5474" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_22129, onnx::Concat_5502, onnx::Gather_5473, onnx::Unsqueeze_5474, onnx::Unsqueeze_5501"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_5502">
 					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3388" name="onnx::Reshape_5503" type="Concat" version="opset1">
-			<data axis="0"/>
+		<layer id="3199" name="Constant_87121" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5503"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_5503">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3389" name="input.804" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.804"/>
-			</rt_info>
+		<layer id="3200" name="/up_blocks.2/attentions.1/transformer_blocks.0/norm1/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>32</dim>
+					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.804">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/norm1/Mul_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3390" name="Constant_22384" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_22384"/>
-			</rt_info>
+		<layer id="3201" name="Constant_87122_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="1616832722" size="1280" />
 			<output>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3391" name="onnx::Mul_5513" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
+		<layer id="3202" name="Constant_87122" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5509, onnx::Div_5512, onnx::Mul_5513, onnx::Pow_5506, onnx::ReduceMean_5508, onnx::Sqrt_5511, onnx::Sub_5505"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_5513">
-					<dim>2</dim>
-					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="3392" name="Constant_150512" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="3146119984" size="2560"/>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3393" name="onnx::Add_5514" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5514"/>
-			</rt_info>
+		<layer id="3203" name="/up_blocks.2/attentions.1/transformer_blocks.0/norm1/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -57543,65 +52877,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_5514">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/norm1/Add_1_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3394" name="Constant_150513" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="3146122544" size="2560"/>
+		<layer id="3204" name="Constant_85802_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640" offset="1616834002" size="819200" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3395" name="onnx::MatMul_5515" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3205" name="Constant_85802" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_5515"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
+				<port id="0" precision="FP16">
 					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
 					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_5515">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3396" name="Constant_148202" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640" offset="3146125104" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8961, q.159"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>640</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3397" name="q.159" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8961, q.159"/>
-			</rt_info>
+		<layer id="3206" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/to_q/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -57614,149 +52925,158 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.159">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/to_q/MatMul_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3398" name="Constant_108372" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
+		<layer id="3207" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="40196022" size="32" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Constant_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3399" name="onnx::Gather_5522" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5522, onnx::Gather_5525, onnx::Gather_5528"/>
-			</rt_info>
+		<layer id="3208" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_5522,onnx::Gather_5525,onnx::Gather_5528">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Reshape_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>8</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3400" name="onnx::Gather_5529" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5529"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_5529"/>
-			</output>
-		</layer>
-		<layer id="3401" name="Constant_22410" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_22410"/>
-			</rt_info>
+		<layer id="3209" name="Constant_27034" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64">
+					<dim>4</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3402" name="onnx::Div_5530" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_22410, onnx::Div_5530, onnx::Gather_5529"/>
-			</rt_info>
+		<layer id="3210" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Transpose" type="Transpose" version="opset1">
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>8</dim>
+					<dim>80</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_5530"/>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Transpose_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3403" name="onnx::Div_5531" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_5531"/>
-			</rt_info>
+		<layer id="3211" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="40196054" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_5531"/>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Constant_1_output_0">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3404" name="onnx::Cast_5532" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_5532, onnx::Cast_5533, onnx::Div_5531, onnx::Unsqueeze_5534"/>
-			</rt_info>
+		<layer id="3212" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>3</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_5532,onnx::Cast_5533,onnx::Unsqueeze_5534"/>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Reshape_1_output_0">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3405" name="onnx::Unsqueeze_5542" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5542"/>
-			</rt_info>
+		<layer id="3213" name="Constant_85809_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640" offset="1617653202" size="819200" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5542">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3406" name="onnx::Concat_5543" type="Unsqueeze" version="opset1">
+		<layer id="3214" name="Constant_85809" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5543, onnx::Unsqueeze_5542"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5543">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3407" name="onnx::Reshape_5544" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_5544"/>
-			</rt_info>
+		<layer id="3215" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/to_k/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/to_k/MatMul_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3216" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="40196022" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Constant_2_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3408" name="onnx::Transpose_5545" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_5545"/>
-			</rt_info>
+		<layer id="3217" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Reshape_2" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -57768,7 +53088,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_5545">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Reshape_2_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>8</dim>
@@ -57776,21 +53096,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3409" name="Constant_22523" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_22523"/>
-			</rt_info>
+		<layer id="3218" name="Constant_27050" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3410" name="onnx::Reshape_5546" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5546"/>
-			</rt_info>
+		<layer id="3219" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Transpose_1" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -57803,7 +53117,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_5546">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Transpose_1_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>1024</dim>
@@ -57811,241 +53125,155 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3411" name="onnx::Gather_5523" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5523"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_5523"/>
-			</output>
-		</layer>
-		<layer id="3412" name="Constant_22402" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_22402"/>
-			</rt_info>
+		<layer id="3220" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="40196054" size="24" />
 			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3413" name="onnx::Unsqueeze_5524" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_22402, onnx::Gather_5523, onnx::Unsqueeze_5524"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Constant_3_output_0">
 					<dim>3</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_5524"/>
-			</output>
-		</layer>
-		<layer id="3414" name="onnx::Mul_5547" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5547"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_5547"/>
 			</output>
 		</layer>
-		<layer id="3415" name="onnx::Unsqueeze_5548" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5547, onnx::Unsqueeze_5548"/>
-			</rt_info>
+		<layer id="3221" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Reshape_3" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>3</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_5548"/>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Reshape_3_output_0">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3416" name="onnx::Unsqueeze_5553" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5553"/>
-			</rt_info>
+		<layer id="3222" name="Constant_87123_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1" offset="41015278" size="2" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5553">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3417" name="onnx::Concat_5554" type="Unsqueeze" version="opset1">
+		<layer id="3223" name="Constant_87123" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5554, onnx::Unsqueeze_5553"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5554">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3418" name="Constant_89328" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_22406, onnx::Concat_5556, onnx::Gather_5526, onnx::Unsqueeze_5527, onnx::Unsqueeze_5555"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3419" name="Constant_22406" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_22406"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3420" name="onnx::Unsqueeze_5527" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_22406, onnx::Concat_5556, onnx::Gather_5526, onnx::Unsqueeze_5527, onnx::Unsqueeze_5555"/>
-			</rt_info>
+		<layer id="3224" name="Multiply_86219" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_5556">
-					<dim>1</dim>
+				<port id="2" precision="FP32">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3421" name="onnx::Div_5549" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_5549"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_5549"/>
-			</output>
-		</layer>
-		<layer id="3422" name="onnx::Cast_5550" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_5550, onnx::Cast_5551, onnx::Div_5549, onnx::Unsqueeze_5552"/>
-			</rt_info>
+		<layer id="3225" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Mul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_5550,onnx::Cast_5551,onnx::Unsqueeze_5552"/>
-			</output>
-		</layer>
-		<layer id="3423" name="onnx::Unsqueeze_5557" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5557"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5557">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Add_output_0,/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Mul_output_0">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>1024</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3424" name="onnx::Concat_5558" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5558, onnx::Unsqueeze_5557"/>
-			</rt_info>
+		<layer id="3226" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Softmax" type="SoftMax" version="opset8">
+			<data axis="-1" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>1024</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5558">
-					<dim>1</dim>
+				<port id="1" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Cast_output_0,/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Softmax_output_0">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>1024</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3425" name="onnx::Reshape_5559" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5559"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
+		<layer id="3227" name="Constant_85816_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640" offset="1618472402" size="819200" />
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_5559">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3426" name="q.163" type="Reshape" version="opset1">
-			<data special_zero="true"/>
+		<layer id="3228" name="Constant_85816" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="q.163"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.163">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3427" name="Constant_148209" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640" offset="3147763504" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.159, onnx::MatMul_8962"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>640</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3428" name="k.159" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.159, onnx::MatMul_8962"/>
-			</rt_info>
+		<layer id="3229" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/to_v/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -58058,188 +53286,136 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.159">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/to_v/MatMul_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3429" name="Constant_108441" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
+		<layer id="3230" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Constant_4" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="40196022" size="32" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Constant_4_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3430" name="onnx::Gather_5561" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5561, onnx::Gather_5564, onnx::Gather_5567"/>
-			</rt_info>
+		<layer id="3231" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Reshape_4" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_5561,onnx::Gather_5564,onnx::Gather_5567">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Reshape_4_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>8</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3431" name="onnx::Gather_5568" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5568"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_5568"/>
-			</output>
-		</layer>
-		<layer id="3432" name="Constant_22651" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_22651"/>
-			</rt_info>
+		<layer id="3232" name="Constant_27066" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3433" name="onnx::Div_5569" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_22651, onnx::Div_5569, onnx::Gather_5568"/>
-			</rt_info>
-			<input>
 				<port id="0" precision="I64">
-					<dim>3</dim>
+					<dim>4</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Div_5569"/>
-			</output>
-		</layer>
-		<layer id="3434" name="onnx::Div_5570" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_5570"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_5570"/>
 			</output>
 		</layer>
-		<layer id="3435" name="onnx::Cast_5571" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_5571, onnx::Cast_5572, onnx::Div_5570, onnx::Unsqueeze_5573"/>
-			</rt_info>
+		<layer id="3233" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Transpose_2" type="Transpose" version="opset1">
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>8</dim>
+					<dim>80</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_5571,onnx::Cast_5572,onnx::Unsqueeze_5573"/>
-			</output>
-		</layer>
-		<layer id="3436" name="onnx::Unsqueeze_5581" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5581"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5581">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Transpose_2_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3437" name="onnx::Concat_5582" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5582, onnx::Unsqueeze_5581"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
+		<layer id="3234" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="40196054" size="24" />
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5582">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Constant_5_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3438" name="onnx::Reshape_5583" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_5583"/>
-			</rt_info>
+		<layer id="3235" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Reshape_5" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>8</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64">
-					<dim>4</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Reshape_5_output_0">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3439" name="onnx::Transpose_5584" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_5584"/>
-			</rt_info>
+		<layer id="3236" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/MatMul_1" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="false" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>2</dim>
+					<dim>16</dim>
+					<dim>1024</dim>
 					<dim>1024</dim>
-					<dim>640</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_5584">
-					<dim>2</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/MatMul_1_output_0">
+					<dim>16</dim>
 					<dim>1024</dim>
-					<dim>8</dim>
 					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3440" name="Constant_22764" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_22764"/>
-			</rt_info>
+		<layer id="3237" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Constant_8" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="41834480" size="32" />
 			<output>
-				<port id="0" precision="I64">
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Constant_8_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3441" name="onnx::Reshape_5585" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5585"/>
-			</rt_info>
+		<layer id="3238" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Reshape_6" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>2</dim>
+					<dim>16</dim>
 					<dim>1024</dim>
-					<dim>8</dim>
 					<dim>80</dim>
 				</port>
 				<port id="1" precision="I64">
@@ -58247,7 +53423,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_5585">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Reshape_6_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>1024</dim>
@@ -58255,321 +53431,216 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3442" name="onnx::Gather_5562" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5562"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_5562"/>
-			</output>
-		</layer>
-		<layer id="3443" name="Constant_22643" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_22643"/>
-			</rt_info>
+		<layer id="3239" name="Constant_27092" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3444" name="onnx::Unsqueeze_5563" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_22643, onnx::Gather_5562, onnx::Unsqueeze_5563"/>
-			</rt_info>
-			<input>
 				<port id="0" precision="I64">
-					<dim>3</dim>
+					<dim>4</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_5563"/>
-			</output>
-		</layer>
-		<layer id="3445" name="onnx::Mul_5586" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5586"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_5586"/>
 			</output>
 		</layer>
-		<layer id="3446" name="onnx::Unsqueeze_5587" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5586, onnx::Unsqueeze_5587"/>
-			</rt_info>
+		<layer id="3240" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Transpose_4" type="Transpose" version="opset1">
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_5587"/>
-			</output>
-		</layer>
-		<layer id="3447" name="onnx::Unsqueeze_5592" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5592"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5592">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="3448" name="onnx::Concat_5593" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5593, onnx::Unsqueeze_5592"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5593">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Transpose_4_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>8</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3449" name="Constant_89355" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_22647, onnx::Concat_5595, onnx::Gather_5565, onnx::Unsqueeze_5566, onnx::Unsqueeze_5594"/>
-			</rt_info>
+		<layer id="3241" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Constant_9" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="39374238" size="24" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Constant_9_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3450" name="Constant_22647" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_22647"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3451" name="onnx::Unsqueeze_5566" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_22647, onnx::Concat_5595, onnx::Gather_5565, onnx::Unsqueeze_5566, onnx::Unsqueeze_5594"/>
-			</rt_info>
+		<layer id="3242" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Reshape_7" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>8</dim>
+					<dim>80</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>3</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_5595">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/Reshape_7_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3452" name="onnx::Div_5588" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_5588"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_5588"/>
-			</output>
-		</layer>
-		<layer id="3453" name="onnx::Cast_5589" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_5589, onnx::Cast_5590, onnx::Div_5588, onnx::Unsqueeze_5591"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_5589,onnx::Cast_5590,onnx::Unsqueeze_5591"/>
-			</output>
-		</layer>
-		<layer id="3454" name="onnx::Unsqueeze_5596" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5596"/>
-			</rt_info>
+		<layer id="3243" name="Constant_85823_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640" offset="1619291602" size="819200" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5596">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3455" name="onnx::Concat_5597" type="Unsqueeze" version="opset1">
+		<layer id="3244" name="Constant_85823" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5597, onnx::Unsqueeze_5596"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5597">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3456" name="onnx::Reshape_5598" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5598"/>
-			</rt_info>
+		<layer id="3245" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/to_out.0/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_5598">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/to_out.0/MatMul_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3457" name="k.163" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.163"/>
-			</rt_info>
+		<layer id="3246" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/to_out.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+				<port id="1" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.163">
-					<dim>16</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn1/to_out.0/Add_output_0">
+					<dim>2</dim>
 					<dim>1024</dim>
-					<dim>80</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3458" name="onnx::Mul_5639" type="Einsum" version="opset7">
-			<data equation="bid,bjd-&gt;bij"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5639"/>
-			</rt_info>
+		<layer id="3247" name="/up_blocks.2/attentions.1/transformer_blocks.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
+					<dim>2</dim>
 					<dim>1024</dim>
-					<dim>80</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>16</dim>
+					<dim>2</dim>
 					<dim>1024</dim>
-					<dim>80</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_5639">
-					<dim>16</dim>
-					<dim>1024</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/Add_output_0">
+					<dim>2</dim>
 					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3459" name="Constant_150514" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1" offset="82029992" size="4"/>
+		<layer id="3248" name="Constant_27104" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3460" name="onnx::Softmax_5641" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Softmax_5641"/>
-			</rt_info>
+		<layer id="3249" name="/up_blocks.2/attentions.1/transformer_blocks.0/norm2/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>1024</dim>
+					<dim>2</dim>
 					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Softmax_5641">
-					<dim>16</dim>
-					<dim>1024</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/norm2/Div_output_0">
+					<dim>2</dim>
 					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3461" name="attn.79" type="SoftMax" version="opset8">
-			<data axis="-1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="attn.79"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>1024</dim>
-				</port>
-			</input>
+		<layer id="3250" name="Constant_87125_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="1620110802" size="1280" />
 			<output>
-				<port id="1" precision="FP32" names="attn.79">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>1024</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3462" name="Constant_148216" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640" offset="3149401904" size="1638400"/>
+		<layer id="3251" name="Constant_87125" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8963, v.159"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>640</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3463" name="v.159" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8963, v.159"/>
-			</rt_info>
+		<layer id="3252" name="/up_blocks.2/attentions.1/transformer_blocks.0/norm2/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -58577,154 +53648,129 @@
 					<dim>640</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.159">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/norm2/Mul_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3464" name="Constant_108510" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
+		<layer id="3253" name="Constant_87126_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="1620112082" size="1280" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3465" name="onnx::Gather_5600" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
+		<layer id="3254" name="Constant_87126" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5600, onnx::Gather_5603, onnx::Gather_5606"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_5600,onnx::Gather_5603,onnx::Gather_5606">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3466" name="onnx::Gather_5607" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5607"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_5607"/>
-			</output>
-		</layer>
-		<layer id="3467" name="Constant_22892" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_22892"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3468" name="onnx::Div_5608" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_22892, onnx::Div_5608, onnx::Gather_5607"/>
-			</rt_info>
+		<layer id="3255" name="/up_blocks.2/attentions.1/transformer_blocks.0/norm2/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Div_5608"/>
-			</output>
-		</layer>
-		<layer id="3469" name="onnx::Div_5609" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_5609"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_5609"/>
-			</output>
-		</layer>
-		<layer id="3470" name="onnx::Cast_5610" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_5610, onnx::Cast_5611, onnx::Div_5609, onnx::Unsqueeze_5612"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_5610,onnx::Cast_5611,onnx::Unsqueeze_5612"/>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/norm2/Add_1_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3471" name="onnx::Unsqueeze_5620" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5620"/>
-			</rt_info>
+		<layer id="3256" name="Constant_85831_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640" offset="1620113362" size="819200" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5620">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3472" name="onnx::Concat_5621" type="Unsqueeze" version="opset1">
+		<layer id="3257" name="Constant_85831" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5621, onnx::Unsqueeze_5620"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5621">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3473" name="onnx::Reshape_5622" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_5622"/>
-			</rt_info>
+		<layer id="3258" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/to_q/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/to_q/MatMul_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3259" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="40196022" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Constant_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3474" name="onnx::Transpose_5623" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_5623"/>
-			</rt_info>
+		<layer id="3260" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -58736,7 +53782,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_5623">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Reshape_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>8</dim>
@@ -58744,21 +53790,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3475" name="Constant_23005" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_23005"/>
-			</rt_info>
+		<layer id="3261" name="Constant_27127" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3476" name="onnx::Reshape_5624" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5624"/>
-			</rt_info>
+		<layer id="3262" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Transpose" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -58771,7 +53811,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_5624">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Transpose_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>1024</dim>
@@ -58779,453 +53819,352 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3477" name="onnx::Gather_5601" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5601"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_5601"/>
-			</output>
-		</layer>
-		<layer id="3478" name="Constant_22884" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_22884"/>
-			</rt_info>
+		<layer id="3263" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="40196054" size="24" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Constant_1_output_0">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3479" name="onnx::Unsqueeze_5602" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_22884, onnx::Gather_5601, onnx::Unsqueeze_5602"/>
-			</rt_info>
+		<layer id="3264" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
+				</port>
+				<port id="1" precision="I64">
 					<dim>3</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_5602"/>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Reshape_1_output_0">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3480" name="onnx::Mul_5625" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5625"/>
-			</rt_info>
+		<layer id="3265" name="Constant_85838_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 768" offset="1620932562" size="983040" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Mul_5625"/>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>768</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3481" name="onnx::Unsqueeze_5626" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3266" name="Constant_85838" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5625, onnx::Unsqueeze_5626"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>768</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_5626"/>
-			</output>
-		</layer>
-		<layer id="3482" name="onnx::Unsqueeze_5631" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5631"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5631">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3483" name="onnx::Concat_5632" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5632, onnx::Unsqueeze_5631"/>
-			</rt_info>
+		<layer id="3267" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/to_k/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>768</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>768</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5632">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/to_k/MatMul_output_0">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3484" name="Constant_89382" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_22888, onnx::Concat_5634, onnx::Gather_5604, onnx::Unsqueeze_5605, onnx::Unsqueeze_5633"/>
-			</rt_info>
+		<layer id="3268" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="44458512" size="32" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Constant_2_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3485" name="Constant_22888" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_22888"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3486" name="onnx::Unsqueeze_5605" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_22888, onnx::Concat_5634, onnx::Gather_5604, onnx::Unsqueeze_5605, onnx::Unsqueeze_5633"/>
-			</rt_info>
+		<layer id="3269" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Reshape_2" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_5634">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Reshape_2_output_0">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>8</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3487" name="onnx::Div_5627" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_5627"/>
-			</rt_info>
+		<layer id="3270" name="Constant_27143" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_5627"/>
+				<port id="0" precision="I64">
+					<dim>4</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3488" name="onnx::Cast_5628" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_5628, onnx::Cast_5629, onnx::Div_5627, onnx::Unsqueeze_5630"/>
-			</rt_info>
+		<layer id="3271" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Transpose_1" type="Transpose" version="opset1">
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>8</dim>
+					<dim>80</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_5628,onnx::Cast_5629,onnx::Unsqueeze_5630"/>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Transpose_1_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>77</dim>
+					<dim>80</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3489" name="onnx::Unsqueeze_5635" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5635"/>
-			</rt_info>
+		<layer id="3272" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="44458544" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5635">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Constant_3_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3490" name="onnx::Concat_5636" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5636, onnx::Unsqueeze_5635"/>
-			</rt_info>
+		<layer id="3273" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Reshape_3" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>77</dim>
+					<dim>80</dim>
+				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5636">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Reshape_3_output_0">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>80</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3274" name="Constant_87127_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1" offset="41015278" size="2" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3491" name="onnx::Reshape_5637" type="Concat" version="opset1">
-			<data axis="0"/>
+		<layer id="3275" name="Constant_87127" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5637"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_5637">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3492" name="v.163" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="v.163"/>
-			</rt_info>
+		<layer id="3276" name="Multiply_86221" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>1024</dim>
+					<dim>16</dim>
+					<dim>77</dim>
 					<dim>80</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.163">
+				<port id="2" precision="FP32">
 					<dim>16</dim>
-					<dim>1024</dim>
+					<dim>77</dim>
 					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3493" name="out.79" type="Einsum" version="opset7">
-			<data equation="bij,bjd-&gt;bid"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="out.79"/>
-			</rt_info>
+		<layer id="3277" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Mul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
 					<dim>1024</dim>
-					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>16</dim>
-					<dim>1024</dim>
+					<dim>77</dim>
 					<dim>80</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="out.79">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Add_output_0,/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Mul_output_0">
 					<dim>16</dim>
 					<dim>1024</dim>
-					<dim>80</dim>
+					<dim>77</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3494" name="onnx::Gather_5644" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5644, onnx::Gather_5647, onnx::Gather_5650"/>
-			</rt_info>
+		<layer id="3278" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Softmax" type="SoftMax" version="opset8">
+			<data axis="-1" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>16</dim>
 					<dim>1024</dim>
-					<dim>80</dim>
+					<dim>77</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_5644,onnx::Gather_5647,onnx::Gather_5650">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3495" name="onnx::Gather_5645" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5645"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_5645"/>
-			</output>
-		</layer>
-		<layer id="3496" name="Constant_23130" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_23130"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3497" name="onnx::Div_5646" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_23130, onnx::Div_5646, onnx::Gather_5645"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="1" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Cast_output_0,/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Softmax_output_0">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>77</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Div_5646"/>
-			</output>
-		</layer>
-		<layer id="3498" name="onnx::Div_5653" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_5653"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_5653"/>
-			</output>
-		</layer>
-		<layer id="3499" name="onnx::Cast_5654" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_5654, onnx::Cast_5655, onnx::Div_5653, onnx::Unsqueeze_5656"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_5654,onnx::Cast_5655,onnx::Unsqueeze_5656"/>
 			</output>
 		</layer>
-		<layer id="3500" name="onnx::Unsqueeze_5658" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5658"/>
-			</rt_info>
+		<layer id="3279" name="Constant_85845_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 768" offset="1621915602" size="983040" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5658">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3501" name="onnx::Concat_5659" type="Unsqueeze" version="opset1">
+		<layer id="3280" name="Constant_85845" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5659, onnx::Unsqueeze_5658"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>768</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5659">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3502" name="Constant_90828" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="19464604" size="16"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_5666"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3503" name="Constant_90829" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3504" name="Gather_90830" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_5666"/>
-			</rt_info>
+		<layer id="3281" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/to_v/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>77</dim>
+					<dim>768</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>768</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/to_v/MatMul_output_0">
 					<dim>2</dim>
+					<dim>77</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3505" name="onnx::Reshape_5666" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_5666"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>2</dim>
-				</port>
-			</input>
+		<layer id="3282" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Constant_4" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="44458512" size="32" />
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_5666">
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Constant_4_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3506" name="onnx::Transpose_5667" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_5667"/>
-			</rt_info>
+		<layer id="3283" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Reshape_4" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_5667">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Reshape_4_output_0">
 					<dim>2</dim>
+					<dim>77</dim>
 					<dim>8</dim>
-					<dim>1024</dim>
 					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3507" name="Constant_23251" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_23251"/>
-			</rt_info>
+		<layer id="3284" name="Constant_27159" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3508" name="onnx::Reshape_5668" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5668"/>
-			</rt_info>
+		<layer id="3285" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Transpose_2" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>77</dim>
 					<dim>8</dim>
-					<dim>1024</dim>
 					<dim>80</dim>
 				</port>
 				<port id="1" precision="I64">
@@ -59233,213 +54172,133 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_5668">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Transpose_2_output_0">
 					<dim>2</dim>
-					<dim>1024</dim>
 					<dim>8</dim>
+					<dim>77</dim>
 					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3509" name="onnx::Div_5669" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_5669"/>
-			</rt_info>
+		<layer id="3286" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="44458544" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_5669"/>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Constant_5_output_0">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3510" name="onnx::Cast_5670" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_5670, onnx::Cast_5671, onnx::Div_5669, onnx::Unsqueeze_5672"/>
-			</rt_info>
+		<layer id="3287" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Reshape_5" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>77</dim>
+					<dim>80</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>3</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_5670,onnx::Cast_5671,onnx::Unsqueeze_5672"/>
-			</output>
-		</layer>
-		<layer id="3511" name="onnx::Unsqueeze_5675" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5675"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5675">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Reshape_5_output_0">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3512" name="onnx::Concat_5676" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5676, onnx::Unsqueeze_5675"/>
-			</rt_info>
+		<layer id="3288" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/MatMul_1" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="false" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>77</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>80</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5676">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/MatMul_1_output_0">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3513" name="Constant_89409" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_23134, onnx::Concat_5678, onnx::Gather_5648, onnx::Unsqueeze_5649, onnx::Unsqueeze_5677"/>
-			</rt_info>
+		<layer id="3289" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Constant_8" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="41834480" size="32" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Constant_8_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3514" name="Constant_23134" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_23134"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3515" name="onnx::Unsqueeze_5649" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_23134, onnx::Concat_5678, onnx::Gather_5648, onnx::Unsqueeze_5649, onnx::Unsqueeze_5677"/>
-			</rt_info>
+		<layer id="3290" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Reshape_6" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_5678">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Reshape_6_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3516" name="onnx::Gather_5651" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5651"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_5651"/>
-			</output>
-		</layer>
-		<layer id="3517" name="Constant_23138" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_23138"/>
-			</rt_info>
+		<layer id="3291" name="Constant_27185" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3518" name="onnx::Unsqueeze_5652" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_23138, onnx::Gather_5651, onnx::Unsqueeze_5652"/>
-			</rt_info>
-			<input>
 				<port id="0" precision="I64">
-					<dim>3</dim>
+					<dim>4</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_5652"/>
-			</output>
-		</layer>
-		<layer id="3519" name="onnx::Mul_5673" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5673"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_5673"/>
 			</output>
 		</layer>
-		<layer id="3520" name="onnx::Unsqueeze_5674" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5673, onnx::Unsqueeze_5674"/>
-			</rt_info>
+		<layer id="3292" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Transpose_4" type="Transpose" version="opset1">
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_5674"/>
-			</output>
-		</layer>
-		<layer id="3521" name="onnx::Unsqueeze_5679" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5679"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5679">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="3522" name="onnx::Concat_5680" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5680, onnx::Unsqueeze_5679"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5680">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Transpose_4_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>8</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3523" name="onnx::Reshape_5681" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5681"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
+		<layer id="3293" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Constant_9" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="39374238" size="24" />
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_5681">
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Constant_9_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3524" name="onnx::MatMul_5682" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_5682"/>
-			</rt_info>
+		<layer id="3294" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Reshape_7" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -59452,30 +54311,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_5682">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/Reshape_7_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3525" name="Constant_148223" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640" offset="3151040304" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5684, onnx::MatMul_8968"/>
-			</rt_info>
+		<layer id="3295" name="Constant_85852_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640" offset="1622898642" size="819200" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>640</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3526" name="onnx::Add_5684" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="3296" name="Constant_85852" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5684, onnx::MatMul_8968"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3297" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/to_out.0/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -59488,18 +54359,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_5684">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/to_out.0/MatMul_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3527" name="input.808" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.808"/>
-			</rt_info>
+		<layer id="3298" name="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/to_out.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>1</dim>
@@ -59513,18 +54381,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.808">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/attn2/to_out.0/Add_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3528" name="input.812" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.812"/>
-			</rt_info>
+		<layer id="3299" name="/up_blocks.2/attentions.1/transformer_blocks.0/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -59538,29 +54403,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.812">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/Add_1_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3529" name="Constant_23372" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_23372"/>
-			</rt_info>
+		<layer id="3300" name="Constant_27197" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3530" name="onnx::Mul_5695" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5691, onnx::Div_5694, onnx::Mul_5695, onnx::Pow_5688, onnx::ReduceMean_5690, onnx::Sqrt_5693, onnx::Sub_5687"/>
-			</rt_info>
+		<layer id="3301" name="/up_blocks.2/attentions.1/transformer_blocks.0/norm3/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -59572,28 +54431,45 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_5695">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/norm3/Div_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3531" name="Constant_150516" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="3152678704" size="2560"/>
+		<layer id="3302" name="Constant_87129_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="1623717842" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3532" name="onnx::Add_5696" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3303" name="Constant_87129" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5696"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3304" name="/up_blocks.2/attentions.1/transformer_blocks.0/norm3/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -59607,28 +54483,45 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_5696">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/norm3/Mul_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3533" name="Constant_150517" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="3152681264" size="2560"/>
+		<layer id="3305" name="Constant_87130_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="1623719122" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3534" name="onnx::MatMul_5697" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3306" name="Constant_87130" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_5697"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3307" name="/up_blocks.2/attentions.1/transformer_blocks.0/norm3/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -59642,30 +54535,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_5697">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/norm3/Add_1_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3535" name="Constant_148231" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640" offset="3152683824" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8969, q.167"/>
-			</rt_info>
+		<layer id="3308" name="Constant_85860_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="5120, 640" offset="1623720402" size="6553600" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>640</dim>
+				<port id="0" precision="FP16">
+					<dim>5120</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3536" name="q.167" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="3309" name="Constant_85860" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8969, q.167"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>5120</dim>
+					<dim>640</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>5120</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3310" name="/up_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/proj/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -59673,316 +54578,246 @@
 					<dim>640</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>640</dim>
+					<dim>5120</dim>
 					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.167">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/proj/MatMul_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3537" name="Constant_108579" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
-			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
+					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3538" name="onnx::Gather_5704" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5704, onnx::Gather_5707, onnx::Gather_5710"/>
-			</rt_info>
+		<layer id="3311" name="/up_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/proj/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>5120</dim>
+				</port>
+				<port id="1" precision="FP32">
 					<dim>2</dim>
 					<dim>1024</dim>
-					<dim>640</dim>
+					<dim>5120</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_5704,onnx::Gather_5707,onnx::Gather_5710">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/proj/Add_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3539" name="onnx::Gather_5711" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5711"/>
-			</rt_info>
+		<layer id="3312" name="Constant_78419" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Gather_5711"/>
+				<port id="0" precision="I64">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3540" name="Constant_23398" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_23398"/>
-			</rt_info>
+		<layer id="3313" name="Constant_78420" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3541" name="onnx::Div_5712" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_23398, onnx::Div_5712, onnx::Gather_5711"/>
-			</rt_info>
-			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Div_5712"/>
 			</output>
 		</layer>
-		<layer id="3542" name="onnx::Div_5713" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_5713"/>
-			</rt_info>
+		<layer id="3314" name="Constant_78416" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_5713"/>
+				<port id="0" precision="I64">
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3543" name="onnx::Cast_5714" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_5714, onnx::Cast_5715, onnx::Div_5713, onnx::Unsqueeze_5716"/>
-			</rt_info>
+		<layer id="3315" name="/up_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>5120</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_5714,onnx::Cast_5715,onnx::Unsqueeze_5716"/>
+				<port id="1" precision="I64" names="/up_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Shape_output_0">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3544" name="onnx::Unsqueeze_5724" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5724"/>
-			</rt_info>
+		<layer id="3316" name="/up_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5724">
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Constant_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3545" name="onnx::Concat_5725" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5725, onnx::Unsqueeze_5724"/>
-			</rt_info>
+		<layer id="3317" name="Constant_27214" type="Const" version="opset1">
+			<data element_type="i64" shape="" offset="2143392" size="8" />
+			<output>
+				<port id="0" precision="I64" />
+			</output>
+		</layer>
+		<layer id="3318" name="/up_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Gather" type="Gather" version="opset8">
+			<data batch_dims="0" />
 			<input>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64">
+					<dim>3</dim>
+				</port>
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
+				<port id="2" precision="I64" />
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5725">
+				<port id="3" precision="I64" names="/up_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Gather_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3546" name="onnx::Reshape_5726" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_5726"/>
-			</rt_info>
+		<layer id="3319" name="/up_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="2143400" size="8" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Constant_2_output_0">
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3320" name="/up_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64">
+				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64">
-					<dim>4</dim>
+				<port id="2" precision="I64" names="/up_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Add_output_0">
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3547" name="onnx::Transpose_5727" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_5727"/>
-			</rt_info>
+		<layer id="3321" name="/up_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Constant_3_output_0">
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3322" name="/up_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Div" type="Divide" version="opset1">
+			<data auto_broadcast="numpy" m_pythondiv="true" />
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
+				<port id="0" precision="I64">
+					<dim>1</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_5727">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>8</dim>
-					<dim>80</dim>
+				<port id="2" precision="I64" names="/up_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Div_output_0,/up_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Mul_output_0">
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3548" name="Constant_23511" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_23511"/>
-			</rt_info>
+		<layer id="3323" name="Constant_78415" type="Const" version="opset1">
+			<data element_type="i32" shape="1" offset="12970946" size="4" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="I32">
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3549" name="onnx::Reshape_5728" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5728"/>
-			</rt_info>
+		<layer id="3324" name="ScatterUpdate_78421" type="ScatterUpdate" version="opset3">
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>8</dim>
-					<dim>80</dim>
+				<port id="0" precision="I64">
+					<dim>3</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
+				</port>
+				<port id="2" precision="I64">
+					<dim>1</dim>
+				</port>
+				<port id="3" precision="I32">
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_5728">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
+				<port id="4" precision="I64">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3550" name="onnx::Gather_5705" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5705"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_5705"/>
-			</output>
-		</layer>
-		<layer id="3551" name="Constant_23390" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_23390"/>
-			</rt_info>
+		<layer id="3325" name="Constant_78424" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970950" size="24" />
 			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3552" name="onnx::Unsqueeze_5706" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_23390, onnx::Gather_5705, onnx::Unsqueeze_5706"/>
-			</rt_info>
-			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_5706"/>
-			</output>
-		</layer>
-		<layer id="3553" name="onnx::Mul_5729" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5729"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_5729"/>
 			</output>
 		</layer>
-		<layer id="3554" name="onnx::Unsqueeze_5730" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5729, onnx::Unsqueeze_5730"/>
-			</rt_info>
+		<layer id="3326" name="/up_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Slice" type="StridedSlice" version="opset1">
+			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask="" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>5120</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>3</dim>
+				</port>
+				<port id="2" precision="I64">
+					<dim>3</dim>
+				</port>
+				<port id="3" precision="I64">
+					<dim>3</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_5730"/>
-			</output>
-		</layer>
-		<layer id="3555" name="onnx::Unsqueeze_5735" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5735"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5735">
-					<dim>1</dim>
+				<port id="4" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Slice_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3556" name="onnx::Concat_5736" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5736, onnx::Unsqueeze_5735"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
+		<layer id="3327" name="Constant_78488" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5736">
-					<dim>1</dim>
+				<port id="0" precision="I64">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3557" name="Constant_89436" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_23394, onnx::Concat_5738, onnx::Gather_5708, onnx::Unsqueeze_5709, onnx::Unsqueeze_5737"/>
-			</rt_info>
+		<layer id="3328" name="Constant_78487" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3558" name="Constant_23394" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_23394"/>
-			</rt_info>
+		<layer id="3329" name="Constant_78486" type="Const" version="opset1">
+			<data element_type="i32" shape="1" offset="12970946" size="4" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I32">
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3559" name="onnx::Unsqueeze_5709" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_23394, onnx::Concat_5738, onnx::Gather_5708, onnx::Unsqueeze_5709, onnx::Unsqueeze_5737"/>
-			</rt_info>
+		<layer id="3330" name="ScatterUpdate_78489" type="ScatterUpdate" version="opset3">
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
@@ -59990,71 +54825,55 @@
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_5738">
+				<port id="2" precision="I64">
 					<dim>1</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="3560" name="onnx::Div_5731" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_5731"/>
-			</rt_info>
+				<port id="3" precision="I32">
+					<dim>1</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_5731"/>
+				<port id="4" precision="I64">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3561" name="onnx::Cast_5732" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_5732, onnx::Cast_5733, onnx::Div_5731, onnx::Unsqueeze_5734"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
+		<layer id="3331" name="Constant_78490" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_5732,onnx::Cast_5733,onnx::Unsqueeze_5734"/>
+				<port id="0" precision="I64">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3562" name="onnx::Unsqueeze_5739" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5739"/>
-			</rt_info>
+		<layer id="3332" name="/up_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5739">
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Constant_5_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3563" name="onnx::Concat_5740" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5740, onnx::Unsqueeze_5739"/>
-			</rt_info>
+		<layer id="3333" name="/up_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Mul_1" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64">
+					<dim>1</dim>
+				</port>
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5740">
+				<port id="2" precision="I64" names="/up_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Mul_1_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3564" name="onnx::Reshape_5741" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5741"/>
-			</rt_info>
+		<layer id="3334" name="ScatterUpdate_78491" type="ScatterUpdate" version="opset3">
 			<input>
 				<port id="0" precision="I64">
-					<dim>1</dim>
+					<dim>3</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>1</dim>
@@ -60062,923 +54881,969 @@
 				<port id="2" precision="I64">
 					<dim>1</dim>
 				</port>
+				<port id="3" precision="I32">
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_5741">
+				<port id="4" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3565" name="q.171" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="q.171"/>
-			</rt_info>
+		<layer id="3335" name="Constant_78494" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970950" size="24" />
+			<output>
+				<port id="0" precision="I64">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3336" name="/up_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Slice_1" type="StridedSlice" version="opset1">
+			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask="" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>1024</dim>
-					<dim>80</dim>
+					<dim>5120</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>3</dim>
 				</port>
+				<port id="2" precision="I64">
+					<dim>3</dim>
+				</port>
+				<port id="3" precision="I64">
+					<dim>3</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.171">
-					<dim>16</dim>
+				<port id="4" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Slice_1_output_0">
+					<dim>2</dim>
 					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3566" name="Constant_148238" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 768" offset="3154322224" size="1966080"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.167, onnx::MatMul_8970"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>640</dim>
-					<dim>768</dim>
+					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3567" name="k.167" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.167, onnx::MatMul_8970"/>
-			</rt_info>
+		<layer id="3337" name="/up_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Mul_3" type="Gelu" version="opset7">
+			<data approximation_mode="ERF" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>77</dim>
-					<dim>768</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>640</dim>
-					<dim>768</dim>
+					<dim>1024</dim>
+					<dim>2560</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.167">
+				<port id="1" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Mul_3_output_0">
 					<dim>2</dim>
-					<dim>77</dim>
-					<dim>640</dim>
+					<dim>1024</dim>
+					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3568" name="onnx::Transpose_5756" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_5756"/>
-			</rt_info>
+		<layer id="3338" name="/up_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Mul_4" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>77</dim>
-					<dim>640</dim>
+					<dim>1024</dim>
+					<dim>2560</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>2560</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_5756">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/ff/net.0/Mul_4_output_0">
 					<dim>2</dim>
-					<dim>77</dim>
-					<dim>8</dim>
-					<dim>80</dim>
+					<dim>1024</dim>
+					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3569" name="Constant_23632" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_23632"/>
-			</rt_info>
+		<layer id="3339" name="Constant_85868_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 2560" offset="1630274002" size="3276800" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3570" name="onnx::Reshape_5757" type="Transpose" version="opset1">
+		<layer id="3340" name="Constant_85868" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5757"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>2560</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>2560</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3341" name="/up_blocks.2/attentions.1/transformer_blocks.0/ff/net.2/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>77</dim>
-					<dim>8</dim>
-					<dim>80</dim>
+					<dim>1024</dim>
+					<dim>2560</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>2560</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_5757">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/ff/net.2/MatMul_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>77</dim>
-					<dim>80</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3571" name="k.171" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.171"/>
-			</rt_info>
+		<layer id="3342" name="/up_blocks.2/attentions.1/transformer_blocks.0/ff/net.2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>77</dim>
-					<dim>80</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.171">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>80</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/ff/net.2/Add_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3572" name="onnx::Mul_5793" type="Einsum" version="opset7">
-			<data equation="bid,bjd-&gt;bij"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5793"/>
-			</rt_info>
+		<layer id="3343" name="/up_blocks.2/attentions.1/transformer_blocks.0/Add_2" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
+					<dim>2</dim>
 					<dim>1024</dim>
-					<dim>80</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>80</dim>
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_5793">
-					<dim>16</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/transformer_blocks.0/Add_2_output_0">
+					<dim>2</dim>
 					<dim>1024</dim>
-					<dim>77</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3573" name="Constant_150518" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1" offset="82029992" size="4"/>
+		<layer id="3344" name="/up_blocks.2/attentions.1/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="56093768" size="32" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.1/Constant_1_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3574" name="onnx::Softmax_5795" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Softmax_5795"/>
-			</rt_info>
+		<layer id="3345" name="/up_blocks.2/attentions.1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
+					<dim>2</dim>
 					<dim>1024</dim>
-					<dim>77</dim>
+					<dim>640</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Softmax_5795">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>77</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/Reshape_1_output_0">
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3575" name="attn.83" type="SoftMax" version="opset8">
-			<data axis="-1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="attn.83"/>
-			</rt_info>
+		<layer id="3346" name="Constant_27379" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="13790206" size="32" />
+			<output>
+				<port id="0" precision="I64">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3347" name="/up_blocks.2/attentions.1/Transpose_1" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>77</dim>
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+					<dim>640</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="attn.83">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>77</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/Transpose_1_output_0">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3576" name="Constant_148245" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 768" offset="3156288304" size="1966080"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8971, v.167"/>
-			</rt_info>
+		<layer id="3348" name="up_blocks.2.attentions.1.proj_out.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640, 1, 1" offset="1633550802" size="819200" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>640</dim>
-					<dim>768</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3577" name="v.167" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="3349" name="up_blocks.2.attentions.1.proj_out.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8971, v.167"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>77</dim>
-					<dim>768</dim>
-				</port>
-				<port id="1" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>640</dim>
-					<dim>768</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.167">
-					<dim>2</dim>
-					<dim>77</dim>
+				<port id="1" precision="FP32" names="up_blocks.2.attentions.1.proj_out.weight">
 					<dim>640</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3578" name="onnx::Transpose_5781" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_5781"/>
-			</rt_info>
+		<layer id="3350" name="/up_blocks.2/attentions.1/proj_out/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>77</dim>
 					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_5781">
+				<port id="2" precision="FP32">
 					<dim>2</dim>
-					<dim>77</dim>
-					<dim>8</dim>
-					<dim>80</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3579" name="Constant_23640" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_23640"/>
-			</rt_info>
+		<layer id="3351" name="Reshape_27401_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="1634370002" size="1280" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3580" name="onnx::Reshape_5782" type="Transpose" version="opset1">
+		<layer id="3352" name="Reshape_27401" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5782"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>77</dim>
-					<dim>8</dim>
-					<dim>80</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_5782">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>77</dim>
-					<dim>80</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3581" name="v.171" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="v.171"/>
-			</rt_info>
+		<layer id="3353" name="/up_blocks.2/attentions.1/proj_out/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>77</dim>
-					<dim>80</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.171">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>80</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/proj_out/Conv_output_0">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3582" name="out.83" type="Einsum" version="opset7">
-			<data equation="bij,bjd-&gt;bid"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="out.83"/>
-			</rt_info>
+		<layer id="3354" name="/up_blocks.2/attentions.1/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>77</dim>
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>80</dim>
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="out.83">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.1/Add_output_0">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3583" name="onnx::Gather_5798" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5798, onnx::Gather_5801, onnx::Gather_5804"/>
-			</rt_info>
+		<layer id="3355" name="/up_blocks.2/Concat_2" type="Concat" version="opset1">
+			<data axis="1" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_5798,onnx::Gather_5801,onnx::Gather_5804">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/Concat_2_output_0">
+					<dim>2</dim>
+					<dim>960</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3584" name="onnx::Gather_5799" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5799"/>
-			</rt_info>
+		<layer id="3356" name="up_blocks.2.resnets.2.conv_shortcut.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 960, 1, 1" offset="1634371282" size="1228800" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Gather_5799"/>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>960</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3585" name="Constant_23652" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
+		<layer id="3357" name="up_blocks.2.resnets.2.conv_shortcut.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_23652"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>960</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="1" precision="FP32" names="up_blocks.2.resnets.2.conv_shortcut.weight">
+					<dim>640</dim>
+					<dim>960</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3586" name="onnx::Div_5800" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_23652, onnx::Div_5800, onnx::Gather_5799"/>
-			</rt_info>
+		<layer id="3358" name="/up_blocks.2/resnets.2/conv_shortcut/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>960</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>960</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_5800"/>
+				<port id="2" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3587" name="onnx::Div_5807" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_5807"/>
-			</rt_info>
+		<layer id="3359" name="Reshape_27786_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="1635600082" size="1280" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_5807"/>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3588" name="onnx::Cast_5808" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
+		<layer id="3360" name="Reshape_27786" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_5808, onnx::Cast_5809, onnx::Div_5807, onnx::Unsqueeze_5810"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_5808,onnx::Cast_5809,onnx::Unsqueeze_5810"/>
-			</output>
-		</layer>
-		<layer id="3589" name="onnx::Unsqueeze_5812" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5812"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5812">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
 					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3590" name="onnx::Concat_5813" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5813, onnx::Unsqueeze_5812"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5813">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3591" name="Constant_90838" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="19464604" size="16"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_5820"/>
-			</rt_info>
+		<layer id="3361" name="/up_blocks.2/resnets.2/conv_shortcut/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64">
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.2/conv_shortcut/Conv_output_0">
 					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3592" name="Constant_90839" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
+		<layer id="3362" name="/up_blocks.2/resnets.2/norm1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64" names="/up_blocks.2/resnets.2/norm1/Constant_output_0">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3593" name="Gather_90840" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_5820"/>
-			</rt_info>
+		<layer id="3363" name="/up_blocks.2/resnets.2/norm1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>960</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>2</dim>
+					<dim>3</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64">
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.2/norm1/Reshape_output_0">
 					<dim>2</dim>
+					<dim>32</dim>
+					<dim>30720</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3594" name="onnx::Reshape_5820" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_5820"/>
-			</rt_info>
-			<input>
+		<layer id="3364" name="Constant_27440" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
+			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>2</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_5820">
-					<dim>4</dim>
-				</port>
 			</output>
 		</layer>
-		<layer id="3595" name="onnx::Transpose_5821" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_5821"/>
-			</rt_info>
+		<layer id="3365" name="MVN_27441" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>30720</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_5821">
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.2/norm1/InstanceNormalization_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
+					<dim>32</dim>
+					<dim>30720</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3596" name="Constant_23773" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_23773"/>
-			</rt_info>
+		<layer id="3366" name="/up_blocks.2/resnets.2/norm1/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>960</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64">
+				<port id="1" precision="I64" names="/up_blocks.2/resnets.2/norm1/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3597" name="onnx::Reshape_5822" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5822"/>
-			</rt_info>
+		<layer id="3367" name="/up_blocks.2/resnets.2/norm1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
+					<dim>32</dim>
+					<dim>30720</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_5822">
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.2/norm1/Reshape_1_output_0">
 					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>8</dim>
-					<dim>80</dim>
+					<dim>960</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3598" name="onnx::Div_5823" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_5823"/>
-			</rt_info>
+		<layer id="3368" name="Constant_87133_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 960, 1, 1" offset="1635601362" size="1920" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_5823"/>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>960</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3599" name="onnx::Cast_5824" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
+		<layer id="3369" name="Constant_87133" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_5824, onnx::Cast_5825, onnx::Div_5823, onnx::Unsqueeze_5826"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>960</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_5824,onnx::Cast_5825,onnx::Unsqueeze_5826"/>
-			</output>
-		</layer>
-		<layer id="3600" name="onnx::Unsqueeze_5829" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5829"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5829">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>960</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3601" name="onnx::Concat_5830" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5830, onnx::Unsqueeze_5829"/>
-			</rt_info>
+		<layer id="3370" name="/up_blocks.2/resnets.2/norm1/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>960</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>960</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5830">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.2/norm1/Mul_output_0">
+					<dim>2</dim>
+					<dim>960</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3602" name="Constant_89463" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_23656, onnx::Concat_5832, onnx::Gather_5802, onnx::Unsqueeze_5803, onnx::Unsqueeze_5831"/>
-			</rt_info>
+		<layer id="3371" name="Constant_87134_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 960, 1, 1" offset="1635603282" size="1920" />
 			<output>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>960</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3603" name="Constant_23656" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
+		<layer id="3372" name="Constant_87134" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_23656"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>960</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>960</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3604" name="onnx::Unsqueeze_5803" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_23656, onnx::Concat_5832, onnx::Gather_5802, onnx::Unsqueeze_5803, onnx::Unsqueeze_5831"/>
-			</rt_info>
+		<layer id="3373" name="/up_blocks.2/resnets.2/norm1/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>960</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>960</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_5832">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.2/norm1/Add_output_0">
+					<dim>2</dim>
+					<dim>960</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3605" name="onnx::Gather_5805" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5805"/>
-			</rt_info>
+		<layer id="3374" name="/up_blocks.2/resnets.2/nonlinearity/Mul" type="Swish" version="opset4">
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>960</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64" names="onnx::Gather_5805"/>
+				<port id="1" precision="FP32" names="/up_blocks.2/resnets.2/nonlinearity/Mul_output_0">
+					<dim>2</dim>
+					<dim>960</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3606" name="Constant_23660" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_23660"/>
-			</rt_info>
+		<layer id="3375" name="up_blocks.2.resnets.2.conv1.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 960, 3, 3" offset="1635605202" size="11059200" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>960</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3607" name="onnx::Unsqueeze_5806" type="Gather" version="opset8">
-			<data batch_dims="0"/>
+		<layer id="3376" name="up_blocks.2.resnets.2.conv1.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_23660, onnx::Gather_5805, onnx::Unsqueeze_5806"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>960</dim>
+					<dim>3</dim>
 					<dim>3</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_5806"/>
-			</output>
-		</layer>
-		<layer id="3608" name="onnx::Mul_5827" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5827"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_5827"/>
+				<port id="1" precision="FP32" names="up_blocks.2.resnets.2.conv1.weight">
+					<dim>640</dim>
+					<dim>960</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3609" name="onnx::Unsqueeze_5828" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5827, onnx::Unsqueeze_5828"/>
-			</rt_info>
+		<layer id="3377" name="/up_blocks.2/resnets.2/conv1/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>960</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>960</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_5828"/>
+				<port id="2" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3610" name="onnx::Unsqueeze_5833" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5833"/>
-			</rt_info>
+		<layer id="3378" name="Reshape_27565_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="1646664402" size="1280" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5833">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3611" name="onnx::Concat_5834" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5834, onnx::Unsqueeze_5833"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+					<dim>640</dim>
 					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5834">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3612" name="onnx::Reshape_5835" type="Concat" version="opset1">
-			<data axis="0"/>
+		<layer id="3379" name="Reshape_27565" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5835"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
+					<dim>640</dim>
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_5835">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3613" name="onnx::MatMul_5836" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_5836"/>
-			</rt_info>
+		<layer id="3380" name="/up_blocks.2/resnets.2/conv1/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>8</dim>
-					<dim>80</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_5836">
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.2/conv1/Conv_output_0">
 					<dim>2</dim>
-					<dim>1024</dim>
 					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3614" name="Constant_148252" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640" offset="3158254384" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5838, onnx::MatMul_8992"/>
-			</rt_info>
+		<layer id="3381" name="up_blocks.2.resnets.2.time_emb_proj.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 1280" offset="1646665682" size="1638400" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>640</dim>
+				<port id="0" precision="FP16">
 					<dim>640</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3615" name="onnx::Add_5838" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="3382" name="up_blocks.2.resnets.2.time_emb_proj.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5838, onnx::MatMul_8992"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="up_blocks.2.resnets.2.time_emb_proj.weight">
+					<dim>640</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3383" name="/up_blocks.2/resnets.2/time_emb_proj/Gemm/WithoutBiases" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
+					<dim>1280</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>640</dim>
-					<dim>640</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_5838">
+				<port id="2" precision="FP32">
 					<dim>2</dim>
-					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3616" name="input.816" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3384" name="Constant_87135_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640" offset="1648304082" size="1280" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3385" name="Constant_87135" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.816"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-					<dim>1</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.816">
-					<dim>2</dim>
-					<dim>1024</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3617" name="input.820" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.820"/>
-			</rt_info>
+		<layer id="3386" name="/up_blocks.2/resnets.2/time_emb_proj/Gemm" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
+					<dim>1</dim>
 					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.820">
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.2/time_emb_proj/Gemm_output_0">
 					<dim>2</dim>
-					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3618" name="Constant_23894" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_23894"/>
-			</rt_info>
+		<layer id="3387" name="/up_blocks.2/resnets.2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64">
+				<port id="0" precision="I64" names="/up_blocks.2/resnets.2/Constant_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3619" name="onnx::Mul_5849" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5845, onnx::Div_5848, onnx::Mul_5849, onnx::Pow_5842, onnx::ReduceMean_5844, onnx::Sqrt_5847, onnx::Sub_5841"/>
-			</rt_info>
+		<layer id="3388" name="/up_blocks.2/resnets.2/Unsqueeze" type="Unsqueeze" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 				<port id="1" precision="I64">
@@ -60986,752 +55851,522 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_5849">
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.2/Unsqueeze_output_0">
 					<dim>2</dim>
-					<dim>1024</dim>
 					<dim>640</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3620" name="Constant_150520" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="3159892784" size="2560"/>
+		<layer id="3389" name="/up_blocks.2/resnets.2/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="7064752" size="8" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="I64" names="/up_blocks.2/resnets.2/Constant_1_output_0">
 					<dim>1</dim>
-					<dim>1</dim>
-					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3621" name="onnx::Add_5850" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5850"/>
-			</rt_info>
+		<layer id="3390" name="/up_blocks.2/resnets.2/Unsqueeze_1" type="Unsqueeze" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1024</dim>
 					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
 					<dim>1</dim>
+				</port>
+				<port id="1" precision="I64">
 					<dim>1</dim>
-					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_5850">
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.2/Unsqueeze_1_output_0">
 					<dim>2</dim>
-					<dim>1024</dim>
 					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3622" name="Constant_150521" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="3159895344" size="2560"/>
-			<output>
-				<port id="0" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3623" name="onnx::MatMul_5851" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_5851"/>
-			</rt_info>
+		<layer id="3391" name="/up_blocks.2/resnets.2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1024</dim>
 					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_5851">
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.2/Add_output_0">
 					<dim>2</dim>
-					<dim>1024</dim>
 					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3624" name="Constant_148260" type="Const" version="opset1">
-			<data element_type="f32" shape="5120, 640" offset="3159897904" size="13107200"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5853, onnx::MatMul_8993"/>
-			</rt_info>
+		<layer id="3392" name="/up_blocks.2/resnets.2/norm2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>5120</dim>
-					<dim>640</dim>
+				<port id="0" precision="I64" names="/up_blocks.2/resnets.2/norm2/Constant_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3625" name="onnx::Add_5853" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5853, onnx::MatMul_8993"/>
-			</rt_info>
+		<layer id="3393" name="/up_blocks.2/resnets.2/norm2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1024</dim>
 					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>5120</dim>
-					<dim>640</dim>
+				<port id="1" precision="I64">
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_5853">
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.2/norm2/Reshape_output_0">
 					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>5120</dim>
+					<dim>32</dim>
+					<dim>20480</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3626" name="onnx::Shape_5854" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Shape_5854"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
+		<layer id="3394" name="Constant_27613" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
+			<output>
+				<port id="0" precision="I64">
 					<dim>1</dim>
-					<dim>5120</dim>
 				</port>
-				<port id="1" precision="FP32">
+			</output>
+		</layer>
+		<layer id="3395" name="MVN_27614" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
+			<input>
+				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>5120</dim>
+					<dim>32</dim>
+					<dim>20480</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Shape_5854">
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.2/norm2/InstanceNormalization_output_0">
 					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>5120</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3627" name="Constant_127677" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_23922, onnx::Gather_5856, onnx::Mul_5865"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+					<dim>32</dim>
+					<dim>20480</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3628" name="Constant_127678" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_23922, onnx::Gather_5856, onnx::Mul_5865"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+		<layer id="3396" name="/up_blocks.2/resnets.2/norm2/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="3629" name="Constant_127674" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_23922, onnx::Gather_5856, onnx::Mul_5865"/>
-			</rt_info>
+			</input>
 			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="1" precision="I64" names="/up_blocks.2/resnets.2/norm2/Shape_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3630" name="onnx::Gather_5855" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5855"/>
-			</rt_info>
+		<layer id="3397" name="/up_blocks.2/resnets.2/norm2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>5120</dim>
+					<dim>32</dim>
+					<dim>20480</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_5855">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.2/norm2/Reshape_1_output_0">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3631" name="onnx::Gather_5856" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5856"/>
-			</rt_info>
+		<layer id="3398" name="Constant_87136_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="1648305362" size="1280" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Gather_5856">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3632" name="Constant_23911" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
+		<layer id="3399" name="Constant_87136" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_23911"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3633" name="onnx::Add_5857" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_23911, onnx::Add_5857, onnx::Gather_5856"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Add_5857">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
 					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3634" name="onnx::Add_5859" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5859"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Add_5859">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3635" name="onnx::Div_5860" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5859, onnx::Div_5860"/>
-			</rt_info>
+		<layer id="3400" name="/up_blocks.2/resnets.2/norm2/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Div_5860">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.2/norm2/Mul_output_0">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3636" name="onnx::Div_5861" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_5861"/>
-			</rt_info>
+		<layer id="3401" name="Constant_87137_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="1648306642" size="1280" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_5861">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3637" name="onnx::Mul_5862" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
+		<layer id="3402" name="Constant_87137" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_5861, onnx::Mul_5862, onnx::Mul_5863, onnx::Slice_5864"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
 					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Mul_5862,onnx::Slice_5864">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3638" name="Constant_127673" type="Const" version="opset1">
-			<data element_type="i32" shape="1" offset="25941500" size="4"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_23922, onnx::Gather_5856, onnx::Mul_5865"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I32">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3639" name="ScatterUpdate_127679" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_23922, onnx::Gather_5856, onnx::Mul_5865"/>
-			</rt_info>
+		<layer id="3403" name="/up_blocks.2/resnets.2/norm2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
+					<dim>640</dim>
 					<dim>1</dim>
-				</port>
-				<port id="3" precision="I32">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="I64">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3640" name="Constant_127682" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941504" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_23922, onnx::Gather_5856, onnx::Mul_5865"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.2/norm2/Add_output_0">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3641" name="onnx::Mul_5865" type="StridedSlice" version="opset1">
-			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask=""/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_23922, onnx::Gather_5856, onnx::Mul_5865"/>
-			</rt_info>
+		<layer id="3404" name="/up_blocks.2/resnets.2/nonlinearity_1/Mul" type="Swish" version="opset4">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>5120</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="3" precision="I64">
-					<dim>3</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="FP32" names="onnx::Mul_5865">
+				<port id="1" precision="FP32" names="/up_blocks.2/resnets.2/nonlinearity_1/Mul_output_0">
 					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>2560</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3642" name="Constant_127746" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_23991, onnx::Div_5868, onnx::Gather_5856"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3643" name="Constant_127745" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_23991, onnx::Div_5868, onnx::Gather_5856"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3644" name="Constant_127744" type="Const" version="opset1">
-			<data element_type="i32" shape="1" offset="25941500" size="4"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_23991, onnx::Div_5868, onnx::Gather_5856"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I32">
-					<dim>1</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3645" name="ScatterUpdate_127747" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_23991, onnx::Div_5868, onnx::Gather_5856"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="3" precision="I32">
-					<dim>1</dim>
-				</port>
-			</input>
+		<layer id="3405" name="up_blocks.2.resnets.2.conv2.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640, 3, 3" offset="1648307922" size="7372800" />
 			<output>
-				<port id="4" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
 					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3646" name="Constant_127748" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_23991, onnx::Div_5868, onnx::Gather_5856"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3647" name="onnx::Mul_5866" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5866"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_5866">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3648" name="onnx::Slice_5867" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5866, onnx::Slice_5867"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Slice_5867">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3649" name="ScatterUpdate_127749" type="ScatterUpdate" version="opset3">
+		<layer id="3406" name="up_blocks.2.resnets.2.conv2.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_23991, onnx::Div_5868, onnx::Gather_5856"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
+					<dim>3</dim>
 					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="3" precision="I32">
-					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="I64">
+				<port id="1" precision="FP32" names="up_blocks.2.resnets.2.conv2.weight">
+					<dim>640</dim>
+					<dim>640</dim>
 					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3650" name="Constant_127752" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941504" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_23991, onnx::Div_5868, onnx::Gather_5856"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3651" name="onnx::Div_5868" type="StridedSlice" version="opset1">
-			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask=""/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_23991, onnx::Div_5868, onnx::Gather_5856"/>
-			</rt_info>
+		<layer id="3407" name="/up_blocks.2/resnets.2/conv2/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>5120</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
-				<port id="2" precision="I64">
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
 					<dim>3</dim>
-				</port>
-				<port id="3" precision="I64">
 					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="FP32" names="onnx::Div_5868">
+				<port id="2" precision="FP32">
 					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>2560</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3652" name="onnx::Mul_5876" type="Gelu" version="opset7">
-			<data approximation_mode="ERF"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5871, onnx::Erf_5870, onnx::Mul_5873, onnx::Mul_5874, onnx::Mul_5876"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>2560</dim>
-				</port>
-			</input>
+		<layer id="3408" name="Reshape_27738_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="1655680722" size="1280" />
 			<output>
-				<port id="1" precision="FP32" names="onnx::Mul_5876">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>2560</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3653" name="input.824" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3409" name="Reshape_27738" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.824"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>2560</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>2560</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.824">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>2560</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3654" name="Constant_148268" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 2560" offset="3173005104" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5879, onnx::MatMul_8994"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>640</dim>
-					<dim>2560</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3655" name="onnx::Add_5879" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5879, onnx::MatMul_8994"/>
-			</rt_info>
+		<layer id="3410" name="/up_blocks.2/resnets.2/conv2/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>2560</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>640</dim>
-					<dim>2560</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_5879">
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.2/conv2/Conv_output_0">
 					<dim>2</dim>
-					<dim>1024</dim>
 					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3656" name="onnx::Add_5880" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5880"/>
-			</rt_info>
+		<layer id="3411" name="/up_blocks.2/resnets.2/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>2</dim>
 					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>2</dim>
-					<dim>1024</dim>
 					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_5880">
+				<port id="2" precision="FP32" names="/up_blocks.2/resnets.2/Add_1_output_0,/up_blocks.2/resnets.2/Div_output_0">
 					<dim>2</dim>
-					<dim>1024</dim>
 					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3657" name="onnx::Reshape_5881" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5881"/>
-			</rt_info>
+		<layer id="3412" name="/up_blocks.2/attentions.2/norm/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.2/norm/Constant_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3413" name="/up_blocks.2/attentions.2/norm/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1024</dim>
 					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
+				<port id="1" precision="I64">
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_5881">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/norm/Reshape_output_0">
 					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
+					<dim>32</dim>
+					<dim>20480</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3658" name="Constant_90851" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18233080" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5890"/>
-			</rt_info>
+		<layer id="3414" name="Constant_27826" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3659" name="Constant_90852" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3660" name="Gather_90853" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5890"/>
-			</rt_info>
+		<layer id="3415" name="MVN_27827" type="MVN" version="opset6">
+			<data eps="9.9999999747524271e-07" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>20480</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_5890">
-					<dim>4</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/norm/InstanceNormalization_output_0">
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>20480</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3661" name="onnx::Transpose_5891" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_5891"/>
-			</rt_info>
+		<layer id="3416" name="/up_blocks.2/attentions.2/norm/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1024</dim>
 					<dim>640</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_5891">
-					<dim>2</dim>
 					<dim>32</dim>
 					<dim>32</dim>
-					<dim>640</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="3662" name="Constant_24156" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="27579960" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_24156"/>
-			</rt_info>
+			</input>
 			<output>
-				<port id="0" precision="I64">
+				<port id="1" precision="I64" names="/up_blocks.2/attentions.2/norm/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3663" name="input.828" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.828"/>
-			</rt_info>
+		<layer id="3417" name="/up_blocks.2/attentions.2/norm/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>32</dim>
-					<dim>32</dim>
-					<dim>640</dim>
+					<dim>20480</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.828">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/norm/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -61739,25 +56374,41 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3664" name="m.up_blocks.2.attentions.0.proj_out.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640, 1, 1" offset="3179558704" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.2.attentions.0.proj_out.weight"/>
-			</rt_info>
+		<layer id="3418" name="Constant_87138_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="1655682002" size="1280" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.2.attentions.0.proj_out.weight">
-					<dim>640</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>640</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3665" name="Convolution_24158" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
+		<layer id="3419" name="Constant_87138" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_24158"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3420" name="/up_blocks.2/attentions.2/norm/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -61766,14 +56417,14 @@
 					<dim>32</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>640</dim>
+					<dim>1</dim>
 					<dim>640</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/norm/Mul_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -61781,10 +56432,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3666" name="Reshape_24178" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="3181197104" size="2560"/>
+		<layer id="3421" name="Constant_87139_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="1655683282" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -61792,19 +56443,13 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3667" name="onnx::Add_5893" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3422" name="Constant_87139" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_24177, Reshape_24178, onnx::Add_5893"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-				<port id="1" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -61812,19 +56457,16 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_5893">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3668" name="onnx::Concat_5894" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5894"/>
-			</rt_info>
+		<layer id="3423" name="/up_blocks.2/attentions.2/norm/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -61833,14 +56475,14 @@
 					<dim>32</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>2</dim>
+					<dim>1</dim>
 					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Concat_5894">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/norm/Add_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -61848,63 +56490,51 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3669" name="onnx::Cast_5895" type="Concat" version="opset1">
-			<data axis="1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.832, onnx::Cast_5895"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
+		<layer id="3424" name="up_blocks.2.attentions.2.proj_in.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640, 1, 1" offset="1655684562" size="819200" />
+			<output>
+				<port id="0" precision="FP16">
 					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
 					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="input.832,onnx::Cast_5895">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>32</dim>
-					<dim>32</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3670" name="m.up_blocks.2.resnets.1.conv_shortcut.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 1280, 1, 1" offset="3181199664" size="3276800"/>
+		<layer id="3425" name="up_blocks.2.attentions.2.proj_in.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.2.resnets.1.conv_shortcut.weight"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.2.resnets.1.conv_shortcut.weight">
+				<port id="1" precision="FP32" names="up_blocks.2.attentions.2.proj_in.weight">
+					<dim>640</dim>
 					<dim>640</dim>
-					<dim>1280</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3671" name="Convolution_24548" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_24548"/>
-			</rt_info>
+		<layer id="3426" name="/up_blocks.2/attentions.2/proj_in/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1280</dim>
+					<dim>640</dim>
 					<dim>32</dim>
 					<dim>32</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>640</dim>
-					<dim>1280</dim>
+					<dim>640</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
@@ -61918,10 +56548,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3672" name="Reshape_24568" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="3184476464" size="2560"/>
+		<layer id="3427" name="Reshape_27949_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="1656503762" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>640</dim>
 					<dim>1</dim>
@@ -61929,11 +56559,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3673" name="onnx::Add_5940" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3428" name="Reshape_27949" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_24567, Reshape_24568, onnx::Add_5940"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3429" name="/up_blocks.2/attentions.2/proj_in/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -61949,7 +56598,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_5940">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/proj_in/Conv_output_0">
 					<dim>2</dim>
 					<dim>640</dim>
 					<dim>32</dim>
@@ -61957,1383 +56606,1005 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3674" name="onnx::Reshape_5897" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5897"/>
-			</rt_info>
+		<layer id="3430" name="Constant_27977" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9116600" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_5897">
-					<dim>3</dim>
+				<port id="0" precision="I64">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3675" name="onnx::InstanceNormalization_5898" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_5898"/>
-			</rt_info>
+		<layer id="3431" name="/up_blocks.2/attentions.2/Transpose" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1280</dim>
+					<dim>640</dim>
 					<dim>32</dim>
 					<dim>32</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>3</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_5898">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/Transpose_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
-					<dim>40960</dim>
+					<dim>32</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3676" name="Constant_24218" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_24218"/>
-			</rt_info>
+		<layer id="3432" name="/up_blocks.2/attentions.2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="39374238" size="24" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.2/Constant_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3677" name="MVN_24219" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_24238, Concat_24283, MVN_24219, Multiply_24266, Reshape_24239, Reshape_24284, onnx::Reshape_5901"/>
-			</rt_info>
+		<layer id="3433" name="/up_blocks.2/attentions.2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>32</dim>
-					<dim>40960</dim>
+					<dim>32</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_5901">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/Reshape_output_0">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>40960</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3678" name="onnx::Reshape_5902" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5902"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</input>
+		<layer id="3434" name="Constant_27986" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_5902">
-					<dim>4</dim>
+				<port id="0" precision="I64">
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3679" name="onnx::Mul_5903" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5903"/>
-			</rt_info>
+		<layer id="3435" name="/up_blocks.2/attentions.2/transformer_blocks.0/norm1/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>40960</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_5903">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/norm1/Div_output_0">
 					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>32</dim>
-					<dim>32</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3680" name="Constant_150524" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="3184479024" size="5120"/>
+		<layer id="3436" name="Constant_87140_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="1656505042" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1280</dim>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3681" name="onnx::Add_5906" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3437" name="Constant_87140" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5906"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3438" name="/up_blocks.2/attentions.2/transformer_blocks.0/norm1/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>32</dim>
-					<dim>32</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1280</dim>
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_5906">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/norm1/Mul_output_0">
 					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>32</dim>
-					<dim>32</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3682" name="Constant_150525" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1280, 1, 1" offset="3184484144" size="5120"/>
+		<layer id="3439" name="Constant_87141_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="1656506322" size="1280" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1280</dim>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3683" name="onnx::Cast_5909" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3440" name="Constant_87141" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.836, onnx::Cast_5909"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1280</dim>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.836,onnx::Cast_5909">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3684" name="input.840" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.840, onnx::Mul_5911"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="FP32" names="input.840">
-					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3685" name="m.up_blocks.2.resnets.1.conv1.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 1280, 3, 3" offset="3184489264" size="29491200"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.2.resnets.1.conv1.weight"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.2.resnets.1.conv1.weight">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>640</dim>
-					<dim>1280</dim>
-					<dim>3</dim>
-					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3686" name="Convolution_24324" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_24324"/>
-			</rt_info>
+		<layer id="3441" name="/up_blocks.2/attentions.2/transformer_blocks.0/norm1/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1280</dim>
-					<dim>32</dim>
-					<dim>32</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>640</dim>
-					<dim>1280</dim>
-					<dim>3</dim>
-					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/norm1/Add_1_output_0">
 					<dim>2</dim>
+					<dim>1024</dim>
 					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3687" name="Reshape_24344" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="3213980464" size="2560"/>
+		<layer id="3442" name="Constant_85878_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640" offset="1656507602" size="819200" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
 					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3688" name="onnx::Add_5913" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3443" name="Constant_85878" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_24343, Reshape_24344, onnx::Add_5913"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
 					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
 					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_5913">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
 					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3689" name="onnx::Gemm_5915" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gemm_5915, onnx::Mul_5914"/>
-			</rt_info>
+		<layer id="3444" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/to_q/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1280</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="onnx::Gemm_5915">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/to_q/MatMul_output_0">
 					<dim>2</dim>
-					<dim>1280</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3690" name="m.up_blocks.2.resnets.1.time_emb_proj.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 1280" offset="3213983024" size="3276800"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.2.resnets.1.time_emb_proj.weight"/>
-			</rt_info>
+		<layer id="3445" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="40196022" size="32" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.2.resnets.1.time_emb_proj.weight">
-					<dim>640</dim>
-					<dim>1280</dim>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Constant_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3691" name="MatMul_24376" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="MatMul_24376"/>
-			</rt_info>
+		<layer id="3446" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1280</dim>
-				</port>
-				<port id="1" precision="FP32">
+					<dim>1024</dim>
 					<dim>640</dim>
-					<dim>1280</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Reshape_output_0">
 					<dim>2</dim>
-					<dim>640</dim>
+					<dim>1024</dim>
+					<dim>8</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3692" name="Constant_150526" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640" offset="3217259824" size="2560"/>
+		<layer id="3447" name="Constant_28009" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>640</dim>
+				<port id="0" precision="I64">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3693" name="onnx::Unsqueeze_5916" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Multiply_24377, onnx::Unsqueeze_5916"/>
-			</rt_info>
+		<layer id="3448" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Transpose" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>640</dim>
+					<dim>1024</dim>
+					<dim>8</dim>
+					<dim>80</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>640</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_5916">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Transpose_output_0">
 					<dim>2</dim>
-					<dim>640</dim>
+					<dim>8</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3694" name="onnx::Unsqueeze_5917" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5917"/>
-			</rt_info>
+		<layer id="3449" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="40196054" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5917">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Constant_1_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3695" name="onnx::Unsqueeze_5918" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5918"/>
-			</rt_info>
+		<layer id="3450" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>640</dim>
+					<dim>8</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_5918">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Reshape_1_output_0">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3696" name="onnx::Unsqueeze_5919" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="14129392" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5919"/>
-			</rt_info>
+		<layer id="3451" name="Constant_85885_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640" offset="1657326802" size="819200" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5919">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3697" name="onnx::Add_5920" type="Unsqueeze" version="opset1">
+		<layer id="3452" name="Constant_85885" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5920"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
 					<dim>640</dim>
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_5920">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
 					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3698" name="onnx::Cast_5921" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.844, onnx::Cast_5921"/>
-			</rt_info>
+		<layer id="3453" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/to_k/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>1024</dim>
 					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>2</dim>
 					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.844,onnx::Cast_5921">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/to_k/MatMul_output_0">
 					<dim>2</dim>
+					<dim>1024</dim>
 					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3699" name="onnx::Reshape_5923" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5923"/>
-			</rt_info>
+		<layer id="3454" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="40196022" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_5923">
-					<dim>3</dim>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Constant_2_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3700" name="onnx::InstanceNormalization_5924" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_5924"/>
-			</rt_info>
+		<layer id="3455" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Reshape_2" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>1024</dim>
 					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>3</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_5924">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Reshape_2_output_0">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>20480</dim>
+					<dim>1024</dim>
+					<dim>8</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3701" name="Constant_24394" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_24394"/>
-			</rt_info>
+		<layer id="3456" name="Constant_28025" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3702" name="MVN_24395" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_24414, Concat_24459, MVN_24395, Multiply_24442, Reshape_24415, Reshape_24460, onnx::Reshape_5927"/>
-			</rt_info>
+		<layer id="3457" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Transpose_1" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>20480</dim>
+					<dim>1024</dim>
+					<dim>8</dim>
+					<dim>80</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_5927">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Transpose_1_output_0">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>20480</dim>
+					<dim>8</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3703" name="onnx::Reshape_5928" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5928"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</input>
+		<layer id="3458" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="40196054" size="24" />
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_5928">
-					<dim>4</dim>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Constant_3_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3704" name="onnx::Mul_5929" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5929"/>
-			</rt_info>
+		<layer id="3459" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Reshape_3" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>20480</dim>
+					<dim>8</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_5929">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Reshape_3_output_0">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3705" name="Constant_150527" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="3217262384" size="2560"/>
+		<layer id="3460" name="Constant_87142_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1" offset="41015278" size="2" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-					<dim>640</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3706" name="onnx::Add_5932" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3461" name="Constant_87142" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5932"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-				<port id="1" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-					<dim>640</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_5932">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3707" name="Constant_150528" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="3217264944" size="2560"/>
-			<output>
+		<layer id="3462" name="Multiply_86223" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
 				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
+				</port>
+				<port id="1" precision="FP32">
 					<dim>1</dim>
-					<dim>640</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
+			</input>
+			<output>
+				<port id="2" precision="FP32">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3708" name="onnx::Cast_5935" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.848, onnx::Cast_5935"/>
-			</rt_info>
+		<layer id="3463" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Mul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.848,onnx::Cast_5935">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Add_output_0,/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Mul_output_0">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>1024</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3709" name="input.852" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.852, onnx::Mul_5937"/>
-			</rt_info>
+		<layer id="3464" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Softmax" type="SoftMax" version="opset8">
+			<data axis="-1" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>1024</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="input.852">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
+				<port id="1" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Cast_output_0,/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Softmax_output_0">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>1024</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3710" name="m.up_blocks.2.resnets.1.conv2.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640, 3, 3" offset="3217267504" size="14745600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.2.resnets.1.conv2.weight"/>
-			</rt_info>
+		<layer id="3465" name="Constant_85892_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640" offset="1658146002" size="819200" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.2.resnets.1.conv2.weight">
+				<port id="0" precision="FP16">
 					<dim>640</dim>
 					<dim>640</dim>
-					<dim>3</dim>
-					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3711" name="Convolution_24500" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
+		<layer id="3466" name="Constant_85892" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_24500"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-				<port id="1" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>640</dim>
 					<dim>640</dim>
-					<dim>3</dim>
-					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
 					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3712" name="Reshape_24520" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="3232013104" size="2560"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
 					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3713" name="onnx::Add_5939" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_24519, Reshape_24520, onnx::Add_5939"/>
-			</rt_info>
+		<layer id="3467" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/to_v/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>1024</dim>
 					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
 					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_5939">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/to_v/MatMul_output_0">
 					<dim>2</dim>
+					<dim>1024</dim>
 					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3714" name="onnx::Div_5941" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.856, onnx::Div_5941"/>
-			</rt_info>
+		<layer id="3468" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Constant_4" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="40196022" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Constant_4_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3469" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Reshape_4" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>1024</dim>
 					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.856,onnx::Div_5941">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Reshape_4_output_0">
 					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
+					<dim>1024</dim>
+					<dim>8</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3715" name="onnx::Reshape_5956" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5956"/>
-			</rt_info>
+		<layer id="3470" name="Constant_28041" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_5956">
-					<dim>3</dim>
+				<port id="0" precision="I64">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3716" name="onnx::InstanceNormalization_5957" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_5957"/>
-			</rt_info>
+		<layer id="3471" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Transpose_2" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
+					<dim>1024</dim>
+					<dim>8</dim>
+					<dim>80</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>3</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_5957">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Transpose_2_output_0">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>20480</dim>
+					<dim>8</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3717" name="Constant_24624" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_24624"/>
-			</rt_info>
+		<layer id="3472" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="40196054" size="24" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Constant_5_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3718" name="MVN_24625" type="MVN" version="opset6">
-			<data eps="9.9999999747524271e-07" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_24644, Concat_24689, MVN_24625, Multiply_24672, Reshape_24645, Reshape_24690, onnx::Reshape_5960"/>
-			</rt_info>
+		<layer id="3473" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Reshape_5" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>20480</dim>
+					<dim>8</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_5960">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>20480</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Reshape_5_output_0">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3719" name="onnx::Reshape_5961" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5944, onnx::Gather_5947, onnx::Gather_5950, onnx::Gather_5953, onnx::Reshape_5961"/>
-			</rt_info>
+		<layer id="3474" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/MatMul_1" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="false" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>1024</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_5944,onnx::Gather_5947,onnx::Gather_5950,onnx::Gather_5953,onnx::Reshape_5961">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/MatMul_1_output_0">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3475" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Constant_8" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="41834480" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Constant_8_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3720" name="onnx::Mul_5962" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_5962"/>
-			</rt_info>
+		<layer id="3476" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Reshape_6" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>20480</dim>
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_5962">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Reshape_6_output_0">
 					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
+					<dim>8</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3721" name="Constant_150529" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="3232015664" size="2560"/>
+		<layer id="3477" name="Constant_28067" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="0" precision="I64">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3722" name="onnx::Add_5965" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5965"/>
-			</rt_info>
+		<layer id="3478" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Transpose_4" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
+					<dim>8</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_5965">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Transpose_4_output_0">
 					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
+					<dim>1024</dim>
+					<dim>8</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3723" name="Constant_150530" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="3232018224" size="2560"/>
+		<layer id="3479" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Constant_9" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="39374238" size="24" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Constant_9_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3724" name="input.860" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.860"/>
-			</rt_info>
+		<layer id="3480" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Reshape_7" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
+					<dim>1024</dim>
+					<dim>8</dim>
+					<dim>80</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="1" precision="I64">
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.860">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/Reshape_7_output_0">
 					<dim>2</dim>
+					<dim>1024</dim>
 					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3725" name="m.up_blocks.2.attentions.1.proj_in.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640, 1, 1" offset="3232020784" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.2.attentions.1.proj_in.weight"/>
-			</rt_info>
+		<layer id="3481" name="Constant_85899_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640" offset="1658965202" size="819200" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.2.attentions.1.proj_in.weight">
+				<port id="0" precision="FP16">
 					<dim>640</dim>
 					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3726" name="Convolution_24727" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
+		<layer id="3482" name="Constant_85899" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_24727"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3483" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/to_out.0/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>1024</dim>
 					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>640</dim>
 					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/to_out.0/MatMul_output_0">
 					<dim>2</dim>
+					<dim>1024</dim>
 					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3727" name="Reshape_24747" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="3233659184" size="2560"/>
-			<output>
+		<layer id="3484" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/to_out.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
 				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>640</dim>
 					<dim>1</dim>
 					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3728" name="onnx::Transpose_5969" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_24746, Reshape_24747, onnx::Transpose_5969"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
 					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
+					<dim>2</dim>
+					<dim>1024</dim>
 					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_5969">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn1/to_out.0/Add_output_0">
 					<dim>2</dim>
+					<dim>1024</dim>
 					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3729" name="Constant_24775" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18233080" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_24775"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3730" name="onnx::Reshape_5970" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5970"/>
-			</rt_info>
+		<layer id="3485" name="/up_blocks.2/attentions.2/transformer_blocks.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>1024</dim>
 					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_5970">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/Add_output_0">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>32</dim>
+					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3731" name="Constant_89508" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_24601, onnx::Concat_5973, onnx::Gather_5945, onnx::Unsqueeze_5946, onnx::Unsqueeze_5972"/>
-			</rt_info>
+		<layer id="3486" name="Constant_28079" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3732" name="Constant_24601" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_24601"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3733" name="onnx::Unsqueeze_5946" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_24601, onnx::Concat_5973, onnx::Gather_5945, onnx::Unsqueeze_5946, onnx::Unsqueeze_5972"/>
-			</rt_info>
+		<layer id="3487" name="/up_blocks.2/attentions.2/transformer_blocks.0/norm2/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_5973">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3734" name="onnx::Gather_5951" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5951"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_5951"/>
-			</output>
-		</layer>
-		<layer id="3735" name="Constant_24609" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_24609"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3736" name="onnx::Mul_5952" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_24609, onnx::Gather_5951, onnx::Mul_5952"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Mul_5952"/>
-			</output>
-		</layer>
-		<layer id="3737" name="onnx::Gather_5954" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="14129392" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5954"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_5954"/>
-			</output>
-		</layer>
-		<layer id="3738" name="Constant_24613" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_24613"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3739" name="onnx::Mul_5955" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_24613, onnx::Gather_5954, onnx::Mul_5955"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Mul_5955"/>
-			</output>
-		</layer>
-		<layer id="3740" name="onnx::Unsqueeze_5971" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5971"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_5971"/>
-			</output>
-		</layer>
-		<layer id="3741" name="onnx::Unsqueeze_5974" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_5974"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_5974">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/norm2/Div_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3742" name="onnx::Concat_5975" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_5975, onnx::Unsqueeze_5974"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
+		<layer id="3488" name="Constant_87144_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="1659784402" size="1280" />
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_5975">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3743" name="Constant_89517" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_24605, onnx::Concat_5977, onnx::Gather_5948, onnx::Unsqueeze_5949, onnx::Unsqueeze_5976"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
 					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3744" name="Constant_24605" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_24605"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3745" name="onnx::Unsqueeze_5949" type="Gather" version="opset8">
-			<data batch_dims="0"/>
+		<layer id="3489" name="Constant_87144" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_24605, onnx::Concat_5977, onnx::Gather_5948, onnx::Unsqueeze_5949, onnx::Unsqueeze_5976"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>1</dim>
+					<dim>640</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_5977">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3746" name="onnx::Reshape_5978" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_5978"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_5978">
-					<dim>3</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3747" name="input.864" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.864"/>
-			</rt_info>
+		<layer id="3490" name="/up_blocks.2/attentions.2/transformer_blocks.0/norm2/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>32</dim>
+					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.864">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/norm2/Mul_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3748" name="Constant_24860" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_24860"/>
-			</rt_info>
+		<layer id="3491" name="Constant_87145_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="1659785682" size="1280" />
 			<output>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3749" name="onnx::Mul_5988" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
+		<layer id="3492" name="Constant_87145" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5984, onnx::Div_5987, onnx::Mul_5988, onnx::Pow_5981, onnx::ReduceMean_5983, onnx::Sqrt_5986, onnx::Sub_5980"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_5988">
-					<dim>2</dim>
-					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="3750" name="Constant_150531" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="3233661744" size="2560"/>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3751" name="onnx::Add_5989" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_5989"/>
-			</rt_info>
+		<layer id="3493" name="/up_blocks.2/attentions.2/transformer_blocks.0/norm2/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -63347,65 +57618,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_5989">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/norm2/Add_1_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3752" name="Constant_150532" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="3233664304" size="2560"/>
+		<layer id="3494" name="Constant_85907_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640" offset="1659786962" size="819200" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3753" name="onnx::MatMul_5990" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3495" name="Constant_85907" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_5990"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
+				<port id="0" precision="FP16">
 					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
 					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_5990">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3754" name="Constant_148279" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640" offset="3233666864" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9001, q.175"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>640</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3755" name="q.175" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9001, q.175"/>
-			</rt_info>
+		<layer id="3496" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/to_q/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -63418,153 +57666,162 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.175">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/to_q/MatMul_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3756" name="Constant_108648" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
+		<layer id="3497" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="40196022" size="32" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Constant_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3757" name="onnx::Gather_5997" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5997, onnx::Gather_6000, onnx::Gather_6003"/>
-			</rt_info>
+		<layer id="3498" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>640</dim>
 				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_5997,onnx::Gather_6000,onnx::Gather_6003">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Reshape_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>8</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3758" name="onnx::Gather_6004" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6004"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_6004"/>
-			</output>
-		</layer>
-		<layer id="3759" name="Constant_24886" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_24886"/>
-			</rt_info>
+		<layer id="3499" name="Constant_28102" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64">
+					<dim>4</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3760" name="onnx::Div_6005" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_24886, onnx::Div_6005, onnx::Gather_6004"/>
-			</rt_info>
+		<layer id="3500" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Transpose" type="Transpose" version="opset1">
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>8</dim>
+					<dim>80</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_6005"/>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Transpose_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3761" name="onnx::Div_6006" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_6006"/>
-			</rt_info>
+		<layer id="3501" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="40196054" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_6006"/>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Constant_1_output_0">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3762" name="onnx::Cast_6007" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_6007, onnx::Cast_6008, onnx::Div_6006, onnx::Unsqueeze_6009"/>
-			</rt_info>
+		<layer id="3502" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>3</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_6007,onnx::Cast_6008,onnx::Unsqueeze_6009"/>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Reshape_1_output_0">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3763" name="onnx::Unsqueeze_6017" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6017"/>
-			</rt_info>
+		<layer id="3503" name="Constant_85914_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 768" offset="1660606162" size="983040" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6017">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3764" name="onnx::Concat_6018" type="Unsqueeze" version="opset1">
+		<layer id="3504" name="Constant_85914" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6018, onnx::Unsqueeze_6017"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>768</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6018">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3765" name="onnx::Reshape_6019" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_6019"/>
-			</rt_info>
+		<layer id="3505" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/to_k/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>77</dim>
+					<dim>768</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>768</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/to_k/MatMul_output_0">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3506" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="44458512" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Constant_2_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3766" name="onnx::Transpose_6020" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_6020"/>
-			</rt_info>
+		<layer id="3507" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Reshape_2" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1024</dim>
+					<dim>77</dim>
 					<dim>640</dim>
 				</port>
 				<port id="1" precision="I64">
@@ -63572,33 +57829,27 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_6020">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Reshape_2_output_0">
 					<dim>2</dim>
-					<dim>1024</dim>
+					<dim>77</dim>
 					<dim>8</dim>
 					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3767" name="Constant_24999" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_24999"/>
-			</rt_info>
+		<layer id="3508" name="Constant_28118" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3768" name="onnx::Reshape_6021" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6021"/>
-			</rt_info>
+		<layer id="3509" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Transpose_1" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1024</dim>
+					<dim>77</dim>
 					<dim>8</dim>
 					<dim>80</dim>
 				</port>
@@ -63607,416 +57858,342 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_6021">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Transpose_1_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
-					<dim>1024</dim>
+					<dim>77</dim>
 					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3769" name="onnx::Gather_5998" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_5998"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_5998"/>
-			</output>
-		</layer>
-		<layer id="3770" name="Constant_24878" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_24878"/>
-			</rt_info>
+		<layer id="3510" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="44458544" size="24" />
 			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3771" name="onnx::Unsqueeze_5999" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_24878, onnx::Gather_5998, onnx::Unsqueeze_5999"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Constant_3_output_0">
 					<dim>3</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_5999"/>
 			</output>
 		</layer>
-		<layer id="3772" name="onnx::Mul_6022" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6022"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_6022"/>
-			</output>
-		</layer>
-		<layer id="3773" name="onnx::Unsqueeze_6023" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6022, onnx::Unsqueeze_6023"/>
-			</rt_info>
+		<layer id="3511" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Reshape_3" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>77</dim>
+					<dim>80</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>3</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_6023"/>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Reshape_3_output_0">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>80</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3774" name="onnx::Unsqueeze_6028" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6028"/>
-			</rt_info>
+		<layer id="3512" name="Constant_87146_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1" offset="41015278" size="2" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6028">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3775" name="onnx::Concat_6029" type="Unsqueeze" version="opset1">
+		<layer id="3513" name="Constant_87146" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6029, onnx::Unsqueeze_6028"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6029">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3776" name="Constant_89544" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_24882, onnx::Concat_6031, onnx::Gather_6001, onnx::Unsqueeze_6002, onnx::Unsqueeze_6030"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3777" name="Constant_24882" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_24882"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3778" name="onnx::Unsqueeze_6002" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_24882, onnx::Concat_6031, onnx::Gather_6001, onnx::Unsqueeze_6002, onnx::Unsqueeze_6030"/>
-			</rt_info>
+		<layer id="3514" name="Multiply_86225" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>80</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_6031">
-					<dim>1</dim>
+				<port id="2" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3779" name="onnx::Div_6024" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_6024"/>
-			</rt_info>
+		<layer id="3515" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Mul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>80</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_6024"/>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Add_output_0,/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Mul_output_0">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>77</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3780" name="onnx::Cast_6025" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_6025, onnx::Cast_6026, onnx::Div_6024, onnx::Unsqueeze_6027"/>
-			</rt_info>
+		<layer id="3516" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Softmax" type="SoftMax" version="opset8">
+			<data axis="-1" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>77</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_6025,onnx::Cast_6026,onnx::Unsqueeze_6027"/>
+				<port id="1" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Cast_output_0,/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Softmax_output_0">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>77</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3781" name="onnx::Unsqueeze_6032" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6032"/>
-			</rt_info>
+		<layer id="3517" name="Constant_85921_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 768" offset="1661589202" size="983040" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6032">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3782" name="onnx::Concat_6033" type="Unsqueeze" version="opset1">
+		<layer id="3518" name="Constant_85921" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6033, onnx::Unsqueeze_6032"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>768</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6033">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3783" name="onnx::Reshape_6034" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6034"/>
-			</rt_info>
+		<layer id="3519" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/to_v/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>768</dim>
 				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>768</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_6034">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/to_v/MatMul_output_0">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3784" name="q.179" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="q.179"/>
-			</rt_info>
+		<layer id="3520" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Constant_4" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="44458512" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Constant_4_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3521" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Reshape_4" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
+					<dim>77</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>3</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.179">
-					<dim>16</dim>
-					<dim>1024</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Reshape_4_output_0">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>8</dim>
 					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3785" name="Constant_148286" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640" offset="3235305264" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.175, onnx::MatMul_9002"/>
-			</rt_info>
+		<layer id="3522" name="Constant_28134" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>640</dim>
-					<dim>640</dim>
+				<port id="0" precision="I64">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3786" name="k.175" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.175, onnx::MatMul_9002"/>
-			</rt_info>
+		<layer id="3523" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Transpose_2" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
+					<dim>77</dim>
+					<dim>8</dim>
+					<dim>80</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>640</dim>
-					<dim>640</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.175">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Transpose_2_output_0">
 					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
+					<dim>8</dim>
+					<dim>77</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3787" name="Constant_108717" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
+		<layer id="3524" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="44458544" size="24" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Constant_5_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3788" name="onnx::Gather_6036" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6036, onnx::Gather_6039, onnx::Gather_6042"/>
-			</rt_info>
+		<layer id="3525" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Reshape_5" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="I64" names="onnx::Gather_6036,onnx::Gather_6039,onnx::Gather_6042">
-					<dim>3</dim>
+					<dim>8</dim>
+					<dim>77</dim>
+					<dim>80</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="3789" name="onnx::Gather_6043" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6043"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_6043"/>
-			</output>
-		</layer>
-		<layer id="3790" name="Constant_25127" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_25127"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3791" name="onnx::Div_6044" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_25127, onnx::Div_6044, onnx::Gather_6043"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
+				<port id="1" precision="I64">
 					<dim>3</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_6044"/>
-			</output>
-		</layer>
-		<layer id="3792" name="onnx::Div_6045" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_6045"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_6045"/>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Reshape_5_output_0">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>80</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3793" name="onnx::Cast_6046" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_6046, onnx::Cast_6047, onnx::Div_6045, onnx::Unsqueeze_6048"/>
-			</rt_info>
+		<layer id="3526" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/MatMul_1" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="false" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>77</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>80</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_6046,onnx::Cast_6047,onnx::Unsqueeze_6048"/>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/MatMul_1_output_0">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3794" name="onnx::Unsqueeze_6056" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6056"/>
-			</rt_info>
+		<layer id="3527" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Constant_8" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="41834480" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6056">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Constant_8_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3795" name="onnx::Concat_6057" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6057, onnx::Unsqueeze_6056"/>
-			</rt_info>
+		<layer id="3528" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Reshape_6" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
+				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6057">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Reshape_6_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>1024</dim>
+					<dim>80</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3796" name="onnx::Reshape_6058" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_6058"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
+		<layer id="3529" name="Constant_28160" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="3" precision="I64">
+				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3797" name="onnx::Transpose_6059" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_6059"/>
-			</rt_info>
+		<layer id="3530" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Transpose_4" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>8</dim>
 					<dim>1024</dim>
-					<dim>640</dim>
+					<dim>80</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_6059">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Transpose_4_output_0">
 					<dim>2</dim>
 					<dim>1024</dim>
 					<dim>8</dim>
@@ -64024,21 +58201,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="3798" name="Constant_25240" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_25240"/>
-			</rt_info>
+		<layer id="3531" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Constant_9" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="39374238" size="24" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Constant_9_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3799" name="onnx::Reshape_6060" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6060"/>
-			</rt_info>
+		<layer id="3532" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Reshape_7" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -64047,469 +58219,441 @@
 					<dim>80</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_6060">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/Reshape_7_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>1024</dim>
-					<dim>80</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3800" name="onnx::Gather_6037" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6037"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_6037"/>
-			</output>
-		</layer>
-		<layer id="3801" name="Constant_25119" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_25119"/>
-			</rt_info>
+		<layer id="3533" name="Constant_85928_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640" offset="1662572242" size="819200" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3802" name="onnx::Unsqueeze_6038" type="Gather" version="opset8">
-			<data batch_dims="0"/>
+		<layer id="3534" name="Constant_85928" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_25119, onnx::Gather_6037, onnx::Unsqueeze_6038"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_6038"/>
-			</output>
-		</layer>
-		<layer id="3803" name="onnx::Mul_6061" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6061"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_6061"/>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3804" name="onnx::Unsqueeze_6062" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6061, onnx::Unsqueeze_6062"/>
-			</rt_info>
+		<layer id="3535" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/to_out.0/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_6062"/>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/to_out.0/MatMul_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3805" name="onnx::Unsqueeze_6067" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6067"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6067">
+		<layer id="3536" name="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/to_out.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
+				<port id="0" precision="FP32">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
+				</port>
+			</input>
+			<output>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/attn2/to_out.0/Add_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3806" name="onnx::Concat_6068" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6068, onnx::Unsqueeze_6067"/>
-			</rt_info>
+		<layer id="3537" name="/up_blocks.2/attentions.2/transformer_blocks.0/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6068">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/Add_1_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3807" name="Constant_89571" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_25123, onnx::Concat_6070, onnx::Gather_6040, onnx::Unsqueeze_6041, onnx::Unsqueeze_6069"/>
-			</rt_info>
+		<layer id="3538" name="Constant_28172" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3808" name="Constant_25123" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_25123"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3809" name="onnx::Unsqueeze_6041" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_25123, onnx::Concat_6070, onnx::Gather_6040, onnx::Unsqueeze_6041, onnx::Unsqueeze_6069"/>
-			</rt_info>
+		<layer id="3539" name="/up_blocks.2/attentions.2/transformer_blocks.0/norm3/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_6070">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/norm3/Div_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3810" name="onnx::Div_6063" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_6063"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_6063"/>
-			</output>
-		</layer>
-		<layer id="3811" name="onnx::Cast_6064" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_6064, onnx::Cast_6065, onnx::Div_6063, onnx::Unsqueeze_6066"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_6064,onnx::Cast_6065,onnx::Unsqueeze_6066"/>
-			</output>
-		</layer>
-		<layer id="3812" name="onnx::Unsqueeze_6071" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6071"/>
-			</rt_info>
+		<layer id="3540" name="Constant_87148_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="1663391442" size="1280" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6071">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3813" name="onnx::Concat_6072" type="Unsqueeze" version="opset1">
+		<layer id="3541" name="Constant_87148" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6072, onnx::Unsqueeze_6071"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6072">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3814" name="onnx::Reshape_6073" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6073"/>
-			</rt_info>
+		<layer id="3542" name="/up_blocks.2/attentions.2/transformer_blocks.0/norm3/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_6073">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/norm3/Mul_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3815" name="k.179" type="Reshape" version="opset1">
-			<data special_zero="true"/>
+		<layer id="3543" name="Constant_87149_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 640" offset="1663392722" size="1280" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3544" name="Constant_87149" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="k.179"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.179">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3816" name="onnx::Mul_6114" type="Einsum" version="opset7">
-			<data equation="bid,bjd-&gt;bij"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6114"/>
-			</rt_info>
+		<layer id="3545" name="/up_blocks.2/attentions.2/transformer_blocks.0/norm3/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
+					<dim>2</dim>
 					<dim>1024</dim>
-					<dim>80</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_6114">
-					<dim>16</dim>
-					<dim>1024</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/norm3/Add_1_output_0">
+					<dim>2</dim>
 					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3817" name="Constant_150533" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1" offset="82029992" size="4"/>
+		<layer id="3546" name="Constant_85936_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="5120, 640" offset="1663394002" size="6553600" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>5120</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3818" name="onnx::Softmax_6116" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3547" name="Constant_85936" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Softmax_6116"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>5120</dim>
+					<dim>640</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>5120</dim>
+					<dim>640</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3548" name="/up_blocks.2/attentions.2/transformer_blocks.0/ff/net.0/proj/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>1024</dim>
+					<dim>2</dim>
 					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>5120</dim>
+					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Softmax_6116">
-					<dim>16</dim>
-					<dim>1024</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/ff/net.0/proj/MatMul_output_0">
+					<dim>2</dim>
 					<dim>1024</dim>
+					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3819" name="attn.87" type="SoftMax" version="opset8">
-			<data axis="-1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="attn.87"/>
-			</rt_info>
+		<layer id="3549" name="/up_blocks.2/attentions.2/transformer_blocks.0/ff/net.0/proj/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>1024</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>5120</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
 					<dim>1024</dim>
+					<dim>5120</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="attn.87">
-					<dim>16</dim>
-					<dim>1024</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/ff/net.0/proj/Add_output_0">
+					<dim>2</dim>
 					<dim>1024</dim>
+					<dim>5120</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3820" name="Constant_148293" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640" offset="3236943664" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9003, v.175"/>
-			</rt_info>
+		<layer id="3550" name="Constant_78620" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>640</dim>
-					<dim>640</dim>
+				<port id="0" precision="I64">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3821" name="v.175" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9003, v.175"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>640</dim>
-					<dim>640</dim>
-				</port>
-			</input>
+		<layer id="3551" name="Constant_78621" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
-				<port id="2" precision="FP32" names="v.175">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
+				<port id="0" precision="I64">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3822" name="Constant_108786" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
+		<layer id="3552" name="Constant_78617" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>2</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3823" name="onnx::Gather_6075" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6075, onnx::Gather_6078, onnx::Gather_6081"/>
-			</rt_info>
+		<layer id="3553" name="/up_blocks.2/attentions.2/transformer_blocks.0/ff/net.0/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>1024</dim>
-					<dim>640</dim>
+					<dim>5120</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_6075,onnx::Gather_6078,onnx::Gather_6081">
+				<port id="1" precision="I64" names="/up_blocks.2/attentions.2/transformer_blocks.0/ff/net.0/Shape_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3824" name="onnx::Gather_6082" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6082"/>
-			</rt_info>
+		<layer id="3554" name="/up_blocks.2/attentions.2/transformer_blocks.0/ff/net.0/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Gather_6082"/>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.2/transformer_blocks.0/ff/net.0/Constant_output_0">
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3825" name="Constant_25368" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_25368"/>
-			</rt_info>
+		<layer id="3555" name="Constant_28189" type="Const" version="opset1">
+			<data element_type="i64" shape="" offset="2143392" size="8" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64" />
 			</output>
 		</layer>
-		<layer id="3826" name="onnx::Div_6083" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_25368, onnx::Div_6083, onnx::Gather_6082"/>
-			</rt_info>
+		<layer id="3556" name="/up_blocks.2/attentions.2/transformer_blocks.0/ff/net.0/Gather" type="Gather" version="opset8">
+			<data batch_dims="0" />
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
+				<port id="1" precision="I64">
+					<dim>1</dim>
+				</port>
+				<port id="2" precision="I64" />
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_6083"/>
+				<port id="3" precision="I64" names="/up_blocks.2/attentions.2/transformer_blocks.0/ff/net.0/Gather_output_0">
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3827" name="onnx::Div_6084" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_6084"/>
-			</rt_info>
+		<layer id="3557" name="/up_blocks.2/attentions.2/transformer_blocks.0/ff/net.0/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="2143400" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_6084"/>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.2/transformer_blocks.0/ff/net.0/Constant_2_output_0">
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3828" name="onnx::Cast_6085" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_6085, onnx::Cast_6086, onnx::Div_6084, onnx::Unsqueeze_6087"/>
-			</rt_info>
+		<layer id="3558" name="/up_blocks.2/attentions.2/transformer_blocks.0/ff/net.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="I64">
+					<dim>1</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_6085,onnx::Cast_6086,onnx::Unsqueeze_6087"/>
+				<port id="2" precision="I64" names="/up_blocks.2/attentions.2/transformer_blocks.0/ff/net.0/Add_output_0">
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3829" name="onnx::Unsqueeze_6095" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6095"/>
-			</rt_info>
+		<layer id="3559" name="/up_blocks.2/attentions.2/transformer_blocks.0/ff/net.0/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6095">
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.2/transformer_blocks.0/ff/net.0/Constant_3_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3830" name="onnx::Concat_6096" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6096, onnx::Unsqueeze_6095"/>
-			</rt_info>
+		<layer id="3560" name="/up_blocks.2/attentions.2/transformer_blocks.0/ff/net.0/Div" type="Divide" version="opset1">
+			<data auto_broadcast="numpy" m_pythondiv="true" />
 			<input>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64">
+					<dim>1</dim>
+				</port>
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6096">
+				<port id="2" precision="I64" names="/up_blocks.2/attentions.2/transformer_blocks.0/ff/net.0/Div_output_0,/up_blocks.2/attentions.2/transformer_blocks.0/ff/net.0/Mul_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3831" name="onnx::Reshape_6097" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_6097"/>
-			</rt_info>
+		<layer id="3561" name="Constant_78616" type="Const" version="opset1">
+			<data element_type="i32" shape="1" offset="12970946" size="4" />
+			<output>
+				<port id="0" precision="I32">
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3562" name="ScatterUpdate_78622" type="ScatterUpdate" version="opset3">
 			<input>
 				<port id="0" precision="I64">
-					<dim>2</dim>
+					<dim>3</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>1</dim>
@@ -64517,14777 +58661,2092 @@
 				<port id="2" precision="I64">
 					<dim>1</dim>
 				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3832" name="onnx::Transpose_6098" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_6098"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="3" precision="I32">
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_6098">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>8</dim>
-					<dim>80</dim>
+				<port id="4" precision="I64">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3833" name="Constant_25481" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_25481"/>
-			</rt_info>
+		<layer id="3563" name="Constant_78625" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970950" size="24" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>4</dim>
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3834" name="onnx::Reshape_6099" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6099"/>
-			</rt_info>
+		<layer id="3564" name="/up_blocks.2/attentions.2/transformer_blocks.0/ff/net.0/Slice" type="StridedSlice" version="opset1">
+			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask="" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>1024</dim>
-					<dim>8</dim>
-					<dim>80</dim>
+					<dim>5120</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>3</dim>
+				</port>
+				<port id="2" precision="I64">
+					<dim>3</dim>
+				</port>
+				<port id="3" precision="I64">
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_6099">
+				<port id="4" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/ff/net.0/Slice_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>1024</dim>
-					<dim>80</dim>
+					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3835" name="onnx::Gather_6076" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6076"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_6076"/>
-			</output>
-		</layer>
-		<layer id="3836" name="Constant_25360" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_25360"/>
-			</rt_info>
+		<layer id="3565" name="Constant_78689" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3837" name="onnx::Unsqueeze_6077" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_25360, onnx::Gather_6076, onnx::Unsqueeze_6077"/>
-			</rt_info>
-			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_6077"/>
-			</output>
-		</layer>
-		<layer id="3838" name="onnx::Mul_6100" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6100"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_6100"/>
 			</output>
 		</layer>
-		<layer id="3839" name="onnx::Unsqueeze_6101" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6100, onnx::Unsqueeze_6101"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
+		<layer id="3566" name="Constant_78688" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_6101"/>
+				<port id="0" precision="I64">
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3840" name="onnx::Unsqueeze_6106" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6106"/>
-			</rt_info>
+		<layer id="3567" name="Constant_78687" type="Const" version="opset1">
+			<data element_type="i32" shape="1" offset="12970946" size="4" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6106">
+				<port id="0" precision="I32">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3841" name="onnx::Concat_6107" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6107, onnx::Unsqueeze_6106"/>
-			</rt_info>
+		<layer id="3568" name="ScatterUpdate_78690" type="ScatterUpdate" version="opset3">
 			<input>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64">
+					<dim>3</dim>
+				</port>
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
+				<port id="2" precision="I64">
+					<dim>1</dim>
+				</port>
+				<port id="3" precision="I32">
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6107">
-					<dim>1</dim>
+				<port id="4" precision="I64">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3842" name="Constant_89598" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_25364, onnx::Concat_6109, onnx::Gather_6079, onnx::Unsqueeze_6080, onnx::Unsqueeze_6108"/>
-			</rt_info>
+		<layer id="3569" name="Constant_78691" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>1</dim>
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3843" name="Constant_25364" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_25364"/>
-			</rt_info>
+		<layer id="3570" name="/up_blocks.2/attentions.2/transformer_blocks.0/ff/net.0/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.2/transformer_blocks.0/ff/net.0/Constant_5_output_0">
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3844" name="onnx::Unsqueeze_6080" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_25364, onnx::Concat_6109, onnx::Gather_6079, onnx::Unsqueeze_6080, onnx::Unsqueeze_6108"/>
-			</rt_info>
+		<layer id="3571" name="/up_blocks.2/attentions.2/transformer_blocks.0/ff/net.0/Mul_1" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="I64">
-					<dim>3</dim>
+					<dim>1</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_6109">
+				<port id="2" precision="I64" names="/up_blocks.2/attentions.2/transformer_blocks.0/ff/net.0/Mul_1_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3845" name="onnx::Div_6102" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_6102"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_6102"/>
-			</output>
-		</layer>
-		<layer id="3846" name="onnx::Cast_6103" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_6103, onnx::Cast_6104, onnx::Div_6102, onnx::Unsqueeze_6105"/>
-			</rt_info>
+		<layer id="3572" name="ScatterUpdate_78692" type="ScatterUpdate" version="opset3">
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_6103,onnx::Cast_6104,onnx::Unsqueeze_6105"/>
-			</output>
-		</layer>
-		<layer id="3847" name="onnx::Unsqueeze_6110" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6110"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6110">
+				<port id="0" precision="I64">
+					<dim>3</dim>
+				</port>
+				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="3848" name="onnx::Concat_6111" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6111, onnx::Unsqueeze_6110"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="2" precision="I64">
+					<dim>1</dim>
+				</port>
+				<port id="3" precision="I32">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6111">
-					<dim>1</dim>
+				<port id="4" precision="I64">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3849" name="onnx::Reshape_6112" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6112"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
+		<layer id="3573" name="Constant_78695" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970950" size="24" />
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_6112">
+				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3850" name="v.179" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="v.179"/>
-			</rt_info>
+		<layer id="3574" name="/up_blocks.2/attentions.2/transformer_blocks.0/ff/net.0/Slice_1" type="StridedSlice" version="opset1">
+			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask="" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>1024</dim>
-					<dim>80</dim>
+					<dim>5120</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>3</dim>
 				</port>
+				<port id="2" precision="I64">
+					<dim>3</dim>
+				</port>
+				<port id="3" precision="I64">
+					<dim>3</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.179">
-					<dim>16</dim>
+				<port id="4" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/ff/net.0/Slice_1_output_0">
+					<dim>2</dim>
 					<dim>1024</dim>
-					<dim>80</dim>
+					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3851" name="out.87" type="Einsum" version="opset7">
-			<data equation="bij,bjd-&gt;bid"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="out.87"/>
-			</rt_info>
+		<layer id="3575" name="/up_blocks.2/attentions.2/transformer_blocks.0/ff/net.0/Mul_3" type="Gelu" version="opset7">
+			<data approximation_mode="ERF" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>1024</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>16</dim>
+					<dim>2</dim>
 					<dim>1024</dim>
-					<dim>80</dim>
+					<dim>2560</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="out.87">
-					<dim>16</dim>
+				<port id="1" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/ff/net.0/Mul_3_output_0">
+					<dim>2</dim>
 					<dim>1024</dim>
-					<dim>80</dim>
+					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3852" name="onnx::Gather_6119" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6119, onnx::Gather_6122, onnx::Gather_6125"/>
-			</rt_info>
+		<layer id="3576" name="/up_blocks.2/attentions.2/transformer_blocks.0/ff/net.0/Mul_4" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
+					<dim>2</dim>
 					<dim>1024</dim>
-					<dim>80</dim>
+					<dim>2560</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>2560</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_6119,onnx::Gather_6122,onnx::Gather_6125">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/ff/net.0/Mul_4_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3853" name="onnx::Gather_6120" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6120"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_6120"/>
-			</output>
-		</layer>
-		<layer id="3854" name="Constant_25606" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_25606"/>
-			</rt_info>
+		<layer id="3577" name="Constant_85944_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 2560" offset="1669947602" size="3276800" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>2560</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3855" name="onnx::Div_6121" type="Gather" version="opset8">
-			<data batch_dims="0"/>
+		<layer id="3578" name="Constant_85944" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_25606, onnx::Div_6121, onnx::Gather_6120"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>2560</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_6121"/>
-			</output>
-		</layer>
-		<layer id="3856" name="onnx::Div_6128" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_6128"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_6128"/>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>2560</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3857" name="onnx::Cast_6129" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_6129, onnx::Cast_6130, onnx::Div_6128, onnx::Unsqueeze_6131"/>
-			</rt_info>
+		<layer id="3579" name="/up_blocks.2/attentions.2/transformer_blocks.0/ff/net.2/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>2560</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>2560</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_6129,onnx::Cast_6130,onnx::Unsqueeze_6131"/>
-			</output>
-		</layer>
-		<layer id="3858" name="onnx::Unsqueeze_6133" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6133"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6133">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/ff/net.2/MatMul_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3859" name="onnx::Concat_6134" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6134, onnx::Unsqueeze_6133"/>
-			</rt_info>
+		<layer id="3580" name="/up_blocks.2/attentions.2/transformer_blocks.0/ff/net.2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6134">
 					<dim>1</dim>
+					<dim>640</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="3860" name="Constant_90870" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="19464604" size="16"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_6141"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
+				<port id="1" precision="FP32">
 					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="3861" name="Constant_90871" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
+			</input>
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/ff/net.2/Add_output_0">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3862" name="Gather_90872" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_6141"/>
-			</rt_info>
+		<layer id="3581" name="/up_blocks.2/attentions.2/transformer_blocks.0/Add_2" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
 					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/transformer_blocks.0/Add_2_output_0">
 					<dim>2</dim>
+					<dim>1024</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3863" name="onnx::Reshape_6141" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_6141"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>2</dim>
-				</port>
-			</input>
+		<layer id="3582" name="/up_blocks.2/attentions.2/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="56093768" size="32" />
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_6141">
+				<port id="0" precision="I64" names="/up_blocks.2/attentions.2/Constant_1_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3864" name="onnx::Transpose_6142" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_6142"/>
-			</rt_info>
+		<layer id="3583" name="/up_blocks.2/attentions.2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
+					<dim>2</dim>
 					<dim>1024</dim>
-					<dim>80</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_6142">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/Reshape_1_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3865" name="Constant_25727" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_25727"/>
-			</rt_info>
+		<layer id="3584" name="Constant_28354" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="13790206" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3866" name="onnx::Reshape_6143" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6143"/>
-			</rt_info>
+		<layer id="3585" name="/up_blocks.2/attentions.2/Transpose_1" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+					<dim>640</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_6143">
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/Transpose_1_output_0">
 					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>8</dim>
-					<dim>80</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3867" name="onnx::Div_6144" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_6144"/>
-			</rt_info>
+		<layer id="3586" name="up_blocks.2.attentions.2.proj_out.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640, 1, 1" offset="1673224402" size="819200" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_6144"/>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3868" name="onnx::Cast_6145" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
+		<layer id="3587" name="up_blocks.2.attentions.2.proj_out.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_6145, onnx::Cast_6146, onnx::Div_6144, onnx::Unsqueeze_6147"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_6145,onnx::Cast_6146,onnx::Unsqueeze_6147"/>
-			</output>
-		</layer>
-		<layer id="3869" name="onnx::Unsqueeze_6150" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6150"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6150">
+				<port id="1" precision="FP32" names="up_blocks.2.attentions.2.proj_out.weight">
+					<dim>640</dim>
+					<dim>640</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3870" name="onnx::Concat_6151" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6151, onnx::Unsqueeze_6150"/>
-			</rt_info>
+		<layer id="3588" name="/up_blocks.2/attentions.2/proj_out/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>640</dim>
+					<dim>640</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6151">
-					<dim>1</dim>
+				<port id="2" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3871" name="Constant_89625" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_25610, onnx::Concat_6153, onnx::Gather_6123, onnx::Unsqueeze_6124, onnx::Unsqueeze_6152"/>
-			</rt_info>
+		<layer id="3589" name="Reshape_28376_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="1674043602" size="1280" />
 			<output>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3872" name="Constant_25610" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
+		<layer id="3590" name="Reshape_28376" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_25610"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3873" name="onnx::Unsqueeze_6124" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_25610, onnx::Concat_6153, onnx::Gather_6123, onnx::Unsqueeze_6124, onnx::Unsqueeze_6152"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_6153">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3874" name="onnx::Gather_6126" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6126"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_6126"/>
-			</output>
-		</layer>
-		<layer id="3875" name="Constant_25614" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_25614"/>
-			</rt_info>
+		<layer id="3591" name="/up_blocks.2/attentions.2/proj_out/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/proj_out/Conv_output_0">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3876" name="onnx::Unsqueeze_6127" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_25614, onnx::Gather_6126, onnx::Unsqueeze_6127"/>
-			</rt_info>
+		<layer id="3592" name="/up_blocks.2/attentions.2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_6127"/>
+				<port id="2" precision="FP32" names="/up_blocks.2/attentions.2/Add_output_0">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3877" name="onnx::Mul_6148" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6148"/>
-			</rt_info>
+		<layer id="3593" name="ShapeOf_28407" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64" names="onnx::Mul_6148"/>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3878" name="onnx::Unsqueeze_6149" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6148, onnx::Unsqueeze_6149"/>
-			</rt_info>
+		<layer id="3594" name="Convert_28408" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="I64">
+					<dim>4</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_6149"/>
+				<port id="1" precision="FP32">
+					<dim>4</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3879" name="onnx::Unsqueeze_6154" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6154"/>
-			</rt_info>
+		<layer id="3595" name="/up_blocks.2/upsamplers.0/Constant" type="Const" version="opset1">
+			<data element_type="f32" shape="4" offset="992563906" size="16" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6154">
-					<dim>1</dim>
+				<port id="0" precision="FP32" names="/up_blocks.2/upsamplers.0/Constant_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3880" name="onnx::Concat_6155" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6155, onnx::Unsqueeze_6154"/>
-			</rt_info>
+		<layer id="3596" name="Multiply_28409" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>4</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6155">
-					<dim>1</dim>
+				<port id="2" precision="FP32">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3881" name="onnx::Reshape_6156" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6156"/>
-			</rt_info>
+		<layer id="3597" name="Convert_28410" type="Convert" version="opset1">
+			<data destination_type="i64" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_6156">
-					<dim>3</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3882" name="onnx::MatMul_6157" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_6157"/>
-			</rt_info>
+		<layer id="3598" name="/up_blocks.2/upsamplers.0/Resize" type="Interpolate" version="opset4">
+			<data mode="nearest" shape_calculation_mode="scales" coordinate_transformation_mode="asymmetric" nearest_mode="floor" antialias="false" pads_begin="0, 0, 0, 0" pads_end="0, 0, 0, 0" cube_coeff="-0.75" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>8</dim>
-					<dim>80</dim>
+					<dim>640</dim>
+					<dim>32</dim>
+					<dim>32</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>3</dim>
+					<dim>4</dim>
+				</port>
+				<port id="2" precision="FP32">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_6157">
+				<port id="3" precision="FP32" names="/up_blocks.2/upsamplers.0/Resize_output_0">
 					<dim>2</dim>
-					<dim>1024</dim>
 					<dim>640</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3883" name="Constant_148300" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640" offset="3238582064" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6159, onnx::MatMul_9008"/>
-			</rt_info>
+		<layer id="3599" name="up_blocks.2.upsamplers.0.conv.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="640, 640, 3, 3" offset="1674044882" size="7372800" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>640</dim>
 					<dim>640</dim>
+					<dim>3</dim>
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3884" name="onnx::Add_6159" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="3600" name="up_blocks.2.upsamplers.0.conv.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6159, onnx::MatMul_9008"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>640</dim>
+					<dim>640</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="up_blocks.2.upsamplers.0.conv.weight">
+					<dim>640</dim>
+					<dim>640</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3601" name="/up_blocks.2/upsamplers.0/conv/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1024</dim>
 					<dim>640</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>640</dim>
 					<dim>640</dim>
+					<dim>3</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_6159">
+				<port id="2" precision="FP32">
 					<dim>2</dim>
-					<dim>1024</dim>
 					<dim>640</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3602" name="Reshape_28432_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="1681417682" size="1280" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3885" name="input.868" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3603" name="Reshape_28432" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.868"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-					<dim>1</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
 					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.868">
-					<dim>2</dim>
-					<dim>1024</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3886" name="input.872" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.872"/>
-			</rt_info>
+		<layer id="3604" name="/up_blocks.2/upsamplers.0/conv/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1024</dim>
 					<dim>640</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
+					<dim>1</dim>
 					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.872">
+				<port id="2" precision="FP32" names="/up_blocks.2/upsamplers.0/conv/Conv_output_0">
 					<dim>2</dim>
-					<dim>1024</dim>
 					<dim>640</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3887" name="Constant_25848" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_25848"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3888" name="onnx::Mul_6170" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6166, onnx::Div_6169, onnx::Mul_6170, onnx::Pow_6163, onnx::ReduceMean_6165, onnx::Sqrt_6168, onnx::Sub_6162"/>
-			</rt_info>
+		<layer id="3605" name="/up_blocks.3/Concat" type="Concat" version="opset1">
+			<data axis="1" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1024</dim>
 					<dim>640</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_6170">
+				<port id="2" precision="FP32" names="/up_blocks.3/Concat_output_0">
 					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
+					<dim>960</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3889" name="Constant_150535" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="3240220464" size="2560"/>
+		<layer id="3606" name="up_blocks.3.resnets.0.conv_shortcut.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 960, 1, 1" offset="1681418962" size="614400" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>960</dim>
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3890" name="onnx::Add_6171" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3607" name="up_blocks.3.resnets.0.conv_shortcut.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6171"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>960</dim>
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_6171">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3891" name="Constant_150536" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="3240223024" size="2560"/>
-			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32" names="up_blocks.3.resnets.0.conv_shortcut.weight">
+					<dim>320</dim>
+					<dim>960</dim>
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>640</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3892" name="onnx::MatMul_6172" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_6172"/>
-			</rt_info>
+		<layer id="3608" name="/up_blocks.3/resnets.0/conv_shortcut/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
+					<dim>960</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>960</dim>
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>640</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_6172">
+				<port id="2" precision="FP32">
 					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3893" name="Constant_148308" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640" offset="3240225584" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9009, q.183"/>
-			</rt_info>
+		<layer id="3609" name="Reshape_28816_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="1682033362" size="640" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>640</dim>
-					<dim>640</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3894" name="q.183" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="3610" name="Reshape_28816" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9009, q.183"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
+			</input>
+			<output>
 				<port id="1" precision="FP32">
-					<dim>640</dim>
-					<dim>640</dim>
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3611" name="/up_blocks.3/resnets.0/conv_shortcut/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.183">
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.0/conv_shortcut/Conv_output_0">
 					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3895" name="Constant_108855" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
+		<layer id="3612" name="/up_blocks.3/resnets.0/norm1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
+				<port id="0" precision="I64" names="/up_blocks.3/resnets.0/norm1/Constant_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3896" name="onnx::Gather_6179" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6179, onnx::Gather_6182, onnx::Gather_6185"/>
-			</rt_info>
+		<layer id="3613" name="/up_blocks.3/resnets.0/norm1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
+					<dim>960</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
-			</input>
-			<output>
-				<port id="1" precision="I64" names="onnx::Gather_6179,onnx::Gather_6182,onnx::Gather_6185">
+				<port id="1" precision="I64">
 					<dim>3</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="3897" name="onnx::Gather_6186" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6186"/>
-			</rt_info>
+			</input>
 			<output>
-				<port id="0" precision="I64" names="onnx::Gather_6186"/>
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.0/norm1/Reshape_output_0">
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>122880</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3898" name="Constant_25874" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_25874"/>
-			</rt_info>
+		<layer id="3614" name="Constant_28470" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64">
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3899" name="onnx::Div_6187" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_25874, onnx::Div_6187, onnx::Gather_6186"/>
-			</rt_info>
+		<layer id="3615" name="MVN_28471" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>122880</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>1</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_6187"/>
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.0/norm1/InstanceNormalization_output_0">
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>122880</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3900" name="onnx::Div_6188" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_6188"/>
-			</rt_info>
+		<layer id="3616" name="/up_blocks.3/resnets.0/norm1/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>960</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_6188"/>
+				<port id="1" precision="I64" names="/up_blocks.3/resnets.0/norm1/Shape_output_0">
+					<dim>4</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3901" name="onnx::Cast_6189" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_6189, onnx::Cast_6190, onnx::Div_6188, onnx::Unsqueeze_6191"/>
-			</rt_info>
+		<layer id="3617" name="/up_blocks.3/resnets.0/norm1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>122880</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_6189,onnx::Cast_6190,onnx::Unsqueeze_6191"/>
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.0/norm1/Reshape_1_output_0">
+					<dim>2</dim>
+					<dim>960</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3902" name="onnx::Unsqueeze_6199" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6199"/>
-			</rt_info>
+		<layer id="3618" name="Constant_87152_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 960, 1, 1" offset="1682034002" size="1920" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6199">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>960</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3903" name="onnx::Concat_6200" type="Unsqueeze" version="opset1">
+		<layer id="3619" name="Constant_87152" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6200, onnx::Unsqueeze_6199"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>960</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6200">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>960</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3904" name="onnx::Reshape_6201" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_6201"/>
-			</rt_info>
+		<layer id="3620" name="/up_blocks.3/resnets.0/norm1/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>960</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>960</dim>
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64">
-					<dim>4</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.0/norm1/Mul_output_0">
+					<dim>2</dim>
+					<dim>960</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3905" name="onnx::Transpose_6202" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_6202"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
+		<layer id="3621" name="Constant_87153_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 960, 1, 1" offset="1682035922" size="1920" />
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_6202">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>8</dim>
-					<dim>80</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>960</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3906" name="Constant_25987" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
+		<layer id="3622" name="Constant_87153" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_25987"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>960</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>960</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3907" name="onnx::Reshape_6203" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6203"/>
-			</rt_info>
+		<layer id="3623" name="/up_blocks.3/resnets.0/norm1/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>8</dim>
-					<dim>80</dim>
+					<dim>960</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>960</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_6203">
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.0/norm1/Add_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
+					<dim>960</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3908" name="onnx::Gather_6180" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6180"/>
-			</rt_info>
+		<layer id="3624" name="/up_blocks.3/resnets.0/nonlinearity/Mul" type="Swish" version="opset4">
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>960</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64" names="onnx::Gather_6180"/>
+				<port id="1" precision="FP32" names="/up_blocks.3/resnets.0/nonlinearity/Mul_output_0">
+					<dim>2</dim>
+					<dim>960</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3909" name="Constant_25866" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_25866"/>
-			</rt_info>
+		<layer id="3625" name="up_blocks.3.resnets.0.conv1.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 960, 3, 3" offset="1682037842" size="5529600" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>960</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3910" name="onnx::Unsqueeze_6181" type="Gather" version="opset8">
-			<data batch_dims="0"/>
+		<layer id="3626" name="up_blocks.3.resnets.0.conv1.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_25866, onnx::Gather_6180, onnx::Unsqueeze_6181"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>960</dim>
+					<dim>3</dim>
 					<dim>3</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_6181"/>
-			</output>
-		</layer>
-		<layer id="3911" name="onnx::Mul_6204" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6204"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_6204"/>
+				<port id="1" precision="FP32" names="up_blocks.3.resnets.0.conv1.weight">
+					<dim>320</dim>
+					<dim>960</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3912" name="onnx::Unsqueeze_6205" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6204, onnx::Unsqueeze_6205"/>
-			</rt_info>
+		<layer id="3627" name="/up_blocks.3/resnets.0/conv1/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>960</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>960</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_6205"/>
+				<port id="2" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3913" name="onnx::Unsqueeze_6210" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6210"/>
-			</rt_info>
+		<layer id="3628" name="Reshape_28595_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="1687567442" size="640" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6210">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3914" name="onnx::Concat_6211" type="Unsqueeze" version="opset1">
+		<layer id="3629" name="Reshape_28595" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6211, onnx::Unsqueeze_6210"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6211">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3915" name="Constant_89652" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_25870, onnx::Concat_6213, onnx::Gather_6183, onnx::Unsqueeze_6184, onnx::Unsqueeze_6212"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
+		<layer id="3630" name="/up_blocks.3/resnets.0/conv1/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
 					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.0/conv1/Conv_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3916" name="Constant_25870" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_25870"/>
-			</rt_info>
+		<layer id="3631" name="up_blocks.3.resnets.0.time_emb_proj.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 1280" offset="1687568082" size="819200" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>1280</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3917" name="onnx::Unsqueeze_6184" type="Gather" version="opset8">
-			<data batch_dims="0"/>
+		<layer id="3632" name="up_blocks.3.resnets.0.time_emb_proj.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_25870, onnx::Concat_6213, onnx::Gather_6183, onnx::Unsqueeze_6184, onnx::Unsqueeze_6212"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_6213">
-					<dim>1</dim>
+				<port id="1" precision="FP32" names="up_blocks.3.resnets.0.time_emb_proj.weight">
+					<dim>320</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3918" name="onnx::Div_6206" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_6206"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_6206"/>
-			</output>
-		</layer>
-		<layer id="3919" name="onnx::Cast_6207" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_6207, onnx::Cast_6208, onnx::Div_6206, onnx::Unsqueeze_6209"/>
-			</rt_info>
+		<layer id="3633" name="/up_blocks.3/resnets.0/time_emb_proj/Gemm/WithoutBiases" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>1280</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_6207,onnx::Cast_6208,onnx::Unsqueeze_6209"/>
+				<port id="2" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="3920" name="onnx::Unsqueeze_6214" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6214"/>
-			</rt_info>
+		<layer id="3634" name="Constant_87154_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320" offset="1688387282" size="640" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6214">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3921" name="onnx::Concat_6215" type="Unsqueeze" version="opset1">
+		<layer id="3635" name="Constant_87154" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6215, onnx::Unsqueeze_6214"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6215">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3922" name="onnx::Reshape_6216" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6216"/>
-			</rt_info>
+		<layer id="3636" name="/up_blocks.3/resnets.0/time_emb_proj/Gemm" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
 				</port>
-				<port id="2" precision="I64">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_6216">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.0/time_emb_proj/Gemm_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3923" name="q.187" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="q.187"/>
-			</rt_info>
+		<layer id="3637" name="/up_blocks.3/resnets.0/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.3/resnets.0/Constant_output_0">
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3638" name="/up_blocks.3/resnets.0/Unsqueeze" type="Unsqueeze" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
+					<dim>320</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>3</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.187">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.0/Unsqueeze_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3924" name="Constant_148315" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 768" offset="3241863984" size="1966080"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.183, onnx::MatMul_9010"/>
-			</rt_info>
+		<layer id="3639" name="/up_blocks.3/resnets.0/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="7064752" size="8" />
 			<output>
+				<port id="0" precision="I64" names="/up_blocks.3/resnets.0/Constant_1_output_0">
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3640" name="/up_blocks.3/resnets.0/Unsqueeze_1" type="Unsqueeze" version="opset1">
+			<input>
 				<port id="0" precision="FP32">
-					<dim>640</dim>
-					<dim>768</dim>
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.0/Unsqueeze_1_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3925" name="k.183" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.183, onnx::MatMul_9010"/>
-			</rt_info>
+		<layer id="3641" name="/up_blocks.3/resnets.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>77</dim>
-					<dim>768</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>640</dim>
-					<dim>768</dim>
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.183">
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.0/Add_output_0">
 					<dim>2</dim>
-					<dim>77</dim>
-					<dim>640</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3926" name="onnx::Transpose_6231" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_6231"/>
-			</rt_info>
+		<layer id="3642" name="/up_blocks.3/resnets.0/norm2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.3/resnets.0/norm2/Constant_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3643" name="/up_blocks.3/resnets.0/norm2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>77</dim>
-					<dim>640</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_6231">
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.0/norm2/Reshape_output_0">
 					<dim>2</dim>
-					<dim>77</dim>
-					<dim>8</dim>
-					<dim>80</dim>
+					<dim>32</dim>
+					<dim>40960</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3927" name="Constant_26108" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_26108"/>
-			</rt_info>
+		<layer id="3644" name="Constant_28643" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3928" name="onnx::Reshape_6232" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6232"/>
-			</rt_info>
+		<layer id="3645" name="MVN_28644" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>77</dim>
-					<dim>8</dim>
-					<dim>80</dim>
+					<dim>32</dim>
+					<dim>40960</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_6232">
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.0/norm2/InstanceNormalization_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>77</dim>
-					<dim>80</dim>
+					<dim>32</dim>
+					<dim>40960</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3929" name="k.187" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.187"/>
-			</rt_info>
+		<layer id="3646" name="/up_blocks.3/resnets.0/norm2/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>77</dim>
-					<dim>80</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.187">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>80</dim>
+				<port id="1" precision="I64" names="/up_blocks.3/resnets.0/norm2/Shape_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3930" name="onnx::Mul_6268" type="Einsum" version="opset7">
-			<data equation="bid,bjd-&gt;bij"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6268"/>
-			</rt_info>
+		<layer id="3647" name="/up_blocks.3/resnets.0/norm2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>40960</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>80</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_6268">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>77</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.0/norm2/Reshape_1_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3931" name="Constant_150537" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1" offset="82029992" size="4"/>
+		<layer id="3648" name="Constant_87155_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="1688387922" size="640" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>320</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3932" name="onnx::Softmax_6270" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3649" name="Constant_87155" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Softmax_6270"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>77</dim>
-				</port>
-				<port id="1" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>320</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Softmax_6270">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>77</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3933" name="attn.91" type="SoftMax" version="opset8">
-			<data axis="-1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="attn.91"/>
-			</rt_info>
+		<layer id="3650" name="/up_blocks.3/resnets.0/norm2/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>77</dim>
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="attn.91">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>77</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.0/norm2/Mul_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="3934" name="Constant_148322" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 768" offset="3243830064" size="1966080"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9011, v.183"/>
-			</rt_info>
+		<layer id="3651" name="Constant_87156_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="1688388562" size="640" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>640</dim>
-					<dim>768</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3935" name="v.183" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9011, v.183"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>77</dim>
-					<dim>768</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>640</dim>
-					<dim>768</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="v.183">
-					<dim>2</dim>
-					<dim>77</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3936" name="onnx::Transpose_6256" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_6256"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>77</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_6256">
-					<dim>2</dim>
-					<dim>77</dim>
-					<dim>8</dim>
-					<dim>80</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3937" name="Constant_26116" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_26116"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3938" name="onnx::Reshape_6257" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6257"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>77</dim>
-					<dim>8</dim>
-					<dim>80</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_6257">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>77</dim>
-					<dim>80</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3939" name="v.187" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="v.187"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>77</dim>
-					<dim>80</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="v.187">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>80</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3940" name="out.91" type="Einsum" version="opset7">
-			<data equation="bij,bjd-&gt;bid"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="out.91"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>77</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>80</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="out.91">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3941" name="onnx::Gather_6273" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6273, onnx::Gather_6276, onnx::Gather_6279"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="I64" names="onnx::Gather_6273,onnx::Gather_6276,onnx::Gather_6279">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3942" name="onnx::Gather_6274" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6274"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_6274"/>
-			</output>
-		</layer>
-		<layer id="3943" name="Constant_26128" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_26128"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3944" name="onnx::Div_6275" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_26128, onnx::Div_6275, onnx::Gather_6274"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Div_6275"/>
-			</output>
-		</layer>
-		<layer id="3945" name="onnx::Div_6282" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_6282"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_6282"/>
-			</output>
-		</layer>
-		<layer id="3946" name="onnx::Cast_6283" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_6283, onnx::Cast_6284, onnx::Div_6282, onnx::Unsqueeze_6285"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_6283,onnx::Cast_6284,onnx::Unsqueeze_6285"/>
-			</output>
-		</layer>
-		<layer id="3947" name="onnx::Unsqueeze_6287" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6287"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6287">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3948" name="onnx::Concat_6288" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6288, onnx::Unsqueeze_6287"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6288">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3949" name="Constant_90880" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="19464604" size="16"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_6295"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3950" name="Constant_90881" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3951" name="Gather_90882" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_6295"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>2</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64">
-					<dim>2</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3952" name="onnx::Reshape_6295" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_6295"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>2</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_6295">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3953" name="onnx::Transpose_6296" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_6296"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_6296">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3954" name="Constant_26249" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_26249"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3955" name="onnx::Reshape_6297" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6297"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_6297">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>8</dim>
-					<dim>80</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3956" name="onnx::Div_6298" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_6298"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_6298"/>
-			</output>
-		</layer>
-		<layer id="3957" name="onnx::Cast_6299" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_6299, onnx::Cast_6300, onnx::Div_6298, onnx::Unsqueeze_6301"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_6299,onnx::Cast_6300,onnx::Unsqueeze_6301"/>
-			</output>
-		</layer>
-		<layer id="3958" name="onnx::Unsqueeze_6304" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6304"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6304">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3959" name="onnx::Concat_6305" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6305, onnx::Unsqueeze_6304"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6305">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3960" name="Constant_89679" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_26132, onnx::Concat_6307, onnx::Gather_6277, onnx::Unsqueeze_6278, onnx::Unsqueeze_6306"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3961" name="Constant_26132" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_26132"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3962" name="onnx::Unsqueeze_6278" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_26132, onnx::Concat_6307, onnx::Gather_6277, onnx::Unsqueeze_6278, onnx::Unsqueeze_6306"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_6307">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3963" name="onnx::Gather_6280" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6280"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_6280"/>
-			</output>
-		</layer>
-		<layer id="3964" name="Constant_26136" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_26136"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3965" name="onnx::Unsqueeze_6281" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_26136, onnx::Gather_6280, onnx::Unsqueeze_6281"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_6281"/>
-			</output>
-		</layer>
-		<layer id="3966" name="onnx::Mul_6302" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6302"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_6302"/>
-			</output>
-		</layer>
-		<layer id="3967" name="onnx::Unsqueeze_6303" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6302, onnx::Unsqueeze_6303"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_6303"/>
-			</output>
-		</layer>
-		<layer id="3968" name="onnx::Unsqueeze_6308" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6308"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6308">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3969" name="onnx::Concat_6309" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6309, onnx::Unsqueeze_6308"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6309">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3970" name="onnx::Reshape_6310" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6310"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_6310">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3971" name="onnx::MatMul_6311" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_6311"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>8</dim>
-					<dim>80</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_6311">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3972" name="Constant_148329" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640" offset="3245796144" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6313, onnx::MatMul_9032"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>640</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3973" name="onnx::Add_6313" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6313, onnx::MatMul_9032"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>640</dim>
-					<dim>640</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_6313">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3974" name="input.876" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.876"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="input.876">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3975" name="input.880" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.880"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="input.880">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3976" name="Constant_26370" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_26370"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3977" name="onnx::Mul_6324" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6320, onnx::Div_6323, onnx::Mul_6324, onnx::Pow_6317, onnx::ReduceMean_6319, onnx::Sqrt_6322, onnx::Sub_6316"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_6324">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3978" name="Constant_150539" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="3247434544" size="2560"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3979" name="onnx::Add_6325" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6325"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>640</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_6325">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3980" name="Constant_150540" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="3247437104" size="2560"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3981" name="onnx::MatMul_6326" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_6326"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>640</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_6326">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3982" name="Constant_148337" type="Const" version="opset1">
-			<data element_type="f32" shape="5120, 640" offset="3247439664" size="13107200"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6328, onnx::MatMul_9033"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>5120</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3983" name="onnx::Add_6328" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6328, onnx::MatMul_9033"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>5120</dim>
-					<dim>640</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_6328">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>5120</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3984" name="onnx::Shape_6329" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Shape_6329"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>5120</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>5120</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Shape_6329">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>5120</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3985" name="Constant_127878" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_26398, onnx::Gather_6331, onnx::Mul_6340"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3986" name="Constant_127879" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_26398, onnx::Gather_6331, onnx::Mul_6340"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3987" name="Constant_127875" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_26398, onnx::Gather_6331, onnx::Mul_6340"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3988" name="onnx::Gather_6330" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6330"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>5120</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="I64" names="onnx::Gather_6330">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3989" name="onnx::Gather_6331" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6331"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_6331">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3990" name="Constant_26387" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_26387"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="3991" name="onnx::Add_6332" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_26387, onnx::Add_6332, onnx::Gather_6331"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Add_6332">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3992" name="onnx::Add_6334" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6334"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Add_6334">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3993" name="onnx::Div_6335" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6334, onnx::Div_6335"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Div_6335">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3994" name="onnx::Div_6336" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_6336"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_6336">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3995" name="onnx::Mul_6337" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_6336, onnx::Mul_6337, onnx::Mul_6338, onnx::Slice_6339"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Mul_6337,onnx::Slice_6339">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3996" name="Constant_127874" type="Const" version="opset1">
-			<data element_type="i32" shape="1" offset="25941500" size="4"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_26398, onnx::Gather_6331, onnx::Mul_6340"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I32">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3997" name="ScatterUpdate_127880" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_26398, onnx::Gather_6331, onnx::Mul_6340"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="3" precision="I32">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="4" precision="I64">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3998" name="Constant_127883" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941504" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_26398, onnx::Gather_6331, onnx::Mul_6340"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="3999" name="onnx::Mul_6340" type="StridedSlice" version="opset1">
-			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask=""/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_26398, onnx::Gather_6331, onnx::Mul_6340"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>5120</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="3" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="4" precision="FP32" names="onnx::Mul_6340">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>2560</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4000" name="Constant_127947" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_26467, onnx::Div_6343, onnx::Gather_6331"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4001" name="Constant_127946" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_26467, onnx::Div_6343, onnx::Gather_6331"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4002" name="Constant_127945" type="Const" version="opset1">
-			<data element_type="i32" shape="1" offset="25941500" size="4"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_26467, onnx::Div_6343, onnx::Gather_6331"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I32">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4003" name="ScatterUpdate_127948" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_26467, onnx::Div_6343, onnx::Gather_6331"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="3" precision="I32">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="4" precision="I64">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4004" name="Constant_127949" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_26467, onnx::Div_6343, onnx::Gather_6331"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4005" name="onnx::Mul_6341" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6341"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_6341">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4006" name="onnx::Slice_6342" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6341, onnx::Slice_6342"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Slice_6342">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4007" name="ScatterUpdate_127950" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_26467, onnx::Div_6343, onnx::Gather_6331"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="3" precision="I32">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="4" precision="I64">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4008" name="Constant_127953" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941504" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_26467, onnx::Div_6343, onnx::Gather_6331"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4009" name="onnx::Div_6343" type="StridedSlice" version="opset1">
-			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask=""/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_26467, onnx::Div_6343, onnx::Gather_6331"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>5120</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="3" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="4" precision="FP32" names="onnx::Div_6343">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>2560</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4010" name="onnx::Mul_6351" type="Gelu" version="opset7">
-			<data approximation_mode="ERF"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6346, onnx::Erf_6345, onnx::Mul_6348, onnx::Mul_6349, onnx::Mul_6351"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>2560</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="FP32" names="onnx::Mul_6351">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>2560</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4011" name="input.884" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.884"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>2560</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>2560</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="input.884">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>2560</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4012" name="Constant_148345" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 2560" offset="3260546864" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6354, onnx::MatMul_9034"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>640</dim>
-					<dim>2560</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4013" name="onnx::Add_6354" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6354, onnx::MatMul_9034"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>2560</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>640</dim>
-					<dim>2560</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_6354">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4014" name="onnx::Add_6355" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6355"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_6355">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4015" name="onnx::Reshape_6356" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6356"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_6356">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4016" name="Constant_90893" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18233080" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6365"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4017" name="Constant_90894" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4018" name="Gather_90895" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6365"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_6365">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4019" name="onnx::Transpose_6366" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_6366"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_6366">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4020" name="Constant_26632" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="27579960" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_26632"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4021" name="input.888" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.888"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="input.888">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4022" name="m.up_blocks.2.attentions.1.proj_out.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640, 1, 1" offset="3267100464" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.2.attentions.1.proj_out.weight"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.2.attentions.1.proj_out.weight">
-					<dim>640</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4023" name="Convolution_26634" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_26634"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>640</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4024" name="Reshape_26654" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="3268738864" size="2560"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4025" name="onnx::Add_6368" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_26653, Reshape_26654, onnx::Add_6368"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_6368">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4026" name="onnx::Concat_6369" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6369"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Concat_6369">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4027" name="onnx::Cast_6370" type="Concat" version="opset1">
-			<data axis="1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.892, onnx::Cast_6370"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="input.892,onnx::Cast_6370">
-					<dim>2</dim>
-					<dim>960</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4028" name="m.up_blocks.2.resnets.2.conv_shortcut.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 960, 1, 1" offset="3268741424" size="2457600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.2.resnets.2.conv_shortcut.weight"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.2.resnets.2.conv_shortcut.weight">
-					<dim>640</dim>
-					<dim>960</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4029" name="Convolution_27024" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_27024"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>960</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>640</dim>
-					<dim>960</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4030" name="Reshape_27044" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="3271199024" size="2560"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4031" name="onnx::Add_6415" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_27043, Reshape_27044, onnx::Add_6415"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_6415">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4032" name="onnx::Reshape_6372" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6372"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_6372">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4033" name="onnx::InstanceNormalization_6373" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_6373"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>960</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_6373">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>30720</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4034" name="Constant_26694" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_26694"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4035" name="MVN_26695" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_26714, Concat_26759, MVN_26695, Multiply_26742, Reshape_26715, Reshape_26760, onnx::Reshape_6376"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>30720</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_6376">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>30720</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4036" name="onnx::Reshape_6377" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6377"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>960</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_6377">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4037" name="onnx::Mul_6378" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6378"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>30720</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_6378">
-					<dim>2</dim>
-					<dim>960</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4038" name="Constant_150543" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 960, 1, 1" offset="3271201584" size="3840"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>960</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4039" name="onnx::Add_6381" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6381"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>960</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>960</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_6381">
-					<dim>2</dim>
-					<dim>960</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4040" name="Constant_150544" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 960, 1, 1" offset="3271205424" size="3840"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>960</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4041" name="onnx::Cast_6384" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.896, onnx::Cast_6384"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>960</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>960</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="input.896,onnx::Cast_6384">
-					<dim>2</dim>
-					<dim>960</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4042" name="input.900" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.900, onnx::Mul_6386"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>960</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="FP32" names="input.900">
-					<dim>2</dim>
-					<dim>960</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4043" name="m.up_blocks.2.resnets.2.conv1.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 960, 3, 3" offset="3271209264" size="22118400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.2.resnets.2.conv1.weight"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.2.resnets.2.conv1.weight">
-					<dim>640</dim>
-					<dim>960</dim>
-					<dim>3</dim>
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4044" name="Convolution_26800" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_26800"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>960</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>640</dim>
-					<dim>960</dim>
-					<dim>3</dim>
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4045" name="Reshape_26820" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="3293327664" size="2560"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4046" name="onnx::Add_6388" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_26819, Reshape_26820, onnx::Add_6388"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_6388">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4047" name="onnx::Gemm_6390" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gemm_6390, onnx::Mul_6389"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="FP32" names="onnx::Gemm_6390">
-					<dim>2</dim>
-					<dim>1280</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4048" name="m.up_blocks.2.resnets.2.time_emb_proj.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 1280" offset="3293330224" size="3276800"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.2.resnets.2.time_emb_proj.weight"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.2.resnets.2.time_emb_proj.weight">
-					<dim>640</dim>
-					<dim>1280</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4049" name="MatMul_26852" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="MatMul_26852"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>640</dim>
-					<dim>1280</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4050" name="Constant_150545" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640" offset="3296607024" size="2560"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4051" name="onnx::Unsqueeze_6391" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Multiply_26853, onnx::Unsqueeze_6391"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>640</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_6391">
-					<dim>2</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4052" name="onnx::Unsqueeze_6392" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6392"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6392">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4053" name="onnx::Unsqueeze_6393" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6393"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_6393">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4054" name="onnx::Unsqueeze_6394" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="14129392" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6394"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6394">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4055" name="onnx::Add_6395" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6395"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_6395">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4056" name="onnx::Cast_6396" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.904, onnx::Cast_6396"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="input.904,onnx::Cast_6396">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4057" name="onnx::Reshape_6398" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6398"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_6398">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4058" name="onnx::InstanceNormalization_6399" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_6399"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_6399">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>20480</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4059" name="Constant_26870" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_26870"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4060" name="MVN_26871" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_26890, Concat_26935, MVN_26871, Multiply_26918, Reshape_26891, Reshape_26936, onnx::Reshape_6402"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>20480</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_6402">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>20480</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4061" name="onnx::Reshape_6403" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6403"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_6403">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4062" name="onnx::Mul_6404" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6404"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>20480</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_6404">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4063" name="Constant_150546" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="3296609584" size="2560"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4064" name="onnx::Add_6407" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6407"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_6407">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4065" name="Constant_150547" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="3296612144" size="2560"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4066" name="onnx::Cast_6410" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.908, onnx::Cast_6410"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="input.908,onnx::Cast_6410">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4067" name="input.912" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.912, onnx::Mul_6412"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="FP32" names="input.912">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4068" name="m.up_blocks.2.resnets.2.conv2.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640, 3, 3" offset="3296614704" size="14745600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.2.resnets.2.conv2.weight"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.2.resnets.2.conv2.weight">
-					<dim>640</dim>
-					<dim>640</dim>
-					<dim>3</dim>
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4069" name="Convolution_26976" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_26976"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>640</dim>
-					<dim>640</dim>
-					<dim>3</dim>
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4070" name="Reshape_26996" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="3311360304" size="2560"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4071" name="onnx::Add_6414" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_26995, Reshape_26996, onnx::Add_6414"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_6414">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4072" name="onnx::Div_6416" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.916, onnx::Div_6416"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="input.916,onnx::Div_6416">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4073" name="onnx::Reshape_6431" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6431"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_6431">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4074" name="onnx::InstanceNormalization_6432" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_6432"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_6432">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>20480</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4075" name="Constant_27100" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_27100"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4076" name="MVN_27101" type="MVN" version="opset6">
-			<data eps="9.9999999747524271e-07" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_27120, Concat_27165, MVN_27101, Multiply_27148, Reshape_27121, Reshape_27166, onnx::Reshape_6435"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>20480</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_6435">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>20480</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4077" name="onnx::Reshape_6436" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6419, onnx::Gather_6422, onnx::Gather_6425, onnx::Gather_6428, onnx::Reshape_6436"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="I64" names="onnx::Gather_6419,onnx::Gather_6422,onnx::Gather_6425,onnx::Gather_6428,onnx::Reshape_6436">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4078" name="onnx::Mul_6437" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6437"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>20480</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_6437">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4079" name="Constant_150548" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="3311362864" size="2560"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4080" name="onnx::Add_6440" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6440"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_6440">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4081" name="Constant_150549" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="3311365424" size="2560"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4082" name="input.920" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.920"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="input.920">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4083" name="m.up_blocks.2.attentions.2.proj_in.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640, 1, 1" offset="3311367984" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.2.attentions.2.proj_in.weight"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.2.attentions.2.proj_in.weight">
-					<dim>640</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4084" name="Convolution_27203" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_27203"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>640</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4085" name="Reshape_27223" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="3313006384" size="2560"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4086" name="onnx::Transpose_6444" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_27222, Reshape_27223, onnx::Transpose_6444"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_6444">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4087" name="Constant_27251" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18233080" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_27251"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4088" name="onnx::Reshape_6445" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6445"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_6445">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4089" name="Constant_89724" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_27077, onnx::Concat_6448, onnx::Gather_6420, onnx::Unsqueeze_6421, onnx::Unsqueeze_6447"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4090" name="Constant_27077" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_27077"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4091" name="onnx::Unsqueeze_6421" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_27077, onnx::Concat_6448, onnx::Gather_6420, onnx::Unsqueeze_6421, onnx::Unsqueeze_6447"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_6448">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4092" name="onnx::Gather_6426" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6426"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_6426"/>
-			</output>
-		</layer>
-		<layer id="4093" name="Constant_27085" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_27085"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4094" name="onnx::Mul_6427" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_27085, onnx::Gather_6426, onnx::Mul_6427"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Mul_6427"/>
-			</output>
-		</layer>
-		<layer id="4095" name="onnx::Gather_6429" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="14129392" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6429"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_6429"/>
-			</output>
-		</layer>
-		<layer id="4096" name="Constant_27089" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_27089"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4097" name="onnx::Mul_6430" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_27089, onnx::Gather_6429, onnx::Mul_6430"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Mul_6430"/>
-			</output>
-		</layer>
-		<layer id="4098" name="onnx::Unsqueeze_6446" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6446"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_6446"/>
-			</output>
-		</layer>
-		<layer id="4099" name="onnx::Unsqueeze_6449" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6449"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6449">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4100" name="onnx::Concat_6450" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6450, onnx::Unsqueeze_6449"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6450">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4101" name="Constant_89733" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_27081, onnx::Concat_6452, onnx::Gather_6423, onnx::Unsqueeze_6424, onnx::Unsqueeze_6451"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4102" name="Constant_27081" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_27081"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4103" name="onnx::Unsqueeze_6424" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_27081, onnx::Concat_6452, onnx::Gather_6423, onnx::Unsqueeze_6424, onnx::Unsqueeze_6451"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_6452">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4104" name="onnx::Reshape_6453" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6453"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_6453">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4105" name="input.924" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.924"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="input.924">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4106" name="Constant_27336" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_27336"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4107" name="onnx::Mul_6463" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6459, onnx::Div_6462, onnx::Mul_6463, onnx::Pow_6456, onnx::ReduceMean_6458, onnx::Sqrt_6461, onnx::Sub_6455"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_6463">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4108" name="Constant_150550" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="3313008944" size="2560"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4109" name="onnx::Add_6464" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6464"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>640</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_6464">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4110" name="Constant_150551" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="3313011504" size="2560"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4111" name="onnx::MatMul_6465" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_6465"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>640</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_6465">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4112" name="Constant_148356" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640" offset="3313014064" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9041, q.191"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>640</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4113" name="q.191" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9041, q.191"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>640</dim>
-					<dim>640</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="q.191">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4114" name="Constant_108924" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
-			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4115" name="onnx::Gather_6472" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6472, onnx::Gather_6475, onnx::Gather_6478"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="I64" names="onnx::Gather_6472,onnx::Gather_6475,onnx::Gather_6478">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4116" name="onnx::Gather_6479" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6479"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_6479"/>
-			</output>
-		</layer>
-		<layer id="4117" name="Constant_27362" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_27362"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4118" name="onnx::Div_6480" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_27362, onnx::Div_6480, onnx::Gather_6479"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Div_6480"/>
-			</output>
-		</layer>
-		<layer id="4119" name="onnx::Div_6481" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_6481"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_6481"/>
-			</output>
-		</layer>
-		<layer id="4120" name="onnx::Cast_6482" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_6482, onnx::Cast_6483, onnx::Div_6481, onnx::Unsqueeze_6484"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_6482,onnx::Cast_6483,onnx::Unsqueeze_6484"/>
-			</output>
-		</layer>
-		<layer id="4121" name="onnx::Unsqueeze_6492" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6492"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6492">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4122" name="onnx::Concat_6493" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6493, onnx::Unsqueeze_6492"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6493">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4123" name="onnx::Reshape_6494" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_6494"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4124" name="onnx::Transpose_6495" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_6495"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_6495">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>8</dim>
-					<dim>80</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4125" name="Constant_27475" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_27475"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4126" name="onnx::Reshape_6496" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6496"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>8</dim>
-					<dim>80</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_6496">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4127" name="onnx::Gather_6473" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6473"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_6473"/>
-			</output>
-		</layer>
-		<layer id="4128" name="Constant_27354" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_27354"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4129" name="onnx::Unsqueeze_6474" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_27354, onnx::Gather_6473, onnx::Unsqueeze_6474"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_6474"/>
-			</output>
-		</layer>
-		<layer id="4130" name="onnx::Mul_6497" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6497"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_6497"/>
-			</output>
-		</layer>
-		<layer id="4131" name="onnx::Unsqueeze_6498" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6497, onnx::Unsqueeze_6498"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_6498"/>
-			</output>
-		</layer>
-		<layer id="4132" name="onnx::Unsqueeze_6503" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6503"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6503">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4133" name="onnx::Concat_6504" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6504, onnx::Unsqueeze_6503"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6504">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4134" name="Constant_89760" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_27358, onnx::Concat_6506, onnx::Gather_6476, onnx::Unsqueeze_6477, onnx::Unsqueeze_6505"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4135" name="Constant_27358" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_27358"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4136" name="onnx::Unsqueeze_6477" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_27358, onnx::Concat_6506, onnx::Gather_6476, onnx::Unsqueeze_6477, onnx::Unsqueeze_6505"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_6506">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4137" name="onnx::Div_6499" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_6499"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_6499"/>
-			</output>
-		</layer>
-		<layer id="4138" name="onnx::Cast_6500" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_6500, onnx::Cast_6501, onnx::Div_6499, onnx::Unsqueeze_6502"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_6500,onnx::Cast_6501,onnx::Unsqueeze_6502"/>
-			</output>
-		</layer>
-		<layer id="4139" name="onnx::Unsqueeze_6507" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6507"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6507">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4140" name="onnx::Concat_6508" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6508, onnx::Unsqueeze_6507"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6508">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4141" name="onnx::Reshape_6509" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6509"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_6509">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4142" name="q.195" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="q.195"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="q.195">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4143" name="Constant_148363" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640" offset="3314652464" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.191, onnx::MatMul_9042"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>640</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4144" name="k.191" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.191, onnx::MatMul_9042"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>640</dim>
-					<dim>640</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="k.191">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4145" name="Constant_108993" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
-			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4146" name="onnx::Gather_6511" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6511, onnx::Gather_6514, onnx::Gather_6517"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="I64" names="onnx::Gather_6511,onnx::Gather_6514,onnx::Gather_6517">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4147" name="onnx::Gather_6518" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6518"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_6518"/>
-			</output>
-		</layer>
-		<layer id="4148" name="Constant_27603" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_27603"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4149" name="onnx::Div_6519" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_27603, onnx::Div_6519, onnx::Gather_6518"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Div_6519"/>
-			</output>
-		</layer>
-		<layer id="4150" name="onnx::Div_6520" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_6520"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_6520"/>
-			</output>
-		</layer>
-		<layer id="4151" name="onnx::Cast_6521" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_6521, onnx::Cast_6522, onnx::Div_6520, onnx::Unsqueeze_6523"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_6521,onnx::Cast_6522,onnx::Unsqueeze_6523"/>
-			</output>
-		</layer>
-		<layer id="4152" name="onnx::Unsqueeze_6531" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6531"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6531">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4153" name="onnx::Concat_6532" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6532, onnx::Unsqueeze_6531"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6532">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4154" name="onnx::Reshape_6533" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_6533"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4155" name="onnx::Transpose_6534" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_6534"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_6534">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>8</dim>
-					<dim>80</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4156" name="Constant_27716" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_27716"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4157" name="onnx::Reshape_6535" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6535"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>8</dim>
-					<dim>80</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_6535">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4158" name="onnx::Gather_6512" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6512"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_6512"/>
-			</output>
-		</layer>
-		<layer id="4159" name="Constant_27595" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_27595"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4160" name="onnx::Unsqueeze_6513" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_27595, onnx::Gather_6512, onnx::Unsqueeze_6513"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_6513"/>
-			</output>
-		</layer>
-		<layer id="4161" name="onnx::Mul_6536" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6536"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_6536"/>
-			</output>
-		</layer>
-		<layer id="4162" name="onnx::Unsqueeze_6537" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6536, onnx::Unsqueeze_6537"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_6537"/>
-			</output>
-		</layer>
-		<layer id="4163" name="onnx::Unsqueeze_6542" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6542"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6542">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4164" name="onnx::Concat_6543" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6543, onnx::Unsqueeze_6542"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6543">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4165" name="Constant_89787" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_27599, onnx::Concat_6545, onnx::Gather_6515, onnx::Unsqueeze_6516, onnx::Unsqueeze_6544"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4166" name="Constant_27599" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_27599"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4167" name="onnx::Unsqueeze_6516" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_27599, onnx::Concat_6545, onnx::Gather_6515, onnx::Unsqueeze_6516, onnx::Unsqueeze_6544"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_6545">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4168" name="onnx::Div_6538" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_6538"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_6538"/>
-			</output>
-		</layer>
-		<layer id="4169" name="onnx::Cast_6539" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_6539, onnx::Cast_6540, onnx::Div_6538, onnx::Unsqueeze_6541"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_6539,onnx::Cast_6540,onnx::Unsqueeze_6541"/>
-			</output>
-		</layer>
-		<layer id="4170" name="onnx::Unsqueeze_6546" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6546"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6546">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4171" name="onnx::Concat_6547" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6547, onnx::Unsqueeze_6546"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6547">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4172" name="onnx::Reshape_6548" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6548"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_6548">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4173" name="k.195" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.195"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="k.195">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4174" name="onnx::Mul_6589" type="Einsum" version="opset7">
-			<data equation="bid,bjd-&gt;bij"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6589"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_6589">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>1024</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4175" name="Constant_150552" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1" offset="82029992" size="4"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4176" name="onnx::Softmax_6591" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Softmax_6591"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>1024</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Softmax_6591">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>1024</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4177" name="attn.95" type="SoftMax" version="opset8">
-			<data axis="-1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="attn.95"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>1024</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="FP32" names="attn.95">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>1024</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4178" name="Constant_148370" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640" offset="3316290864" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9043, v.191"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>640</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4179" name="v.191" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9043, v.191"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>640</dim>
-					<dim>640</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="v.191">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4180" name="Constant_109062" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
-			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4181" name="onnx::Gather_6550" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6550, onnx::Gather_6553, onnx::Gather_6556"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="I64" names="onnx::Gather_6550,onnx::Gather_6553,onnx::Gather_6556">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4182" name="onnx::Gather_6557" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6557"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_6557"/>
-			</output>
-		</layer>
-		<layer id="4183" name="Constant_27844" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_27844"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4184" name="onnx::Div_6558" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_27844, onnx::Div_6558, onnx::Gather_6557"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Div_6558"/>
-			</output>
-		</layer>
-		<layer id="4185" name="onnx::Div_6559" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_6559"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_6559"/>
-			</output>
-		</layer>
-		<layer id="4186" name="onnx::Cast_6560" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_6560, onnx::Cast_6561, onnx::Div_6559, onnx::Unsqueeze_6562"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_6560,onnx::Cast_6561,onnx::Unsqueeze_6562"/>
-			</output>
-		</layer>
-		<layer id="4187" name="onnx::Unsqueeze_6570" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6570"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6570">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4188" name="onnx::Concat_6571" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6571, onnx::Unsqueeze_6570"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6571">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4189" name="onnx::Reshape_6572" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_6572"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4190" name="onnx::Transpose_6573" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_6573"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_6573">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>8</dim>
-					<dim>80</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4191" name="Constant_27957" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_27957"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4192" name="onnx::Reshape_6574" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6574"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>8</dim>
-					<dim>80</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_6574">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4193" name="onnx::Gather_6551" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6551"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_6551"/>
-			</output>
-		</layer>
-		<layer id="4194" name="Constant_27836" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_27836"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4195" name="onnx::Unsqueeze_6552" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_27836, onnx::Gather_6551, onnx::Unsqueeze_6552"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_6552"/>
-			</output>
-		</layer>
-		<layer id="4196" name="onnx::Mul_6575" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6575"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_6575"/>
-			</output>
-		</layer>
-		<layer id="4197" name="onnx::Unsqueeze_6576" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6575, onnx::Unsqueeze_6576"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_6576"/>
-			</output>
-		</layer>
-		<layer id="4198" name="onnx::Unsqueeze_6581" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6581"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6581">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4199" name="onnx::Concat_6582" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6582, onnx::Unsqueeze_6581"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6582">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4200" name="Constant_89814" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_27840, onnx::Concat_6584, onnx::Gather_6554, onnx::Unsqueeze_6555, onnx::Unsqueeze_6583"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4201" name="Constant_27840" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_27840"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4202" name="onnx::Unsqueeze_6555" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_27840, onnx::Concat_6584, onnx::Gather_6554, onnx::Unsqueeze_6555, onnx::Unsqueeze_6583"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_6584">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4203" name="onnx::Div_6577" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_6577"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_6577"/>
-			</output>
-		</layer>
-		<layer id="4204" name="onnx::Cast_6578" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_6578, onnx::Cast_6579, onnx::Div_6577, onnx::Unsqueeze_6580"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_6578,onnx::Cast_6579,onnx::Unsqueeze_6580"/>
-			</output>
-		</layer>
-		<layer id="4205" name="onnx::Unsqueeze_6585" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6585"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6585">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4206" name="onnx::Concat_6586" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6586, onnx::Unsqueeze_6585"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6586">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4207" name="onnx::Reshape_6587" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6587"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_6587">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4208" name="v.195" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="v.195"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="v.195">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4209" name="out.95" type="Einsum" version="opset7">
-			<data equation="bij,bjd-&gt;bid"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="out.95"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>1024</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="out.95">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4210" name="onnx::Gather_6594" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6594, onnx::Gather_6597, onnx::Gather_6600"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="I64" names="onnx::Gather_6594,onnx::Gather_6597,onnx::Gather_6600">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4211" name="onnx::Gather_6595" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6595"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_6595"/>
-			</output>
-		</layer>
-		<layer id="4212" name="Constant_28082" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_28082"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4213" name="onnx::Div_6596" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_28082, onnx::Div_6596, onnx::Gather_6595"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Div_6596"/>
-			</output>
-		</layer>
-		<layer id="4214" name="onnx::Div_6603" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_6603"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_6603"/>
-			</output>
-		</layer>
-		<layer id="4215" name="onnx::Cast_6604" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_6604, onnx::Cast_6605, onnx::Div_6603, onnx::Unsqueeze_6606"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_6604,onnx::Cast_6605,onnx::Unsqueeze_6606"/>
-			</output>
-		</layer>
-		<layer id="4216" name="onnx::Unsqueeze_6608" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6608"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6608">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4217" name="onnx::Concat_6609" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6609, onnx::Unsqueeze_6608"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6609">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4218" name="Constant_90912" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="19464604" size="16"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_6616"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4219" name="Constant_90913" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4220" name="Gather_90914" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_6616"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>2</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64">
-					<dim>2</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4221" name="onnx::Reshape_6616" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_6616"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>2</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_6616">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4222" name="onnx::Transpose_6617" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_6617"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_6617">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4223" name="Constant_28203" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_28203"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4224" name="onnx::Reshape_6618" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6618"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_6618">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>8</dim>
-					<dim>80</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4225" name="onnx::Div_6619" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_6619"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_6619"/>
-			</output>
-		</layer>
-		<layer id="4226" name="onnx::Cast_6620" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_6620, onnx::Cast_6621, onnx::Div_6619, onnx::Unsqueeze_6622"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_6620,onnx::Cast_6621,onnx::Unsqueeze_6622"/>
-			</output>
-		</layer>
-		<layer id="4227" name="onnx::Unsqueeze_6625" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6625"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6625">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4228" name="onnx::Concat_6626" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6626, onnx::Unsqueeze_6625"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6626">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4229" name="Constant_89841" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_28086, onnx::Concat_6628, onnx::Gather_6598, onnx::Unsqueeze_6599, onnx::Unsqueeze_6627"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4230" name="Constant_28086" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_28086"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4231" name="onnx::Unsqueeze_6599" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_28086, onnx::Concat_6628, onnx::Gather_6598, onnx::Unsqueeze_6599, onnx::Unsqueeze_6627"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_6628">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4232" name="onnx::Gather_6601" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6601"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_6601"/>
-			</output>
-		</layer>
-		<layer id="4233" name="Constant_28090" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_28090"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4234" name="onnx::Unsqueeze_6602" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_28090, onnx::Gather_6601, onnx::Unsqueeze_6602"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_6602"/>
-			</output>
-		</layer>
-		<layer id="4235" name="onnx::Mul_6623" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6623"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_6623"/>
-			</output>
-		</layer>
-		<layer id="4236" name="onnx::Unsqueeze_6624" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6623, onnx::Unsqueeze_6624"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_6624"/>
-			</output>
-		</layer>
-		<layer id="4237" name="onnx::Unsqueeze_6629" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6629"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6629">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4238" name="onnx::Concat_6630" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6630, onnx::Unsqueeze_6629"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6630">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4239" name="onnx::Reshape_6631" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6631"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_6631">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4240" name="onnx::MatMul_6632" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_6632"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>8</dim>
-					<dim>80</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_6632">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4241" name="Constant_148377" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640" offset="3317929264" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6634, onnx::MatMul_9048"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>640</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4242" name="onnx::Add_6634" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6634, onnx::MatMul_9048"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>640</dim>
-					<dim>640</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_6634">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4243" name="input.928" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.928"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="input.928">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4244" name="input.932" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.932"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="input.932">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4245" name="Constant_28324" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_28324"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4246" name="onnx::Mul_6645" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6641, onnx::Div_6644, onnx::Mul_6645, onnx::Pow_6638, onnx::ReduceMean_6640, onnx::Sqrt_6643, onnx::Sub_6637"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_6645">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4247" name="Constant_150554" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="3319567664" size="2560"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4248" name="onnx::Add_6646" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6646"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>640</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_6646">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4249" name="Constant_150555" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="3319570224" size="2560"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4250" name="onnx::MatMul_6647" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_6647"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>640</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_6647">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4251" name="Constant_148385" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640" offset="3319572784" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9049, q.199"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>640</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4252" name="q.199" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9049, q.199"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>640</dim>
-					<dim>640</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="q.199">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4253" name="Constant_109131" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
-			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4254" name="onnx::Gather_6654" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6654, onnx::Gather_6657, onnx::Gather_6660"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="I64" names="onnx::Gather_6654,onnx::Gather_6657,onnx::Gather_6660">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4255" name="onnx::Gather_6661" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6661"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_6661"/>
-			</output>
-		</layer>
-		<layer id="4256" name="Constant_28350" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_28350"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4257" name="onnx::Div_6662" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_28350, onnx::Div_6662, onnx::Gather_6661"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Div_6662"/>
-			</output>
-		</layer>
-		<layer id="4258" name="onnx::Div_6663" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_6663"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_6663"/>
-			</output>
-		</layer>
-		<layer id="4259" name="onnx::Cast_6664" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_6664, onnx::Cast_6665, onnx::Div_6663, onnx::Unsqueeze_6666"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_6664,onnx::Cast_6665,onnx::Unsqueeze_6666"/>
-			</output>
-		</layer>
-		<layer id="4260" name="onnx::Unsqueeze_6674" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6674"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6674">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4261" name="onnx::Concat_6675" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6675, onnx::Unsqueeze_6674"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6675">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4262" name="onnx::Reshape_6676" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_6676"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4263" name="onnx::Transpose_6677" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_6677"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_6677">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>8</dim>
-					<dim>80</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4264" name="Constant_28463" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_28463"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4265" name="onnx::Reshape_6678" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6678"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>8</dim>
-					<dim>80</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_6678">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4266" name="onnx::Gather_6655" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6655"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_6655"/>
-			</output>
-		</layer>
-		<layer id="4267" name="Constant_28342" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_28342"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4268" name="onnx::Unsqueeze_6656" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_28342, onnx::Gather_6655, onnx::Unsqueeze_6656"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_6656"/>
-			</output>
-		</layer>
-		<layer id="4269" name="onnx::Mul_6679" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6679"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_6679"/>
-			</output>
-		</layer>
-		<layer id="4270" name="onnx::Unsqueeze_6680" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6679, onnx::Unsqueeze_6680"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_6680"/>
-			</output>
-		</layer>
-		<layer id="4271" name="onnx::Unsqueeze_6685" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6685"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6685">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4272" name="onnx::Concat_6686" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6686, onnx::Unsqueeze_6685"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6686">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4273" name="Constant_89868" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_28346, onnx::Concat_6688, onnx::Gather_6658, onnx::Unsqueeze_6659, onnx::Unsqueeze_6687"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4274" name="Constant_28346" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_28346"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4275" name="onnx::Unsqueeze_6659" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_28346, onnx::Concat_6688, onnx::Gather_6658, onnx::Unsqueeze_6659, onnx::Unsqueeze_6687"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_6688">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4276" name="onnx::Div_6681" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_6681"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_6681"/>
-			</output>
-		</layer>
-		<layer id="4277" name="onnx::Cast_6682" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_6682, onnx::Cast_6683, onnx::Div_6681, onnx::Unsqueeze_6684"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_6682,onnx::Cast_6683,onnx::Unsqueeze_6684"/>
-			</output>
-		</layer>
-		<layer id="4278" name="onnx::Unsqueeze_6689" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6689"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6689">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4279" name="onnx::Concat_6690" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6690, onnx::Unsqueeze_6689"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6690">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4280" name="onnx::Reshape_6691" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6691"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_6691">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4281" name="q.203" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="q.203"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="q.203">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4282" name="Constant_148392" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 768" offset="3321211184" size="1966080"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.199, onnx::MatMul_9050"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>640</dim>
-					<dim>768</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4283" name="k.199" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.199, onnx::MatMul_9050"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>77</dim>
-					<dim>768</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>640</dim>
-					<dim>768</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="k.199">
-					<dim>2</dim>
-					<dim>77</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4284" name="onnx::Transpose_6706" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_6706"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>77</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_6706">
-					<dim>2</dim>
-					<dim>77</dim>
-					<dim>8</dim>
-					<dim>80</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4285" name="Constant_28584" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_28584"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4286" name="onnx::Reshape_6707" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6707"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>77</dim>
-					<dim>8</dim>
-					<dim>80</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_6707">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>77</dim>
-					<dim>80</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4287" name="k.203" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.203"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>77</dim>
-					<dim>80</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="k.203">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>80</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4288" name="onnx::Mul_6743" type="Einsum" version="opset7">
-			<data equation="bid,bjd-&gt;bij"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6743"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>80</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_6743">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>77</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4289" name="Constant_150556" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1" offset="82029992" size="4"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4290" name="onnx::Softmax_6745" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Softmax_6745"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>77</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Softmax_6745">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>77</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4291" name="attn.99" type="SoftMax" version="opset8">
-			<data axis="-1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="attn.99"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>77</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="FP32" names="attn.99">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>77</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4292" name="Constant_148399" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 768" offset="3323177264" size="1966080"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9051, v.199"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>640</dim>
-					<dim>768</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4293" name="v.199" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9051, v.199"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>77</dim>
-					<dim>768</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>640</dim>
-					<dim>768</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="v.199">
-					<dim>2</dim>
-					<dim>77</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4294" name="onnx::Transpose_6731" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_6731"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>77</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_6731">
-					<dim>2</dim>
-					<dim>77</dim>
-					<dim>8</dim>
-					<dim>80</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4295" name="Constant_28592" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_28592"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4296" name="onnx::Reshape_6732" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6732"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>77</dim>
-					<dim>8</dim>
-					<dim>80</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_6732">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>77</dim>
-					<dim>80</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4297" name="v.203" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="v.203"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>77</dim>
-					<dim>80</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="v.203">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>80</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4298" name="out.99" type="Einsum" version="opset7">
-			<data equation="bij,bjd-&gt;bid"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="out.99"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>77</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>80</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="out.99">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4299" name="onnx::Gather_6748" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6748, onnx::Gather_6751, onnx::Gather_6754"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="I64" names="onnx::Gather_6748,onnx::Gather_6751,onnx::Gather_6754">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4300" name="onnx::Gather_6749" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6749"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_6749"/>
-			</output>
-		</layer>
-		<layer id="4301" name="Constant_28604" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_28604"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4302" name="onnx::Div_6750" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_28604, onnx::Div_6750, onnx::Gather_6749"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Div_6750"/>
-			</output>
-		</layer>
-		<layer id="4303" name="onnx::Div_6757" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_6757"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_6757"/>
-			</output>
-		</layer>
-		<layer id="4304" name="onnx::Cast_6758" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_6758, onnx::Cast_6759, onnx::Div_6757, onnx::Unsqueeze_6760"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_6758,onnx::Cast_6759,onnx::Unsqueeze_6760"/>
-			</output>
-		</layer>
-		<layer id="4305" name="onnx::Unsqueeze_6762" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6762"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6762">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4306" name="onnx::Concat_6763" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6763, onnx::Unsqueeze_6762"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6763">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4307" name="Constant_90922" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="19464604" size="16"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_6770"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4308" name="Constant_90923" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4309" name="Gather_90924" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_6770"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>2</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64">
-					<dim>2</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4310" name="onnx::Reshape_6770" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_6770"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>2</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_6770">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4311" name="onnx::Transpose_6771" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_6771"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_6771">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4312" name="Constant_28725" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_28725"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4313" name="onnx::Reshape_6772" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6772"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>1024</dim>
-					<dim>80</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_6772">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>8</dim>
-					<dim>80</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4314" name="onnx::Div_6773" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_6773"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_6773"/>
-			</output>
-		</layer>
-		<layer id="4315" name="onnx::Cast_6774" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_6774, onnx::Cast_6775, onnx::Div_6773, onnx::Unsqueeze_6776"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_6774,onnx::Cast_6775,onnx::Unsqueeze_6776"/>
-			</output>
-		</layer>
-		<layer id="4316" name="onnx::Unsqueeze_6779" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6779"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6779">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4317" name="onnx::Concat_6780" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6780, onnx::Unsqueeze_6779"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6780">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4318" name="Constant_89895" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_28608, onnx::Concat_6782, onnx::Gather_6752, onnx::Unsqueeze_6753, onnx::Unsqueeze_6781"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4319" name="Constant_28608" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_28608"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4320" name="onnx::Unsqueeze_6753" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_28608, onnx::Concat_6782, onnx::Gather_6752, onnx::Unsqueeze_6753, onnx::Unsqueeze_6781"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_6782">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4321" name="onnx::Gather_6755" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6755"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_6755"/>
-			</output>
-		</layer>
-		<layer id="4322" name="Constant_28612" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_28612"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4323" name="onnx::Unsqueeze_6756" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_28612, onnx::Gather_6755, onnx::Unsqueeze_6756"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_6756"/>
-			</output>
-		</layer>
-		<layer id="4324" name="onnx::Mul_6777" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6777"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_6777"/>
-			</output>
-		</layer>
-		<layer id="4325" name="onnx::Unsqueeze_6778" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6777, onnx::Unsqueeze_6778"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_6778"/>
-			</output>
-		</layer>
-		<layer id="4326" name="onnx::Unsqueeze_6783" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6783"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6783">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4327" name="onnx::Concat_6784" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6784, onnx::Unsqueeze_6783"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6784">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4328" name="onnx::Reshape_6785" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6785"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_6785">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4329" name="onnx::MatMul_6786" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_6786"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>8</dim>
-					<dim>80</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_6786">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4330" name="Constant_148406" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640" offset="3325143344" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6788, onnx::MatMul_9072"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>640</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4331" name="onnx::Add_6788" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6788, onnx::MatMul_9072"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>640</dim>
-					<dim>640</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_6788">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4332" name="input.936" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.936"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="input.936">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4333" name="input.940" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.940"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="input.940">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4334" name="Constant_28846" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_28846"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4335" name="onnx::Mul_6799" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6795, onnx::Div_6798, onnx::Mul_6799, onnx::Pow_6792, onnx::ReduceMean_6794, onnx::Sqrt_6797, onnx::Sub_6791"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_6799">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4336" name="Constant_150558" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="3326781744" size="2560"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4337" name="onnx::Add_6800" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6800"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>640</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_6800">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4338" name="Constant_150559" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 640" offset="3326784304" size="2560"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4339" name="onnx::MatMul_6801" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_6801"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>640</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_6801">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4340" name="Constant_148414" type="Const" version="opset1">
-			<data element_type="f32" shape="5120, 640" offset="3326786864" size="13107200"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6803, onnx::MatMul_9073"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>5120</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4341" name="onnx::Add_6803" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6803, onnx::MatMul_9073"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>5120</dim>
-					<dim>640</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_6803">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>5120</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4342" name="onnx::Shape_6804" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Shape_6804"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>5120</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>5120</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Shape_6804">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>5120</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4343" name="Constant_128079" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_28874, onnx::Gather_6806, onnx::Mul_6815"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4344" name="Constant_128080" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_28874, onnx::Gather_6806, onnx::Mul_6815"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4345" name="Constant_128076" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_28874, onnx::Gather_6806, onnx::Mul_6815"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4346" name="onnx::Gather_6805" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6805"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>5120</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="I64" names="onnx::Gather_6805">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4347" name="onnx::Gather_6806" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6806"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_6806">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4348" name="Constant_28863" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_28863"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4349" name="onnx::Add_6807" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_28863, onnx::Add_6807, onnx::Gather_6806"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Add_6807">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4350" name="onnx::Add_6809" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6809"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Add_6809">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4351" name="onnx::Div_6810" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6809, onnx::Div_6810"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Div_6810">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4352" name="onnx::Div_6811" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_6811"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_6811">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4353" name="onnx::Mul_6812" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_6811, onnx::Mul_6812, onnx::Mul_6813, onnx::Slice_6814"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Mul_6812,onnx::Slice_6814">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4354" name="Constant_128075" type="Const" version="opset1">
-			<data element_type="i32" shape="1" offset="25941500" size="4"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_28874, onnx::Gather_6806, onnx::Mul_6815"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I32">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4355" name="ScatterUpdate_128081" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_28874, onnx::Gather_6806, onnx::Mul_6815"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="3" precision="I32">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="4" precision="I64">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4356" name="Constant_128084" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941504" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_28874, onnx::Gather_6806, onnx::Mul_6815"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4357" name="onnx::Mul_6815" type="StridedSlice" version="opset1">
-			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask=""/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_28874, onnx::Gather_6806, onnx::Mul_6815"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>5120</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="3" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="4" precision="FP32" names="onnx::Mul_6815">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>2560</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4358" name="Constant_128148" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_28943, onnx::Div_6818, onnx::Gather_6806"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4359" name="Constant_128147" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_28943, onnx::Div_6818, onnx::Gather_6806"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4360" name="Constant_128146" type="Const" version="opset1">
-			<data element_type="i32" shape="1" offset="25941500" size="4"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_28943, onnx::Div_6818, onnx::Gather_6806"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I32">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4361" name="ScatterUpdate_128149" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_28943, onnx::Div_6818, onnx::Gather_6806"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="3" precision="I32">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="4" precision="I64">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4362" name="Constant_128150" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_28943, onnx::Div_6818, onnx::Gather_6806"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4363" name="onnx::Mul_6816" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6816"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_6816">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4364" name="onnx::Slice_6817" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6816, onnx::Slice_6817"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Slice_6817">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4365" name="ScatterUpdate_128151" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_28943, onnx::Div_6818, onnx::Gather_6806"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="3" precision="I32">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="4" precision="I64">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4366" name="Constant_128154" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941504" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_28943, onnx::Div_6818, onnx::Gather_6806"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4367" name="onnx::Div_6818" type="StridedSlice" version="opset1">
-			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask=""/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_28943, onnx::Div_6818, onnx::Gather_6806"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>5120</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="3" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="4" precision="FP32" names="onnx::Div_6818">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>2560</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4368" name="onnx::Mul_6826" type="Gelu" version="opset7">
-			<data approximation_mode="ERF"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6821, onnx::Erf_6820, onnx::Mul_6823, onnx::Mul_6824, onnx::Mul_6826"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>2560</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="FP32" names="onnx::Mul_6826">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>2560</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4369" name="input.944" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.944"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>2560</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>2560</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="input.944">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>2560</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4370" name="Constant_148422" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 2560" offset="3339894064" size="6553600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6829, onnx::MatMul_9074"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>640</dim>
-					<dim>2560</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4371" name="onnx::Add_6829" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6829, onnx::MatMul_9074"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>2560</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>640</dim>
-					<dim>2560</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_6829">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4372" name="onnx::Add_6830" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6830"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_6830">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4373" name="onnx::Reshape_6831" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6831"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_6831">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4374" name="Constant_90935" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18233080" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6840"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4375" name="Constant_90936" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4376" name="Gather_90937" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6840"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_6840">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4377" name="onnx::Transpose_6841" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_6841"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1024</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_6841">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-					<dim>640</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4378" name="Constant_29108" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="27579960" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_29108"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4379" name="input.948" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.948"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-					<dim>640</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="input.948">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4380" name="m.up_blocks.2.attentions.2.proj_out.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640, 1, 1" offset="3346447664" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.2.attentions.2.proj_out.weight"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.2.attentions.2.proj_out.weight">
-					<dim>640</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4381" name="Convolution_29110" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_29110"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>640</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4382" name="Reshape_29130" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="3348086064" size="2560"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4383" name="onnx::Add_6843" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_29129, Reshape_29130, onnx::Add_6843"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_6843">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4384" name="x.7" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="x.7"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="x.7">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4385" name="ShapeOf_29160" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="ShapeOf_29160"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4386" name="Convert_29161" type="Convert" version="opset1">
-			<data destination_type="f32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Convert_29161"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="FP32">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4387" name="Multiply_29162" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Multiply_29162"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>4</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4388" name="Convert_29163" type="Convert" version="opset1">
-			<data destination_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Convert_29163"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4389" name="input.952" type="Interpolate" version="opset4">
-			<data mode="nearest" shape_calculation_mode="scales" coordinate_transformation_mode="asymmetric" nearest_mode="floor" antialias="false" pads_begin="0, 0, 0, 0" pads_end="0, 0, 0, 0" cube_coeff="-0.75"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.952"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>32</dim>
-					<dim>32</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-				<port id="2" precision="FP32">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="FP32" names="input.952">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4390" name="m.up_blocks.2.upsamplers.0.conv.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="640, 640, 3, 3" offset="3348088624" size="14745600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.2.upsamplers.0.conv.weight"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.2.upsamplers.0.conv.weight">
-					<dim>640</dim>
-					<dim>640</dim>
-					<dim>3</dim>
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4391" name="Convolution_29165" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_29165"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>640</dim>
-					<dim>640</dim>
-					<dim>3</dim>
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4392" name="Reshape_29185" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="3362834224" size="2560"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4393" name="onnx::Concat_6850" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_29184, Reshape_29185, onnx::Concat_6850"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Concat_6850">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4394" name="onnx::Cast_6851" type="Concat" version="opset1">
-			<data axis="1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.956, onnx::Cast_6851"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="input.956,onnx::Cast_6851">
-					<dim>2</dim>
-					<dim>960</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4395" name="m.up_blocks.3.resnets.0.conv_shortcut.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 960, 1, 1" offset="3362836784" size="1228800"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.3.resnets.0.conv_shortcut.weight"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.3.resnets.0.conv_shortcut.weight">
-					<dim>320</dim>
-					<dim>960</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4396" name="Convolution_29554" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_29554"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>960</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>320</dim>
-					<dim>960</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4397" name="Reshape_29574" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="3364065584" size="1280"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4398" name="onnx::Add_6896" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_29573, Reshape_29574, onnx::Add_6896"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_6896">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4399" name="onnx::Reshape_6853" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6853"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_6853">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4400" name="onnx::InstanceNormalization_6854" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_6854"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>960</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_6854">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>122880</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4401" name="Constant_29224" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_29224"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4402" name="MVN_29225" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_29244, Concat_29289, MVN_29225, Multiply_29272, Reshape_29245, Reshape_29290, onnx::Reshape_6857"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>122880</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_6857">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>122880</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4403" name="onnx::Reshape_6858" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6858"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>960</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_6858">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4404" name="onnx::Mul_6859" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6859"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>122880</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_6859">
-					<dim>2</dim>
-					<dim>960</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4405" name="Constant_150562" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 960, 1, 1" offset="3364066864" size="3840"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>960</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4406" name="onnx::Add_6862" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6862"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>960</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>960</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_6862">
-					<dim>2</dim>
-					<dim>960</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4407" name="Constant_150563" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 960, 1, 1" offset="3364070704" size="3840"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>960</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4408" name="onnx::Cast_6865" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.960, onnx::Cast_6865"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>960</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>960</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="input.960,onnx::Cast_6865">
-					<dim>2</dim>
-					<dim>960</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4409" name="input.964" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.964, onnx::Mul_6867"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>960</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="FP32" names="input.964">
-					<dim>2</dim>
-					<dim>960</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4410" name="m.up_blocks.3.resnets.0.conv1.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 960, 3, 3" offset="3364074544" size="11059200"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.3.resnets.0.conv1.weight"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.3.resnets.0.conv1.weight">
-					<dim>320</dim>
-					<dim>960</dim>
-					<dim>3</dim>
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4411" name="Convolution_29330" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_29330"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>960</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>320</dim>
-					<dim>960</dim>
-					<dim>3</dim>
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4412" name="Reshape_29350" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="3375133744" size="1280"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4413" name="onnx::Add_6869" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_29349, Reshape_29350, onnx::Add_6869"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_6869">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4414" name="onnx::Gemm_6871" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gemm_6871, onnx::Mul_6870"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="FP32" names="onnx::Gemm_6871">
-					<dim>2</dim>
-					<dim>1280</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4415" name="m.up_blocks.3.resnets.0.time_emb_proj.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 1280" offset="3375135024" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.3.resnets.0.time_emb_proj.weight"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.3.resnets.0.time_emb_proj.weight">
-					<dim>320</dim>
-					<dim>1280</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4416" name="MatMul_29382" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="MatMul_29382"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>320</dim>
-					<dim>1280</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4417" name="Constant_150564" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320" offset="3376773424" size="1280"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4418" name="onnx::Unsqueeze_6872" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Multiply_29383, onnx::Unsqueeze_6872"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_6872">
-					<dim>2</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4419" name="onnx::Unsqueeze_6873" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6873"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6873">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4420" name="onnx::Unsqueeze_6874" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6874"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_6874">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4421" name="onnx::Unsqueeze_6875" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="14129392" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6875"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6875">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4422" name="onnx::Add_6876" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6876"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_6876">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4423" name="onnx::Cast_6877" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.968, onnx::Cast_6877"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="input.968,onnx::Cast_6877">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4424" name="onnx::Reshape_6879" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6879"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_6879">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4425" name="onnx::InstanceNormalization_6880" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_6880"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_6880">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>40960</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4426" name="Constant_29400" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_29400"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4427" name="MVN_29401" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_29420, Concat_29465, MVN_29401, Multiply_29448, Reshape_29421, Reshape_29466, onnx::Reshape_6883"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>40960</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_6883">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>40960</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4428" name="onnx::Reshape_6884" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6884"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_6884">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4429" name="onnx::Mul_6885" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6885"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>40960</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_6885">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4430" name="Constant_150565" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="3376774704" size="1280"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4431" name="onnx::Add_6888" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6888"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_6888">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4432" name="Constant_150566" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="3376775984" size="1280"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4433" name="onnx::Cast_6891" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.972, onnx::Cast_6891"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="input.972,onnx::Cast_6891">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4434" name="input.976" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.976, onnx::Mul_6893"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="FP32" names="input.976">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4435" name="m.up_blocks.3.resnets.0.conv2.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320, 3, 3" offset="3376777264" size="3686400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.3.resnets.0.conv2.weight"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.3.resnets.0.conv2.weight">
-					<dim>320</dim>
-					<dim>320</dim>
-					<dim>3</dim>
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4436" name="Convolution_29506" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_29506"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>320</dim>
-					<dim>320</dim>
-					<dim>3</dim>
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4437" name="Reshape_29526" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="3380463664" size="1280"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4438" name="onnx::Add_6895" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_29525, Reshape_29526, onnx::Add_6895"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_6895">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4439" name="onnx::Div_6897" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.980, onnx::Div_6897"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="input.980,onnx::Div_6897">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4440" name="onnx::Reshape_6912" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6912"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_6912">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4441" name="onnx::InstanceNormalization_6913" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_6913"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_6913">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>40960</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4442" name="Constant_29630" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_29630"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4443" name="MVN_29631" type="MVN" version="opset6">
-			<data eps="9.9999999747524271e-07" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_29650, Concat_29695, MVN_29631, Multiply_29678, Reshape_29651, Reshape_29696, onnx::Reshape_6916"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>40960</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_6916">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>40960</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4444" name="onnx::Reshape_6917" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6900, onnx::Gather_6903, onnx::Gather_6906, onnx::Gather_6909, onnx::Reshape_6917"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="I64" names="onnx::Gather_6900,onnx::Gather_6903,onnx::Gather_6906,onnx::Gather_6909,onnx::Reshape_6917">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4445" name="onnx::Mul_6918" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6918"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>40960</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_6918">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4446" name="Constant_150567" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="3380464944" size="1280"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4447" name="onnx::Add_6921" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6921"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_6921">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4448" name="Constant_150568" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="3380466224" size="1280"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4449" name="input.984" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.984"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="input.984">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4450" name="m.up_blocks.3.attentions.0.proj_in.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320, 1, 1" offset="3380467504" size="409600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.3.attentions.0.proj_in.weight"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.3.attentions.0.proj_in.weight">
-					<dim>320</dim>
-					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4451" name="Convolution_29733" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_29733"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>320</dim>
-					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4452" name="Reshape_29753" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="3380877104" size="1280"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4453" name="onnx::Transpose_6925" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_29752, Reshape_29753, onnx::Transpose_6925"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_6925">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4454" name="Constant_29781" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18233080" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_29781"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4455" name="onnx::Reshape_6926" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6926"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_6926">
-					<dim>2</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4456" name="Constant_89940" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_29607, onnx::Concat_6929, onnx::Gather_6901, onnx::Unsqueeze_6902, onnx::Unsqueeze_6928"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4457" name="Constant_29607" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_29607"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4458" name="onnx::Unsqueeze_6902" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_29607, onnx::Concat_6929, onnx::Gather_6901, onnx::Unsqueeze_6902, onnx::Unsqueeze_6928"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_6929">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4459" name="onnx::Gather_6907" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6907"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_6907"/>
-			</output>
-		</layer>
-		<layer id="4460" name="Constant_29615" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_29615"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4461" name="onnx::Mul_6908" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_29615, onnx::Gather_6907, onnx::Mul_6908"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Mul_6908"/>
-			</output>
-		</layer>
-		<layer id="4462" name="onnx::Gather_6910" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="14129392" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6910"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_6910"/>
-			</output>
-		</layer>
-		<layer id="4463" name="Constant_29619" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_29619"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4464" name="onnx::Mul_6911" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_29619, onnx::Gather_6910, onnx::Mul_6911"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Mul_6911"/>
-			</output>
-		</layer>
-		<layer id="4465" name="onnx::Unsqueeze_6927" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6927"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_6927"/>
-			</output>
-		</layer>
-		<layer id="4466" name="onnx::Unsqueeze_6930" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6930"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6930">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4467" name="onnx::Concat_6931" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6931, onnx::Unsqueeze_6930"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6931">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4468" name="Constant_89949" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_29611, onnx::Concat_6933, onnx::Gather_6904, onnx::Unsqueeze_6905, onnx::Unsqueeze_6932"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4469" name="Constant_29611" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_29611"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4470" name="onnx::Unsqueeze_6905" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_29611, onnx::Concat_6933, onnx::Gather_6904, onnx::Unsqueeze_6905, onnx::Unsqueeze_6932"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_6933">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4471" name="onnx::Reshape_6934" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6934"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_6934">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4472" name="input.988" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.988"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="input.988">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4473" name="Constant_29866" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_29866"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4474" name="onnx::Mul_6944" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6940, onnx::Div_6943, onnx::Mul_6944, onnx::Pow_6937, onnx::ReduceMean_6939, onnx::Sqrt_6942, onnx::Sub_6936"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_6944">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4475" name="Constant_150569" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="3380878384" size="1280"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4476" name="onnx::Add_6945" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_6945"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>320</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_6945">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4477" name="Constant_150570" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="3380879664" size="1280"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4478" name="onnx::MatMul_6946" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_6946"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>320</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_6946">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4479" name="Constant_148433" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320" offset="3380880944" size="409600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9082, q.207"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>320</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4480" name="q.207" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9082, q.207"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>320</dim>
-					<dim>320</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="q.207">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4481" name="Constant_109200" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
-			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4482" name="onnx::Gather_6953" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6953, onnx::Gather_6956, onnx::Gather_6959"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="I64" names="onnx::Gather_6953,onnx::Gather_6956,onnx::Gather_6959">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4483" name="onnx::Gather_6960" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6960"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_6960"/>
-			</output>
-		</layer>
-		<layer id="4484" name="Constant_29892" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_29892"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4485" name="onnx::Div_6961" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_29892, onnx::Div_6961, onnx::Gather_6960"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Div_6961"/>
-			</output>
-		</layer>
-		<layer id="4486" name="onnx::Div_6962" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_6962"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_6962"/>
-			</output>
-		</layer>
-		<layer id="4487" name="onnx::Cast_6963" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_6963, onnx::Cast_6964, onnx::Div_6962, onnx::Unsqueeze_6965"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_6963,onnx::Cast_6964,onnx::Unsqueeze_6965"/>
-			</output>
-		</layer>
-		<layer id="4488" name="onnx::Unsqueeze_6973" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6973"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6973">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4489" name="onnx::Concat_6974" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6974, onnx::Unsqueeze_6973"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6974">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4490" name="onnx::Reshape_6975" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_6975"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4491" name="onnx::Transpose_6976" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_6976"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_6976">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4492" name="Constant_30005" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_30005"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4493" name="onnx::Reshape_6977" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6977"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_6977">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4494" name="onnx::Gather_6954" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6954"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_6954"/>
-			</output>
-		</layer>
-		<layer id="4495" name="Constant_29884" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_29884"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4496" name="onnx::Unsqueeze_6955" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_29884, onnx::Gather_6954, onnx::Unsqueeze_6955"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_6955"/>
-			</output>
-		</layer>
-		<layer id="4497" name="onnx::Mul_6978" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6978"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_6978"/>
-			</output>
-		</layer>
-		<layer id="4498" name="onnx::Unsqueeze_6979" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_6978, onnx::Unsqueeze_6979"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_6979"/>
-			</output>
-		</layer>
-		<layer id="4499" name="onnx::Unsqueeze_6984" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6984"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6984">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4500" name="onnx::Concat_6985" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6985, onnx::Unsqueeze_6984"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6985">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4501" name="Constant_89976" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_29888, onnx::Concat_6987, onnx::Gather_6957, onnx::Unsqueeze_6958, onnx::Unsqueeze_6986"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4502" name="Constant_29888" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_29888"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4503" name="onnx::Unsqueeze_6958" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_29888, onnx::Concat_6987, onnx::Gather_6957, onnx::Unsqueeze_6958, onnx::Unsqueeze_6986"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_6987">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4504" name="onnx::Div_6980" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_6980"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_6980"/>
-			</output>
-		</layer>
-		<layer id="4505" name="onnx::Cast_6981" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_6981, onnx::Cast_6982, onnx::Div_6980, onnx::Unsqueeze_6983"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_6981,onnx::Cast_6982,onnx::Unsqueeze_6983"/>
-			</output>
-		</layer>
-		<layer id="4506" name="onnx::Unsqueeze_6988" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_6988"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_6988">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4507" name="onnx::Concat_6989" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_6989, onnx::Unsqueeze_6988"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_6989">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4508" name="onnx::Reshape_6990" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_6990"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_6990">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4509" name="q.211" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="q.211"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="q.211">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4510" name="Constant_148440" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320" offset="3381290544" size="409600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.207, onnx::MatMul_9083"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>320</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4511" name="k.207" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.207, onnx::MatMul_9083"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>320</dim>
-					<dim>320</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="k.207">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4512" name="Constant_109269" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
-			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4513" name="onnx::Gather_6992" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6992, onnx::Gather_6995, onnx::Gather_6998"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="I64" names="onnx::Gather_6992,onnx::Gather_6995,onnx::Gather_6998">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4514" name="onnx::Gather_6999" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6999"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_6999"/>
-			</output>
-		</layer>
-		<layer id="4515" name="Constant_30133" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_30133"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4516" name="onnx::Div_7000" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_30133, onnx::Div_7000, onnx::Gather_6999"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Div_7000"/>
-			</output>
-		</layer>
-		<layer id="4517" name="onnx::Div_7001" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_7001"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_7001"/>
-			</output>
-		</layer>
-		<layer id="4518" name="onnx::Cast_7002" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_7002, onnx::Cast_7003, onnx::Div_7001, onnx::Unsqueeze_7004"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_7002,onnx::Cast_7003,onnx::Unsqueeze_7004"/>
-			</output>
-		</layer>
-		<layer id="4519" name="onnx::Unsqueeze_7012" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7012"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7012">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4520" name="onnx::Concat_7013" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7013, onnx::Unsqueeze_7012"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7013">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4521" name="onnx::Reshape_7014" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_7014"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4522" name="onnx::Transpose_7015" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_7015"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_7015">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4523" name="Constant_30246" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_30246"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4524" name="onnx::Reshape_7016" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7016"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_7016">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4525" name="onnx::Gather_6993" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_6993"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_6993"/>
-			</output>
-		</layer>
-		<layer id="4526" name="Constant_30125" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_30125"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4527" name="onnx::Unsqueeze_6994" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_30125, onnx::Gather_6993, onnx::Unsqueeze_6994"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_6994"/>
-			</output>
-		</layer>
-		<layer id="4528" name="onnx::Mul_7017" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7017"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_7017"/>
-			</output>
-		</layer>
-		<layer id="4529" name="onnx::Unsqueeze_7018" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7017, onnx::Unsqueeze_7018"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_7018"/>
-			</output>
-		</layer>
-		<layer id="4530" name="onnx::Unsqueeze_7023" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7023"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7023">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4531" name="onnx::Concat_7024" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7024, onnx::Unsqueeze_7023"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7024">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4532" name="Constant_90003" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_30129, onnx::Concat_7026, onnx::Gather_6996, onnx::Unsqueeze_6997, onnx::Unsqueeze_7025"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4533" name="Constant_30129" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_30129"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4534" name="onnx::Unsqueeze_6997" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_30129, onnx::Concat_7026, onnx::Gather_6996, onnx::Unsqueeze_6997, onnx::Unsqueeze_7025"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_7026">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4535" name="onnx::Div_7019" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_7019"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_7019"/>
-			</output>
-		</layer>
-		<layer id="4536" name="onnx::Cast_7020" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_7020, onnx::Cast_7021, onnx::Div_7019, onnx::Unsqueeze_7022"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_7020,onnx::Cast_7021,onnx::Unsqueeze_7022"/>
-			</output>
-		</layer>
-		<layer id="4537" name="onnx::Unsqueeze_7027" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7027"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7027">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4538" name="onnx::Concat_7028" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7028, onnx::Unsqueeze_7027"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7028">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4539" name="onnx::Reshape_7029" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7029"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_7029">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4540" name="k.211" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.211"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="k.211">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4541" name="onnx::Mul_7070" type="Einsum" version="opset7">
-			<data equation="bid,bjd-&gt;bij"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7070"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_7070">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>4096</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4542" name="Constant_150571" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1" offset="19054992" size="4"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4543" name="onnx::Softmax_7072" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Softmax_7072"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>4096</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Softmax_7072">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>4096</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4544" name="attn.103" type="SoftMax" version="opset8">
-			<data axis="-1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="attn.103"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>4096</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="FP32" names="attn.103">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>4096</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4545" name="Constant_148447" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320" offset="3381700144" size="409600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9084, v.207"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>320</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4546" name="v.207" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9084, v.207"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>320</dim>
-					<dim>320</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="v.207">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4547" name="Constant_109338" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
-			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4548" name="onnx::Gather_7031" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7031, onnx::Gather_7034, onnx::Gather_7037"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="I64" names="onnx::Gather_7031,onnx::Gather_7034,onnx::Gather_7037">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4549" name="onnx::Gather_7038" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7038"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_7038"/>
-			</output>
-		</layer>
-		<layer id="4550" name="Constant_30374" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_30374"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4551" name="onnx::Div_7039" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_30374, onnx::Div_7039, onnx::Gather_7038"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Div_7039"/>
-			</output>
-		</layer>
-		<layer id="4552" name="onnx::Div_7040" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_7040"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_7040"/>
-			</output>
-		</layer>
-		<layer id="4553" name="onnx::Cast_7041" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_7041, onnx::Cast_7042, onnx::Div_7040, onnx::Unsqueeze_7043"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_7041,onnx::Cast_7042,onnx::Unsqueeze_7043"/>
-			</output>
-		</layer>
-		<layer id="4554" name="onnx::Unsqueeze_7051" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7051"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7051">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4555" name="onnx::Concat_7052" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7052, onnx::Unsqueeze_7051"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7052">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4556" name="onnx::Reshape_7053" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_7053"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4557" name="onnx::Transpose_7054" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_7054"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_7054">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4558" name="Constant_30487" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_30487"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4559" name="onnx::Reshape_7055" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7055"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_7055">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4560" name="onnx::Gather_7032" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7032"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_7032"/>
-			</output>
-		</layer>
-		<layer id="4561" name="Constant_30366" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_30366"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4562" name="onnx::Unsqueeze_7033" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_30366, onnx::Gather_7032, onnx::Unsqueeze_7033"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_7033"/>
-			</output>
-		</layer>
-		<layer id="4563" name="onnx::Mul_7056" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7056"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_7056"/>
-			</output>
-		</layer>
-		<layer id="4564" name="onnx::Unsqueeze_7057" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7056, onnx::Unsqueeze_7057"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_7057"/>
-			</output>
-		</layer>
-		<layer id="4565" name="onnx::Unsqueeze_7062" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7062"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7062">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4566" name="onnx::Concat_7063" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7063, onnx::Unsqueeze_7062"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7063">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4567" name="Constant_90030" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_30370, onnx::Concat_7065, onnx::Gather_7035, onnx::Unsqueeze_7036, onnx::Unsqueeze_7064"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4568" name="Constant_30370" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_30370"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4569" name="onnx::Unsqueeze_7036" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_30370, onnx::Concat_7065, onnx::Gather_7035, onnx::Unsqueeze_7036, onnx::Unsqueeze_7064"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_7065">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4570" name="onnx::Div_7058" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_7058"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_7058"/>
-			</output>
-		</layer>
-		<layer id="4571" name="onnx::Cast_7059" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_7059, onnx::Cast_7060, onnx::Div_7058, onnx::Unsqueeze_7061"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_7059,onnx::Cast_7060,onnx::Unsqueeze_7061"/>
-			</output>
-		</layer>
-		<layer id="4572" name="onnx::Unsqueeze_7066" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7066"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7066">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4573" name="onnx::Concat_7067" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7067, onnx::Unsqueeze_7066"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7067">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4574" name="onnx::Reshape_7068" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7068"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_7068">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4575" name="v.211" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="v.211"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="v.211">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4576" name="out.103" type="Einsum" version="opset7">
-			<data equation="bij,bjd-&gt;bid"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="out.103"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>4096</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="out.103">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4577" name="onnx::Gather_7075" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7075, onnx::Gather_7078, onnx::Gather_7081"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="I64" names="onnx::Gather_7075,onnx::Gather_7078,onnx::Gather_7081">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4578" name="onnx::Gather_7076" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7076"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_7076"/>
-			</output>
-		</layer>
-		<layer id="4579" name="Constant_30612" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_30612"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4580" name="onnx::Div_7077" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_30612, onnx::Div_7077, onnx::Gather_7076"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Div_7077"/>
-			</output>
-		</layer>
-		<layer id="4581" name="onnx::Div_7084" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_7084"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_7084"/>
-			</output>
-		</layer>
-		<layer id="4582" name="onnx::Cast_7085" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_7085, onnx::Cast_7086, onnx::Div_7084, onnx::Unsqueeze_7087"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_7085,onnx::Cast_7086,onnx::Unsqueeze_7087"/>
-			</output>
-		</layer>
-		<layer id="4583" name="onnx::Unsqueeze_7089" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7089"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7089">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4584" name="onnx::Concat_7090" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7090, onnx::Unsqueeze_7089"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7090">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4585" name="Constant_90954" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="19464604" size="16"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_7097"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4586" name="Constant_90955" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4587" name="Gather_90956" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_7097"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>2</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64">
-					<dim>2</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4588" name="onnx::Reshape_7097" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_7097"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>2</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_7097">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4589" name="onnx::Transpose_7098" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_7098"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_7098">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4590" name="Constant_30733" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_30733"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4591" name="onnx::Reshape_7099" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7099"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_7099">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4592" name="onnx::Div_7100" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_7100"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_7100"/>
-			</output>
-		</layer>
-		<layer id="4593" name="onnx::Cast_7101" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_7101, onnx::Cast_7102, onnx::Div_7100, onnx::Unsqueeze_7103"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_7101,onnx::Cast_7102,onnx::Unsqueeze_7103"/>
-			</output>
-		</layer>
-		<layer id="4594" name="onnx::Unsqueeze_7106" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7106"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7106">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4595" name="onnx::Concat_7107" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7107, onnx::Unsqueeze_7106"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7107">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4596" name="Constant_90057" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_30616, onnx::Concat_7109, onnx::Gather_7079, onnx::Unsqueeze_7080, onnx::Unsqueeze_7108"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4597" name="Constant_30616" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_30616"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4598" name="onnx::Unsqueeze_7080" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_30616, onnx::Concat_7109, onnx::Gather_7079, onnx::Unsqueeze_7080, onnx::Unsqueeze_7108"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_7109">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4599" name="onnx::Gather_7082" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7082"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_7082"/>
-			</output>
-		</layer>
-		<layer id="4600" name="Constant_30620" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_30620"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4601" name="onnx::Unsqueeze_7083" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_30620, onnx::Gather_7082, onnx::Unsqueeze_7083"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_7083"/>
-			</output>
-		</layer>
-		<layer id="4602" name="onnx::Mul_7104" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7104"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_7104"/>
-			</output>
-		</layer>
-		<layer id="4603" name="onnx::Unsqueeze_7105" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7104, onnx::Unsqueeze_7105"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_7105"/>
-			</output>
-		</layer>
-		<layer id="4604" name="onnx::Unsqueeze_7110" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7110"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7110">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4605" name="onnx::Concat_7111" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7111, onnx::Unsqueeze_7110"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7111">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4606" name="onnx::Reshape_7112" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7112"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_7112">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4607" name="onnx::MatMul_7113" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_7113"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_7113">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4608" name="Constant_148454" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320" offset="3382109744" size="409600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7115, onnx::MatMul_9089"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>320</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4609" name="onnx::Add_7115" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7115, onnx::MatMul_9089"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>320</dim>
-					<dim>320</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_7115">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4610" name="input.992" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.992"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="input.992">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4611" name="input.996" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.996"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="input.996">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4612" name="Constant_30854" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_30854"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4613" name="onnx::Mul_7126" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7122, onnx::Div_7125, onnx::Mul_7126, onnx::Pow_7119, onnx::ReduceMean_7121, onnx::Sqrt_7124, onnx::Sub_7118"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_7126">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4614" name="Constant_150573" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="3382519344" size="1280"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4615" name="onnx::Add_7127" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7127"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>320</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_7127">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4616" name="Constant_150574" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="3382520624" size="1280"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4617" name="onnx::MatMul_7128" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_7128"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>320</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_7128">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4618" name="Constant_148462" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320" offset="3382521904" size="409600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9090, q.215"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>320</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4619" name="q.215" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9090, q.215"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>320</dim>
-					<dim>320</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="q.215">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4620" name="Constant_109407" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
-			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4621" name="onnx::Gather_7135" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7135, onnx::Gather_7138, onnx::Gather_7141"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="I64" names="onnx::Gather_7135,onnx::Gather_7138,onnx::Gather_7141">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4622" name="onnx::Gather_7142" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7142"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_7142"/>
-			</output>
-		</layer>
-		<layer id="4623" name="Constant_30880" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_30880"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4624" name="onnx::Div_7143" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_30880, onnx::Div_7143, onnx::Gather_7142"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Div_7143"/>
-			</output>
-		</layer>
-		<layer id="4625" name="onnx::Div_7144" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_7144"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_7144"/>
-			</output>
-		</layer>
-		<layer id="4626" name="onnx::Cast_7145" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_7145, onnx::Cast_7146, onnx::Div_7144, onnx::Unsqueeze_7147"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_7145,onnx::Cast_7146,onnx::Unsqueeze_7147"/>
-			</output>
-		</layer>
-		<layer id="4627" name="onnx::Unsqueeze_7155" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7155"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7155">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4628" name="onnx::Concat_7156" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7156, onnx::Unsqueeze_7155"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7156">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4629" name="onnx::Reshape_7157" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_7157"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4630" name="onnx::Transpose_7158" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_7158"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_7158">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4631" name="Constant_30993" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_30993"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4632" name="onnx::Reshape_7159" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7159"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_7159">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4633" name="onnx::Gather_7136" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7136"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_7136"/>
-			</output>
-		</layer>
-		<layer id="4634" name="Constant_30872" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_30872"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4635" name="onnx::Unsqueeze_7137" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_30872, onnx::Gather_7136, onnx::Unsqueeze_7137"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_7137"/>
-			</output>
-		</layer>
-		<layer id="4636" name="onnx::Mul_7160" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7160"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_7160"/>
-			</output>
-		</layer>
-		<layer id="4637" name="onnx::Unsqueeze_7161" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7160, onnx::Unsqueeze_7161"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_7161"/>
-			</output>
-		</layer>
-		<layer id="4638" name="onnx::Unsqueeze_7166" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7166"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7166">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4639" name="onnx::Concat_7167" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7167, onnx::Unsqueeze_7166"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7167">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4640" name="Constant_90084" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_30876, onnx::Concat_7169, onnx::Gather_7139, onnx::Unsqueeze_7140, onnx::Unsqueeze_7168"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4641" name="Constant_30876" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_30876"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4642" name="onnx::Unsqueeze_7140" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_30876, onnx::Concat_7169, onnx::Gather_7139, onnx::Unsqueeze_7140, onnx::Unsqueeze_7168"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_7169">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4643" name="onnx::Div_7162" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_7162"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_7162"/>
-			</output>
-		</layer>
-		<layer id="4644" name="onnx::Cast_7163" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_7163, onnx::Cast_7164, onnx::Div_7162, onnx::Unsqueeze_7165"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_7163,onnx::Cast_7164,onnx::Unsqueeze_7165"/>
-			</output>
-		</layer>
-		<layer id="4645" name="onnx::Unsqueeze_7170" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7170"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7170">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4646" name="onnx::Concat_7171" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7171, onnx::Unsqueeze_7170"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7171">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4647" name="onnx::Reshape_7172" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7172"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_7172">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4648" name="q.219" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="q.219"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="q.219">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4649" name="Constant_148469" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 768" offset="3382931504" size="983040"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.215, onnx::MatMul_9091"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>320</dim>
-					<dim>768</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4650" name="k.215" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.215, onnx::MatMul_9091"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>77</dim>
-					<dim>768</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>320</dim>
-					<dim>768</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="k.215">
-					<dim>2</dim>
-					<dim>77</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4651" name="onnx::Transpose_7187" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_7187"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>77</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_7187">
-					<dim>2</dim>
-					<dim>77</dim>
-					<dim>8</dim>
-					<dim>40</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4652" name="Constant_31114" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_31114"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4653" name="onnx::Reshape_7188" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7188"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>77</dim>
-					<dim>8</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_7188">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>77</dim>
-					<dim>40</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4654" name="k.219" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.219"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>77</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="k.219">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>40</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4655" name="onnx::Mul_7224" type="Einsum" version="opset7">
-			<data equation="bid,bjd-&gt;bij"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7224"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>40</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_7224">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>77</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4656" name="Constant_150575" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1" offset="19054992" size="4"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4657" name="onnx::Softmax_7226" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Softmax_7226"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>77</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Softmax_7226">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>77</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4658" name="attn.107" type="SoftMax" version="opset8">
-			<data axis="-1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="attn.107"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>77</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="FP32" names="attn.107">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>77</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4659" name="Constant_148476" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 768" offset="3383914544" size="983040"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9092, v.215"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>320</dim>
-					<dim>768</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4660" name="v.215" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9092, v.215"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>77</dim>
-					<dim>768</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>320</dim>
-					<dim>768</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="v.215">
-					<dim>2</dim>
-					<dim>77</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4661" name="onnx::Transpose_7212" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_7212"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>77</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_7212">
-					<dim>2</dim>
-					<dim>77</dim>
-					<dim>8</dim>
-					<dim>40</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4662" name="Constant_31122" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_31122"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4663" name="onnx::Reshape_7213" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7213"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>77</dim>
-					<dim>8</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_7213">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>77</dim>
-					<dim>40</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4664" name="v.219" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="v.219"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>77</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="v.219">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>40</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4665" name="out.107" type="Einsum" version="opset7">
-			<data equation="bij,bjd-&gt;bid"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="out.107"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>77</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>40</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="out.107">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4666" name="onnx::Gather_7229" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7229, onnx::Gather_7232, onnx::Gather_7235"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="I64" names="onnx::Gather_7229,onnx::Gather_7232,onnx::Gather_7235">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4667" name="onnx::Gather_7230" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7230"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_7230"/>
-			</output>
-		</layer>
-		<layer id="4668" name="Constant_31134" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_31134"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4669" name="onnx::Div_7231" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_31134, onnx::Div_7231, onnx::Gather_7230"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Div_7231"/>
-			</output>
-		</layer>
-		<layer id="4670" name="onnx::Div_7238" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_7238"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_7238"/>
-			</output>
-		</layer>
-		<layer id="4671" name="onnx::Cast_7239" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_7239, onnx::Cast_7240, onnx::Div_7238, onnx::Unsqueeze_7241"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_7239,onnx::Cast_7240,onnx::Unsqueeze_7241"/>
-			</output>
-		</layer>
-		<layer id="4672" name="onnx::Unsqueeze_7243" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7243"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7243">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4673" name="onnx::Concat_7244" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7244, onnx::Unsqueeze_7243"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7244">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4674" name="Constant_90964" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="19464604" size="16"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_7251"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4675" name="Constant_90965" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4676" name="Gather_90966" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_7251"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>2</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64">
-					<dim>2</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4677" name="onnx::Reshape_7251" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_7251"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>2</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_7251">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4678" name="onnx::Transpose_7252" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_7252"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_7252">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4679" name="Constant_31255" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_31255"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4680" name="onnx::Reshape_7253" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7253"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_7253">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4681" name="onnx::Div_7254" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_7254"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_7254"/>
-			</output>
-		</layer>
-		<layer id="4682" name="onnx::Cast_7255" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_7255, onnx::Cast_7256, onnx::Div_7254, onnx::Unsqueeze_7257"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_7255,onnx::Cast_7256,onnx::Unsqueeze_7257"/>
-			</output>
-		</layer>
-		<layer id="4683" name="onnx::Unsqueeze_7260" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7260"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7260">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4684" name="onnx::Concat_7261" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7261, onnx::Unsqueeze_7260"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7261">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4685" name="Constant_90111" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_31138, onnx::Concat_7263, onnx::Gather_7233, onnx::Unsqueeze_7234, onnx::Unsqueeze_7262"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4686" name="Constant_31138" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_31138"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4687" name="onnx::Unsqueeze_7234" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_31138, onnx::Concat_7263, onnx::Gather_7233, onnx::Unsqueeze_7234, onnx::Unsqueeze_7262"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_7263">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4688" name="onnx::Gather_7236" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7236"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_7236"/>
-			</output>
-		</layer>
-		<layer id="4689" name="Constant_31142" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_31142"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4690" name="onnx::Unsqueeze_7237" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_31142, onnx::Gather_7236, onnx::Unsqueeze_7237"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_7237"/>
-			</output>
-		</layer>
-		<layer id="4691" name="onnx::Mul_7258" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7258"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_7258"/>
-			</output>
-		</layer>
-		<layer id="4692" name="onnx::Unsqueeze_7259" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7258, onnx::Unsqueeze_7259"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_7259"/>
-			</output>
-		</layer>
-		<layer id="4693" name="onnx::Unsqueeze_7264" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7264"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7264">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4694" name="onnx::Concat_7265" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7265, onnx::Unsqueeze_7264"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+					<dim>320</dim>
 					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7265">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4695" name="onnx::Reshape_7266" type="Concat" version="opset1">
-			<data axis="0"/>
+		<layer id="3652" name="Constant_87156" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7266"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_7266">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4696" name="onnx::MatMul_7267" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_7267"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_7267">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4697" name="Constant_148483" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320" offset="3384897584" size="409600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7269, onnx::MatMul_9113"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>320</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4698" name="onnx::Add_7269" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7269, onnx::MatMul_9113"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>320</dim>
-					<dim>320</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_7269">
-					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4699" name="input.1000" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.1000"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.1000">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4700" name="input.1004" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.1004"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
 				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="input.1004">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4701" name="Constant_31376" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_31376"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4702" name="onnx::Mul_7280" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7276, onnx::Div_7279, onnx::Mul_7280, onnx::Pow_7273, onnx::ReduceMean_7275, onnx::Sqrt_7278, onnx::Sub_7272"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="I64">
 					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_7280">
-					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4703" name="Constant_150577" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="3385307184" size="1280"/>
-			<output>
-				<port id="0" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4704" name="onnx::Add_7281" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7281"/>
-			</rt_info>
+		<layer id="3653" name="/up_blocks.3/resnets.0/norm2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>1</dim>
-					<dim>1</dim>
-					<dim>320</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_7281">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4705" name="Constant_150578" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="3385308464" size="1280"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4706" name="onnx::MatMul_7282" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_7282"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
-				</port>
-				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_7282">
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.0/norm2/Add_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4707" name="Constant_148491" type="Const" version="opset1">
-			<data element_type="f32" shape="2560, 320" offset="3385309744" size="3276800"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7284, onnx::MatMul_9114"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>2560</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4708" name="onnx::Add_7284" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7284, onnx::MatMul_9114"/>
-			</rt_info>
+		<layer id="3654" name="/up_blocks.3/resnets.0/nonlinearity_1/Mul" type="Swish" version="opset4">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2560</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_7284">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>2560</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4709" name="onnx::Shape_7285" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Shape_7285"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>2560</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>2560</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Shape_7285">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>2560</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4710" name="Constant_128280" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_31404, onnx::Gather_7287, onnx::Mul_7296"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4711" name="Constant_128281" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_31404, onnx::Gather_7287, onnx::Mul_7296"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4712" name="Constant_128277" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_31404, onnx::Gather_7287, onnx::Mul_7296"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4713" name="onnx::Gather_7286" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7286"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32" names="/up_blocks.3/resnets.0/nonlinearity_1/Mul_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>2560</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="I64" names="onnx::Gather_7286">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4714" name="onnx::Gather_7287" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7287"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_7287">
-					<dim>1</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
-		</layer>
-		<layer id="4715" name="Constant_31393" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_31393"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4716" name="onnx::Add_7288" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_31393, onnx::Add_7288, onnx::Gather_7287"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
+		</layer>
+		<layer id="3655" name="up_blocks.3.resnets.0.conv2.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320, 3, 3" offset="1688389202" size="1843200" />
 			<output>
-				<port id="3" precision="I64" names="onnx::Add_7288">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>3</dim>
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4717" name="onnx::Add_7290" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
+		<layer id="3656" name="up_blocks.3.resnets.0.conv2.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7290"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64" names="onnx::Add_7290">
-					<dim>1</dim>
+				<port id="1" precision="FP32" names="up_blocks.3.resnets.0.conv2.weight">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>3</dim>
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4718" name="onnx::Div_7291" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7290, onnx::Div_7291"/>
-			</rt_info>
+		<layer id="3657" name="/up_blocks.3/resnets.0/conv2/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>3</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Div_7291">
-					<dim>1</dim>
+				<port id="2" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4719" name="onnx::Div_7292" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_7292"/>
-			</rt_info>
+		<layer id="3658" name="Reshape_28768_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="1690232402" size="640" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_7292">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4720" name="onnx::Mul_7293" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
+		<layer id="3659" name="Reshape_28768" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_7292, onnx::Mul_7293, onnx::Mul_7294, onnx::Slice_7295"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
 					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Mul_7293,onnx::Slice_7295">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
 					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4721" name="Constant_128276" type="Const" version="opset1">
-			<data element_type="i32" shape="1" offset="25941500" size="4"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_31404, onnx::Gather_7287, onnx::Mul_7296"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I32">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4722" name="ScatterUpdate_128282" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_31404, onnx::Gather_7287, onnx::Mul_7296"/>
-			</rt_info>
+		<layer id="3660" name="/up_blocks.3/resnets.0/conv2/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
+					<dim>320</dim>
 					<dim>1</dim>
-				</port>
-				<port id="3" precision="I32">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="I64">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4723" name="Constant_128285" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941504" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_31404, onnx::Gather_7287, onnx::Mul_7296"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.0/conv2/Conv_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4724" name="onnx::Mul_7296" type="StridedSlice" version="opset1">
-			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask=""/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_31404, onnx::Gather_7287, onnx::Mul_7296"/>
-			</rt_info>
+		<layer id="3661" name="/up_blocks.3/resnets.0/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>2560</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>3</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
-				<port id="3" precision="I64">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="FP32" names="onnx::Mul_7296">
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.0/Add_1_output_0,/up_blocks.3/resnets.0/Div_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>1280</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4725" name="Constant_128349" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_31473, onnx::Div_7299, onnx::Gather_7287"/>
-			</rt_info>
+		<layer id="3662" name="/up_blocks.3/attentions.0/norm/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64">
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.0/norm/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4726" name="Constant_128348" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_31473, onnx::Div_7299, onnx::Gather_7287"/>
-			</rt_info>
+		<layer id="3663" name="/up_blocks.3/attentions.0/norm/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>3</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/norm/Reshape_output_0">
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>40960</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4727" name="Constant_128347" type="Const" version="opset1">
-			<data element_type="i32" shape="1" offset="25941500" size="4"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_31473, onnx::Div_7299, onnx::Gather_7287"/>
-			</rt_info>
+		<layer id="3664" name="Constant_28856" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I32">
+				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4728" name="ScatterUpdate_128350" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_31473, onnx::Div_7299, onnx::Gather_7287"/>
-			</rt_info>
+		<layer id="3665" name="MVN_28857" type="MVN" version="opset6">
+			<data eps="9.9999999747524271e-07" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>40960</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
+			</input>
+			<output>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/norm/InstanceNormalization_output_0">
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>40960</dim>
 				</port>
-				<port id="3" precision="I32">
-					<dim>1</dim>
+			</output>
+		</layer>
+		<layer id="3666" name="/up_blocks.3/attentions.0/norm/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="I64">
-					<dim>3</dim>
+				<port id="1" precision="I64" names="/up_blocks.3/attentions.0/norm/Shape_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4729" name="Constant_128351" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_31473, onnx::Div_7299, onnx::Gather_7287"/>
-			</rt_info>
+		<layer id="3667" name="/up_blocks.3/attentions.0/norm/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>40960</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/norm/Reshape_1_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4730" name="onnx::Mul_7297" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7297"/>
-			</rt_info>
+		<layer id="3668" name="Constant_87157_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="1690233042" size="640" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Mul_7297">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4731" name="onnx::Slice_7298" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3669" name="Constant_87157" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7297, onnx::Slice_7298"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
 					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Slice_7298">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4732" name="ScatterUpdate_128352" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_31473, onnx::Div_7299, onnx::Gather_7287"/>
-			</rt_info>
+		<layer id="3670" name="/up_blocks.3/attentions.0/norm/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
+					<dim>320</dim>
 					<dim>1</dim>
-				</port>
-				<port id="3" precision="I32">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="I64">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/norm/Mul_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4733" name="Constant_128355" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941504" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_31473, onnx::Div_7299, onnx::Gather_7287"/>
-			</rt_info>
+		<layer id="3671" name="Constant_87158_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="1690233682" size="640" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4734" name="onnx::Div_7299" type="StridedSlice" version="opset1">
-			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask=""/>
+		<layer id="3672" name="Constant_87158" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_31473, onnx::Div_7299, onnx::Gather_7287"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>2560</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="3" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="FP32" names="onnx::Div_7299">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>1280</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4735" name="onnx::Mul_7307" type="Gelu" version="opset7">
-			<data approximation_mode="ERF"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7302, onnx::Erf_7301, onnx::Mul_7304, onnx::Mul_7305, onnx::Mul_7307"/>
-			</rt_info>
+		<layer id="3673" name="/up_blocks.3/attentions.0/norm/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>1280</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="onnx::Mul_7307">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/norm/Add_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>1280</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4736" name="input.1008" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.1008"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>1280</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>1280</dim>
-				</port>
-			</input>
+		<layer id="3674" name="up_blocks.3.attentions.0.proj_in.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320, 1, 1" offset="1690234322" size="204800" />
 			<output>
-				<port id="2" precision="FP32" names="input.1008">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>1280</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4737" name="Constant_148499" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 1280" offset="3388586544" size="1638400"/>
+		<layer id="3675" name="up_blocks.3.attentions.0.proj_in.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7310, onnx::MatMul_9115"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32" names="up_blocks.3.attentions.0.proj_in.weight">
 					<dim>320</dim>
-					<dim>1280</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4738" name="onnx::Add_7310" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7310, onnx::MatMul_9115"/>
-			</rt_info>
+		<layer id="3676" name="/up_blocks.3/attentions.0/proj_in/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>1280</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>320</dim>
-					<dim>1280</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_7310">
+				<port id="2" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3677" name="Reshape_28979_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="1690439122" size="640" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4739" name="onnx::Add_7311" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3678" name="Reshape_28979" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7311"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-					<dim>1</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_7311">
-					<dim>2</dim>
-					<dim>4096</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4740" name="onnx::Reshape_7312" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7312"/>
-			</rt_info>
+		<layer id="3679" name="/up_blocks.3/attentions.0/proj_in/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
+					<dim>1</dim>
 					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_7312">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/proj_in/Conv_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4741" name="Constant_90977" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18233080" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7321"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4742" name="Constant_90978" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
+		<layer id="3680" name="Constant_29007" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9116600" size="32" />
 			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4743" name="Gather_90979" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7321"/>
-			</rt_info>
-			<input>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_7321">
-					<dim>4</dim>
-				</port>
 			</output>
 		</layer>
-		<layer id="4744" name="onnx::Transpose_7322" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_7322"/>
-			</rt_info>
+		<layer id="3681" name="/up_blocks.3/attentions.0/Transpose" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_7322">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/Transpose_output_0">
 					<dim>2</dim>
 					<dim>64</dim>
 					<dim>64</dim>
@@ -79295,21 +60754,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="4745" name="Constant_31638" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="27579960" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_31638"/>
-			</rt_info>
+		<layer id="3682" name="/up_blocks.3/attentions.0/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="9116632" size="24" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.0/Constant_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4746" name="input.1012" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.1012"/>
-			</rt_info>
+		<layer id="3683" name="/up_blocks.3/attentions.0/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -79318,1879 +60772,1539 @@
 					<dim>320</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.1012">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/Reshape_output_0">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4747" name="m.up_blocks.3.attentions.0.proj_out.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320, 1, 1" offset="3390224944" size="409600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.3.attentions.0.proj_out.weight"/>
-			</rt_info>
+		<layer id="3684" name="Constant_29016" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.3.attentions.0.proj_out.weight">
-					<dim>320</dim>
-					<dim>320</dim>
-					<dim>1</dim>
+				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4748" name="Convolution_31640" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_31640"/>
-			</rt_info>
+		<layer id="3685" name="/up_blocks.3/attentions.0/transformer_blocks.0/norm1/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>320</dim>
-					<dim>320</dim>
-					<dim>1</dim>
+				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/norm1/Div_output_0">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4749" name="Reshape_31660" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="3390634544" size="1280"/>
+		<layer id="3686" name="Constant_87159_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="1690439762" size="640" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4750" name="onnx::Add_7324" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3687" name="Constant_87159" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_31659, Reshape_31660, onnx::Add_7324"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_7324">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4751" name="onnx::Concat_7325" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7325"/>
-			</rt_info>
+		<layer id="3688" name="/up_blocks.3/attentions.0/transformer_blocks.0/norm1/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>2</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Concat_7325">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/norm1/Mul_output_0">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4752" name="onnx::Cast_7326" type="Concat" version="opset1">
-			<data axis="1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.1016, onnx::Cast_7326"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</input>
+		<layer id="3689" name="Constant_87160_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="1690440402" size="640" />
 			<output>
-				<port id="2" precision="FP32" names="input.1016,onnx::Cast_7326">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4753" name="m.up_blocks.3.resnets.1.conv_shortcut.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 640, 1, 1" offset="3390635824" size="819200"/>
+		<layer id="3690" name="Constant_87160" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.3.resnets.1.conv_shortcut.weight"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
-			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.3.resnets.1.conv_shortcut.weight">
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>320</dim>
-					<dim>640</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4754" name="Convolution_32030" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_32030"/>
-			</rt_info>
+		<layer id="3691" name="/up_blocks.3/attentions.0/transformer_blocks.0/norm1/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>640</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>320</dim>
-					<dim>640</dim>
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/norm1/Add_1_output_0">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4755" name="Reshape_32050" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="3391455024" size="1280"/>
+		<layer id="3692" name="Constant_85954_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320" offset="1690441042" size="204800" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
 					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4756" name="onnx::Add_7371" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3693" name="Constant_85954" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_32049, Reshape_32050, onnx::Add_7371"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3694" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/to_q/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
 					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_7371">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/to_q/MatMul_output_0">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4757" name="onnx::Reshape_7328" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7328"/>
-			</rt_info>
+		<layer id="3695" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322744" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_7328">
-					<dim>3</dim>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Constant_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4758" name="onnx::InstanceNormalization_7329" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_7329"/>
-			</rt_info>
+		<layer id="3696" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>640</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>3</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_7329">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Reshape_output_0">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>81920</dim>
+					<dim>4096</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4759" name="Constant_31700" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_31700"/>
-			</rt_info>
+		<layer id="3697" name="Constant_29039" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4760" name="MVN_31701" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_31720, Concat_31765, MVN_31701, Multiply_31748, Reshape_31721, Reshape_31766, onnx::Reshape_7332"/>
-			</rt_info>
+		<layer id="3698" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Transpose" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>81920</dim>
+					<dim>4096</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_7332">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Transpose_output_0">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>81920</dim>
+					<dim>8</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4761" name="onnx::Reshape_7333" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7333"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</input>
+		<layer id="3699" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="9322808" size="24" />
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_7333">
-					<dim>4</dim>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Constant_1_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4762" name="onnx::Mul_7334" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7334"/>
-			</rt_info>
+		<layer id="3700" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>81920</dim>
+					<dim>8</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_7334">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Reshape_1_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4763" name="Constant_150581" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="3391456304" size="2560"/>
+		<layer id="3701" name="Constant_85961_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320" offset="1690645842" size="204800" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4764" name="onnx::Add_7337" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3702" name="Constant_85961" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7337"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3703" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/to_k/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>640</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_7337">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/to_k/MatMul_output_0">
 					<dim>2</dim>
-					<dim>640</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4765" name="Constant_150582" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="3391458864" size="2560"/>
+		<layer id="3704" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322744" size="32" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Constant_2_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4766" name="onnx::Cast_7340" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.1020, onnx::Cast_7340"/>
-			</rt_info>
+		<layer id="3705" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Reshape_2" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>640</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.1020,onnx::Cast_7340">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Reshape_2_output_0">
 					<dim>2</dim>
-					<dim>640</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>4096</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4767" name="input.1024" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.1024, onnx::Mul_7342"/>
-			</rt_info>
+		<layer id="3706" name="Constant_29055" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
+			<output>
+				<port id="0" precision="I64">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3707" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Transpose_1" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>640</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>4096</dim>
+					<dim>8</dim>
+					<dim>40</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="input.1024">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Transpose_1_output_0">
 					<dim>2</dim>
-					<dim>640</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>8</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4768" name="m.up_blocks.3.resnets.1.conv1.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 640, 3, 3" offset="3391461424" size="7372800"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.3.resnets.1.conv1.weight"/>
-			</rt_info>
+		<layer id="3708" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="9322808" size="24" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.3.resnets.1.conv1.weight">
-					<dim>320</dim>
-					<dim>640</dim>
-					<dim>3</dim>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Constant_3_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4769" name="Convolution_31806" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_31806"/>
-			</rt_info>
+		<layer id="3709" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Reshape_3" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>640</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>8</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>320</dim>
-					<dim>640</dim>
-					<dim>3</dim>
+				<port id="1" precision="I64">
 					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Reshape_3_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4770" name="Reshape_31826" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="3398834224" size="1280"/>
+		<layer id="3710" name="Constant_87161_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1" offset="9527632" size="2" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-					<dim>320</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4771" name="onnx::Add_7344" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3711" name="Constant_87161" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_31825, Reshape_31826, onnx::Add_7344"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-					<dim>320</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_7344">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4772" name="onnx::Gemm_7346" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gemm_7346, onnx::Mul_7345"/>
-			</rt_info>
+		<layer id="3712" name="Multiply_86227" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="onnx::Gemm_7346">
-					<dim>2</dim>
-					<dim>1280</dim>
+				<port id="2" precision="FP32">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4773" name="m.up_blocks.3.resnets.1.time_emb_proj.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 1280" offset="3398835504" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.3.resnets.1.time_emb_proj.weight"/>
-			</rt_info>
+		<layer id="3713" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Mul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.3.resnets.1.time_emb_proj.weight">
-					<dim>320</dim>
-					<dim>1280</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Add_output_0,/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Mul_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>4096</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4774" name="MatMul_31858" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="MatMul_31858"/>
-			</rt_info>
+		<layer id="3714" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Softmax" type="SoftMax" version="opset8">
+			<data axis="-1" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>1280</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>320</dim>
-					<dim>1280</dim>
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>4096</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
+				<port id="1" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Cast_output_0,/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Softmax_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>4096</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4775" name="Constant_150583" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320" offset="3400473904" size="1280"/>
+		<layer id="3715" name="Constant_85968_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320" offset="1690850642" size="204800" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4776" name="onnx::Unsqueeze_7347" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3716" name="Constant_85968" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Multiply_31859, onnx::Unsqueeze_7347"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
 					<dim>320</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
 					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_7347">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4777" name="onnx::Unsqueeze_7348" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7348"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7348">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4778" name="onnx::Unsqueeze_7349" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7349"/>
-			</rt_info>
+		<layer id="3717" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/to_v/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_7349">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/to_v/MatMul_output_0">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4779" name="onnx::Unsqueeze_7350" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="14129392" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7350"/>
-			</rt_info>
+		<layer id="3718" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Constant_4" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322744" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7350">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Constant_4_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4780" name="onnx::Add_7351" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7351"/>
-			</rt_info>
+		<layer id="3719" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Reshape_4" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>1</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_7351">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Reshape_4_output_0">
 					<dim>2</dim>
-					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>4096</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4781" name="onnx::Cast_7352" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.1028, onnx::Cast_7352"/>
-			</rt_info>
+		<layer id="3720" name="Constant_29071" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
+			<output>
+				<port id="0" precision="I64">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3721" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Transpose_2" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>4096</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.1028,onnx::Cast_7352">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Transpose_2_output_0">
 					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>8</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4782" name="onnx::Reshape_7354" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7354"/>
-			</rt_info>
+		<layer id="3722" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="9322808" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_7354">
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Constant_5_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4783" name="onnx::InstanceNormalization_7355" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_7355"/>
-			</rt_info>
+		<layer id="3723" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Reshape_5" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>8</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_7355">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>40960</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4784" name="Constant_31876" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_31876"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Reshape_5_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4785" name="MVN_31877" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_31896, Concat_31941, MVN_31877, Multiply_31924, Reshape_31897, Reshape_31942, onnx::Reshape_7358"/>
-			</rt_info>
+		<layer id="3724" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/MatMul_1" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="false" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>40960</dim>
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>4096</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_7358">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>40960</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/MatMul_1_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4786" name="onnx::Reshape_7359" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7359"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</input>
+		<layer id="3725" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Constant_8" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9732434" size="32" />
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_7359">
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Constant_8_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4787" name="onnx::Mul_7360" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7360"/>
-			</rt_info>
+		<layer id="3726" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Reshape_6" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>40960</dim>
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_7360">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Reshape_6_output_0">
 					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>8</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4788" name="Constant_150584" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="3400475184" size="1280"/>
+		<layer id="3727" name="Constant_29097" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="0" precision="I64">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4789" name="onnx::Add_7363" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7363"/>
-			</rt_info>
+		<layer id="3728" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Transpose_4" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>8</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_7363">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Transpose_4_output_0">
 					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>4096</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4790" name="Constant_150585" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="3400476464" size="1280"/>
+		<layer id="3729" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Constant_9" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="9116632" size="24" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Constant_9_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4791" name="onnx::Cast_7366" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.1032, onnx::Cast_7366"/>
-			</rt_info>
+		<layer id="3730" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Reshape_7" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="input.1032,onnx::Cast_7366">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>4096</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="4792" name="input.1036" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.1036, onnx::Mul_7368"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+				<port id="1" precision="I64">
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="input.1036">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/Reshape_7_output_0">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4793" name="m.up_blocks.3.resnets.1.conv2.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320, 3, 3" offset="3400477744" size="3686400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.3.resnets.1.conv2.weight"/>
-			</rt_info>
+		<layer id="3731" name="Constant_85975_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320" offset="1691055442" size="204800" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.3.resnets.1.conv2.weight">
+				<port id="0" precision="FP16">
 					<dim>320</dim>
 					<dim>320</dim>
-					<dim>3</dim>
-					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4794" name="Convolution_31982" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
+		<layer id="3732" name="Constant_85975" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_31982"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>320</dim>
 					<dim>320</dim>
-					<dim>3</dim>
-					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4795" name="Reshape_32002" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="3404164144" size="1280"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
 					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4796" name="onnx::Add_7370" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_32001, Reshape_32002, onnx::Add_7370"/>
-			</rt_info>
+		<layer id="3733" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/to_out.0/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
 					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_7370">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/to_out.0/MatMul_output_0">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4797" name="onnx::Div_7372" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.1040, onnx::Div_7372"/>
-			</rt_info>
+		<layer id="3734" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/to_out.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>2</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.1040,onnx::Div_7372">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn1/to_out.0/Add_output_0">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4798" name="onnx::Reshape_7387" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7387"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_7387">
-					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4799" name="onnx::InstanceNormalization_7388" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_7388"/>
-			</rt_info>
+		<layer id="3735" name="/up_blocks.3/attentions.0/transformer_blocks.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_7388">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/Add_output_0">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>40960</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4800" name="Constant_32106" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_32106"/>
-			</rt_info>
+		<layer id="3736" name="Constant_29109" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4801" name="MVN_32107" type="MVN" version="opset6">
-			<data eps="9.9999999747524271e-07" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_32126, Concat_32171, MVN_32107, Multiply_32154, Reshape_32127, Reshape_32172, onnx::Reshape_7391"/>
-			</rt_info>
+		<layer id="3737" name="/up_blocks.3/attentions.0/transformer_blocks.0/norm2/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>40960</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_7391">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/norm2/Div_output_0">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>40960</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3738" name="Constant_87163_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="1691260242" size="640" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4802" name="onnx::Reshape_7392" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
+		<layer id="3739" name="Constant_87163" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7375, onnx::Gather_7378, onnx::Gather_7381, onnx::Gather_7384, onnx::Reshape_7392"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_7375,onnx::Gather_7378,onnx::Gather_7381,onnx::Gather_7384,onnx::Reshape_7392">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4803" name="onnx::Mul_7393" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7393"/>
-			</rt_info>
+		<layer id="3740" name="/up_blocks.3/attentions.0/transformer_blocks.0/norm2/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>40960</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_7393">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/norm2/Mul_output_0">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4804" name="Constant_150586" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="3404165424" size="1280"/>
+		<layer id="3741" name="Constant_87164_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="1691260882" size="640" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4805" name="onnx::Add_7396" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3742" name="Constant_87164" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7396"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Add_7396">
-					<dim>2</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="4806" name="Constant_150587" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="3404166704" size="1280"/>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4807" name="input.1044" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.1044"/>
-			</rt_info>
+		<layer id="3743" name="/up_blocks.3/attentions.0/transformer_blocks.0/norm2/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.1044">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/norm2/Add_1_output_0">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4808" name="m.up_blocks.3.attentions.1.proj_in.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320, 1, 1" offset="3404167984" size="409600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.3.attentions.1.proj_in.weight"/>
-			</rt_info>
+		<layer id="3744" name="Constant_85983_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320" offset="1691261522" size="204800" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.3.attentions.1.proj_in.weight">
+				<port id="0" precision="FP16">
 					<dim>320</dim>
 					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4809" name="Convolution_32209" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
+		<layer id="3745" name="Constant_85983" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_32209"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>320</dim>
 					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4810" name="Reshape_32229" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="3404577584" size="1280"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
 					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4811" name="onnx::Transpose_7400" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_32228, Reshape_32229, onnx::Transpose_7400"/>
-			</rt_info>
+		<layer id="3746" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/to_q/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
 					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_7400">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/to_q/MatMul_output_0">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4812" name="Constant_32257" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18233080" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_32257"/>
-			</rt_info>
+		<layer id="3747" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322744" size="32" />
 			<output>
-				<port id="0" precision="I64">
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Constant_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4813" name="onnx::Reshape_7401" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7401"/>
-			</rt_info>
+		<layer id="3748" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_7401">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Reshape_output_0">
 					<dim>2</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-					<dim>320</dim>
+					<dim>4096</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4814" name="Constant_90156" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_32083, onnx::Concat_7404, onnx::Gather_7376, onnx::Unsqueeze_7377, onnx::Unsqueeze_7403"/>
-			</rt_info>
+		<layer id="3749" name="Constant_29132" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4815" name="Constant_32083" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_32083"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4816" name="onnx::Unsqueeze_7377" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_32083, onnx::Concat_7404, onnx::Gather_7376, onnx::Unsqueeze_7377, onnx::Unsqueeze_7403"/>
-			</rt_info>
+		<layer id="3750" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Transpose" type="Transpose" version="opset1">
 			<input>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_7404">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Transpose_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4817" name="onnx::Gather_7382" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7382"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_7382"/>
-			</output>
-		</layer>
-		<layer id="4818" name="Constant_32091" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_32091"/>
-			</rt_info>
+		<layer id="3751" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="9322808" size="24" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Constant_1_output_0">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="4819" name="onnx::Mul_7383" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_32091, onnx::Gather_7382, onnx::Mul_7383"/>
-			</rt_info>
+		<layer id="3752" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>3</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Mul_7383"/>
-			</output>
-		</layer>
-		<layer id="4820" name="onnx::Gather_7385" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="14129392" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7385"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_7385"/>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Reshape_1_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="4821" name="Constant_32095" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_32095"/>
-			</rt_info>
+		<layer id="3753" name="Constant_85990_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 768" offset="1691466322" size="491520" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>768</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="4822" name="onnx::Mul_7386" type="Gather" version="opset8">
-			<data batch_dims="0"/>
+		<layer id="3754" name="Constant_85990" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_32095, onnx::Gather_7385, onnx::Mul_7386"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>768</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Mul_7386"/>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>768</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="4823" name="onnx::Unsqueeze_7402" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7402"/>
-			</rt_info>
+		<layer id="3755" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/to_k/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>768</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>768</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_7402"/>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/to_k/MatMul_output_0">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>320</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="4824" name="onnx::Unsqueeze_7405" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7405"/>
-			</rt_info>
+		<layer id="3756" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="10634866" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7405">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Constant_2_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4825" name="onnx::Concat_7406" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7406, onnx::Unsqueeze_7405"/>
-			</rt_info>
+		<layer id="3757" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Reshape_2" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>320</dim>
+				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7406">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Reshape_2_output_0">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4826" name="Constant_90165" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_32087, onnx::Concat_7408, onnx::Gather_7379, onnx::Unsqueeze_7380, onnx::Unsqueeze_7407"/>
-			</rt_info>
+		<layer id="3758" name="Constant_29148" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4827" name="Constant_32087" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_32087"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4828" name="onnx::Unsqueeze_7380" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_32087, onnx::Concat_7408, onnx::Gather_7379, onnx::Unsqueeze_7380, onnx::Unsqueeze_7407"/>
-			</rt_info>
+		<layer id="3759" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Transpose_1" type="Transpose" version="opset1">
 			<input>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_7408">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Transpose_1_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>77</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4829" name="onnx::Reshape_7409" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7409"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
+		<layer id="3760" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="10634898" size="24" />
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_7409">
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Constant_3_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4830" name="input.1048" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.1048"/>
-			</rt_info>
+		<layer id="3761" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Reshape_3" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-					<dim>320</dim>
+					<dim>8</dim>
+					<dim>77</dim>
+					<dim>40</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.1048">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Reshape_3_output_0">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4831" name="Constant_32342" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_32342"/>
-			</rt_info>
+		<layer id="3762" name="Constant_87165_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1" offset="9527632" size="2" />
 			<output>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4832" name="onnx::Mul_7419" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
+		<layer id="3763" name="Constant_87165" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7415, onnx::Div_7418, onnx::Mul_7419, onnx::Pow_7412, onnx::ReduceMean_7414, onnx::Sqrt_7417, onnx::Sub_7411"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_7419">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4833" name="Constant_150588" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="3404578864" size="1280"/>
-			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4834" name="onnx::Add_7420" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7420"/>
-			</rt_info>
+		<layer id="3764" name="Multiply_86229" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>40</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>320</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_7420">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
+				<port id="2" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4835" name="Constant_150589" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="3404580144" size="1280"/>
-			<output>
+		<layer id="3765" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Mul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
+			<input>
 				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>320</dim>
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>40</dim>
+				</port>
+			</input>
+			<output>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Add_output_0,/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Mul_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>77</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4836" name="onnx::MatMul_7421" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_7421"/>
-			</rt_info>
+		<layer id="3766" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Softmax" type="SoftMax" version="opset8">
+			<data axis="-1" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>2</dim>
+					<dim>16</dim>
 					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>320</dim>
+					<dim>77</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_7421">
-					<dim>2</dim>
+				<port id="1" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Cast_output_0,/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Softmax_output_0">
+					<dim>16</dim>
 					<dim>4096</dim>
+					<dim>77</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3767" name="Constant_85997_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 768" offset="1691957842" size="491520" />
+			<output>
+				<port id="0" precision="FP16">
 					<dim>320</dim>
+					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4837" name="Constant_148510" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320" offset="3404581424" size="409600"/>
+		<layer id="3768" name="Constant_85997" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9122, q.223"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
+			<input>
+				<port id="0" precision="FP16">
 					<dim>320</dim>
+					<dim>768</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
 					<dim>320</dim>
+					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4838" name="q.223" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9122, q.223"/>
-			</rt_info>
+		<layer id="3769" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/to_v/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
+					<dim>77</dim>
+					<dim>768</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>320</dim>
-					<dim>320</dim>
+					<dim>768</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.223">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/to_v/MatMul_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
+					<dim>77</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4839" name="Constant_109476" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
+		<layer id="3770" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Constant_4" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="10634866" size="32" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Constant_4_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4840" name="onnx::Gather_7428" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7428, onnx::Gather_7431, onnx::Gather_7434"/>
-			</rt_info>
+		<layer id="3771" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Reshape_4" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
+					<dim>77</dim>
 					<dim>320</dim>
 				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_7428,onnx::Gather_7431,onnx::Gather_7434">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Reshape_4_output_0">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4841" name="onnx::Gather_7435" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7435"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_7435"/>
-			</output>
-		</layer>
-		<layer id="4842" name="Constant_32368" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_32368"/>
-			</rt_info>
+		<layer id="3772" name="Constant_29164" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4843" name="onnx::Div_7436" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_32368, onnx::Div_7436, onnx::Gather_7435"/>
-			</rt_info>
-			<input>
 				<port id="0" precision="I64">
-					<dim>3</dim>
+					<dim>4</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Div_7436"/>
-			</output>
-		</layer>
-		<layer id="4844" name="onnx::Div_7437" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_7437"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_7437"/>
 			</output>
 		</layer>
-		<layer id="4845" name="onnx::Cast_7438" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_7438, onnx::Cast_7439, onnx::Div_7437, onnx::Unsqueeze_7440"/>
-			</rt_info>
+		<layer id="3773" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Transpose_2" type="Transpose" version="opset1">
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>8</dim>
+					<dim>40</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_7438,onnx::Cast_7439,onnx::Unsqueeze_7440"/>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Transpose_2_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>77</dim>
+					<dim>40</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="4846" name="onnx::Unsqueeze_7448" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7448"/>
-			</rt_info>
+		<layer id="3774" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="10634898" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7448">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Constant_5_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4847" name="onnx::Concat_7449" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7449, onnx::Unsqueeze_7448"/>
-			</rt_info>
+		<layer id="3775" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Reshape_5" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>77</dim>
+					<dim>40</dim>
+				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7449">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Reshape_5_output_0">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4848" name="onnx::Reshape_7450" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_7450"/>
-			</rt_info>
+		<layer id="3776" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/MatMul_1" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="false" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>77</dim>
 				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>40</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/MatMul_1_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3777" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Constant_8" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9732434" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Constant_8_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4849" name="onnx::Transpose_7451" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_7451"/>
-			</rt_info>
+		<layer id="3778" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Reshape_6" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>2</dim>
+					<dim>16</dim>
 					<dim>4096</dim>
-					<dim>320</dim>
+					<dim>40</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_7451">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Reshape_6_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>8</dim>
+					<dim>4096</dim>
 					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4850" name="Constant_32481" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_32481"/>
-			</rt_info>
+		<layer id="3779" name="Constant_29190" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4851" name="onnx::Reshape_7452" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7452"/>
-			</rt_info>
+		<layer id="3780" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Transpose_4" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>8</dim>
+					<dim>4096</dim>
 					<dim>40</dim>
 				</port>
 				<port id="1" precision="I64">
@@ -81198,249 +62312,115 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_7452">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Transpose_4_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>4096</dim>
+					<dim>8</dim>
 					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4852" name="onnx::Gather_7429" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7429"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_7429"/>
-			</output>
-		</layer>
-		<layer id="4853" name="Constant_32360" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_32360"/>
-			</rt_info>
+		<layer id="3781" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Constant_9" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="9116632" size="24" />
 			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4854" name="onnx::Unsqueeze_7430" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_32360, onnx::Gather_7429, onnx::Unsqueeze_7430"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Constant_9_output_0">
 					<dim>3</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_7430"/>
-			</output>
-		</layer>
-		<layer id="4855" name="onnx::Mul_7453" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7453"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_7453"/>
 			</output>
 		</layer>
-		<layer id="4856" name="onnx::Unsqueeze_7454" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7453, onnx::Unsqueeze_7454"/>
-			</rt_info>
+		<layer id="3782" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Reshape_7" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_7454"/>
-			</output>
-		</layer>
-		<layer id="4857" name="onnx::Unsqueeze_7459" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7459"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7459">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="4858" name="onnx::Concat_7460" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7460, onnx::Unsqueeze_7459"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7460">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/Reshape_7_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4859" name="Constant_90192" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_32364, onnx::Concat_7462, onnx::Gather_7432, onnx::Unsqueeze_7433, onnx::Unsqueeze_7461"/>
-			</rt_info>
+		<layer id="3783" name="Constant_86004_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320" offset="1692449362" size="204800" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4860" name="Constant_32364" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_32364"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4861" name="onnx::Unsqueeze_7433" type="Gather" version="opset8">
-			<data batch_dims="0"/>
+		<layer id="3784" name="Constant_86004" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_32364, onnx::Concat_7462, onnx::Gather_7432, onnx::Unsqueeze_7433, onnx::Unsqueeze_7461"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_7462">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4862" name="onnx::Div_7455" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_7455"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_7455"/>
-			</output>
-		</layer>
-		<layer id="4863" name="onnx::Cast_7456" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_7456, onnx::Cast_7457, onnx::Div_7455, onnx::Unsqueeze_7458"/>
-			</rt_info>
+		<layer id="3785" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/to_out.0/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_7456,onnx::Cast_7457,onnx::Unsqueeze_7458"/>
-			</output>
-		</layer>
-		<layer id="4864" name="onnx::Unsqueeze_7463" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7463"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7463">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="4865" name="onnx::Concat_7464" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7464, onnx::Unsqueeze_7463"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7464">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/to_out.0/MatMul_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4866" name="onnx::Reshape_7465" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7465"/>
-			</rt_info>
+		<layer id="3786" name="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/to_out.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_7465">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4867" name="q.227" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="q.227"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.227">
-					<dim>16</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/attn2/to_out.0/Add_output_0">
+					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4868" name="Constant_148517" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320" offset="3404991024" size="409600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.223, onnx::MatMul_9123"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>320</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4869" name="k.223" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.223, onnx::MatMul_9123"/>
-			</rt_info>
+		<layer id="3787" name="/up_blocks.3/attentions.0/transformer_blocks.0/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -81448,316 +62428,276 @@
 					<dim>320</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>320</dim>
+					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.223">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/Add_1_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4870" name="Constant_109545" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
+		<layer id="3788" name="Constant_29202" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>2</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4871" name="onnx::Gather_7467" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7467, onnx::Gather_7470, onnx::Gather_7473"/>
-			</rt_info>
+		<layer id="3789" name="/up_blocks.3/attentions.0/transformer_blocks.0/norm3/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
+				<port id="1" precision="I64">
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_7467,onnx::Gather_7470,onnx::Gather_7473">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/norm3/Div_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4872" name="onnx::Gather_7474" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7474"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_7474"/>
-			</output>
-		</layer>
-		<layer id="4873" name="Constant_32609" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_32609"/>
-			</rt_info>
+		<layer id="3790" name="Constant_87167_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="1692654162" size="640" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="4874" name="onnx::Div_7475" type="Gather" version="opset8">
-			<data batch_dims="0"/>
+		<layer id="3791" name="Constant_87167" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_32609, onnx::Div_7475, onnx::Gather_7474"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_7475"/>
-			</output>
-		</layer>
-		<layer id="4875" name="onnx::Div_7476" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_7476"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_7476"/>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="4876" name="onnx::Cast_7477" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_7477, onnx::Cast_7478, onnx::Div_7476, onnx::Unsqueeze_7479"/>
-			</rt_info>
+		<layer id="3792" name="/up_blocks.3/attentions.0/transformer_blocks.0/norm3/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_7477,onnx::Cast_7478,onnx::Unsqueeze_7479"/>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/norm3/Mul_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="4877" name="onnx::Unsqueeze_7487" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7487"/>
-			</rt_info>
+		<layer id="3793" name="Constant_87168_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="1692654802" size="640" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7487">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4878" name="onnx::Concat_7488" type="Unsqueeze" version="opset1">
+		<layer id="3794" name="Constant_87168" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7488, onnx::Unsqueeze_7487"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7488">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4879" name="onnx::Reshape_7489" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_7489"/>
-			</rt_info>
+		<layer id="3795" name="/up_blocks.3/attentions.0/transformer_blocks.0/norm3/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64">
-					<dim>4</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/norm3/Add_1_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3796" name="Constant_86012_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="2560, 320" offset="1692655442" size="1638400" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>2560</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4880" name="onnx::Transpose_7490" type="Reshape" version="opset1">
-			<data special_zero="true"/>
+		<layer id="3797" name="Constant_86012" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_7490"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>2560</dim>
+					<dim>320</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>2560</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3798" name="/up_blocks.3/attentions.0/transformer_blocks.0/ff/net.0/proj/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>2560</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_7490">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/ff/net.0/proj/MatMul_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4881" name="Constant_32722" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_32722"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4882" name="onnx::Reshape_7491" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7491"/>
-			</rt_info>
+		<layer id="3799" name="/up_blocks.3/attentions.0/transformer_blocks.0/ff/net.0/proj/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>2560</dim>
+				</port>
+				<port id="1" precision="FP32">
 					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>2560</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_7491">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/ff/net.0/proj/Add_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4883" name="onnx::Gather_7468" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7468"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_7468"/>
-			</output>
-		</layer>
-		<layer id="4884" name="Constant_32601" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_32601"/>
-			</rt_info>
+		<layer id="3800" name="Constant_78821" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4885" name="onnx::Unsqueeze_7469" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_32601, onnx::Gather_7468, onnx::Unsqueeze_7469"/>
-			</rt_info>
-			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_7469"/>
-			</output>
-		</layer>
-		<layer id="4886" name="onnx::Mul_7492" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7492"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_7492"/>
 			</output>
 		</layer>
-		<layer id="4887" name="onnx::Unsqueeze_7493" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7492, onnx::Unsqueeze_7493"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
+		<layer id="3801" name="Constant_78822" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_7493"/>
+				<port id="0" precision="I64">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="4888" name="onnx::Unsqueeze_7498" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7498"/>
-			</rt_info>
+		<layer id="3802" name="Constant_78818" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7498">
+				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4889" name="onnx::Concat_7499" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7499, onnx::Unsqueeze_7498"/>
-			</rt_info>
+		<layer id="3803" name="/up_blocks.3/attentions.0/transformer_blocks.0/ff/net.0/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>2560</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7499">
-					<dim>1</dim>
+				<port id="1" precision="I64" names="/up_blocks.3/attentions.0/transformer_blocks.0/ff/net.0/Shape_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4890" name="Constant_90219" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_32605, onnx::Concat_7501, onnx::Gather_7471, onnx::Unsqueeze_7472, onnx::Unsqueeze_7500"/>
-			</rt_info>
+		<layer id="3804" name="/up_blocks.3/attentions.0/transformer_blocks.0/ff/net.0/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
-				<port id="0" precision="I64">
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.0/transformer_blocks.0/ff/net.0/Constant_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4891" name="Constant_32605" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_32605"/>
-			</rt_info>
+		<layer id="3805" name="Constant_29219" type="Const" version="opset1">
+			<data element_type="i64" shape="" offset="2143392" size="8" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64" />
 			</output>
 		</layer>
-		<layer id="4892" name="onnx::Unsqueeze_7472" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_32605, onnx::Concat_7501, onnx::Gather_7471, onnx::Unsqueeze_7472, onnx::Unsqueeze_7500"/>
-			</rt_info>
+		<layer id="3806" name="/up_blocks.3/attentions.0/transformer_blocks.0/ff/net.0/Gather" type="Gather" version="opset8">
+			<data batch_dims="0" />
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
@@ -81765,68 +62705,24 @@
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_7501">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4893" name="onnx::Div_7494" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_7494"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_7494"/>
-			</output>
-		</layer>
-		<layer id="4894" name="onnx::Cast_7495" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_7495, onnx::Cast_7496, onnx::Div_7494, onnx::Unsqueeze_7497"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="2" precision="I64" />
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_7495,onnx::Cast_7496,onnx::Unsqueeze_7497"/>
-			</output>
-		</layer>
-		<layer id="4895" name="onnx::Unsqueeze_7502" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7502"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7502">
+				<port id="3" precision="I64" names="/up_blocks.3/attentions.0/transformer_blocks.0/ff/net.0/Gather_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4896" name="onnx::Concat_7503" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7503, onnx::Unsqueeze_7502"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
+		<layer id="3807" name="/up_blocks.3/attentions.0/transformer_blocks.0/ff/net.0/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="2143400" size="8" />
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7503">
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.0/transformer_blocks.0/ff/net.0/Constant_2_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4897" name="onnx::Reshape_7504" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7504"/>
-			</rt_info>
+		<layer id="3808" name="/up_blocks.3/attentions.0/transformer_blocks.0/ff/net.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="I64">
 					<dim>1</dim>
@@ -81834,273 +62730,181 @@
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_7504">
-					<dim>3</dim>
+				<port id="2" precision="I64" names="/up_blocks.3/attentions.0/transformer_blocks.0/ff/net.0/Add_output_0">
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4898" name="k.227" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.227"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
+		<layer id="3809" name="/up_blocks.3/attentions.0/transformer_blocks.0/ff/net.0/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="2" precision="FP32" names="k.227">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.0/transformer_blocks.0/ff/net.0/Constant_3_output_0">
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4899" name="onnx::Mul_7545" type="Einsum" version="opset7">
-			<data equation="bid,bjd-&gt;bij"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7545"/>
-			</rt_info>
+		<layer id="3810" name="/up_blocks.3/attentions.0/transformer_blocks.0/ff/net.0/Div" type="Divide" version="opset1">
+			<data auto_broadcast="numpy" m_pythondiv="true" />
 			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+				<port id="0" precision="I64">
+					<dim>1</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+				<port id="1" precision="I64">
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_7545">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>4096</dim>
+				<port id="2" precision="I64" names="/up_blocks.3/attentions.0/transformer_blocks.0/ff/net.0/Div_output_0,/up_blocks.3/attentions.0/transformer_blocks.0/ff/net.0/Mul_output_0">
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4900" name="Constant_150590" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1" offset="19054992" size="4"/>
+		<layer id="3811" name="Constant_78817" type="Const" version="opset1">
+			<data element_type="i32" shape="1" offset="12970946" size="4" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="0" precision="I32">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4901" name="onnx::Softmax_7547" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Softmax_7547"/>
-			</rt_info>
+		<layer id="3812" name="ScatterUpdate_78823" type="ScatterUpdate" version="opset3">
 			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>4096</dim>
+				<port id="0" precision="I64">
+					<dim>3</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="onnx::Softmax_7547">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>4096</dim>
+				<port id="2" precision="I64">
+					<dim>1</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="4902" name="attn.111" type="SoftMax" version="opset8">
-			<data axis="-1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="attn.111"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>4096</dim>
+				<port id="3" precision="I32">
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="attn.111">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>4096</dim>
+				<port id="4" precision="I64">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4903" name="Constant_148524" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320" offset="3405400624" size="409600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9124, v.223"/>
-			</rt_info>
+		<layer id="3813" name="Constant_78826" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970950" size="24" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>320</dim>
-					<dim>320</dim>
+				<port id="0" precision="I64">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4904" name="v.223" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9124, v.223"/>
-			</rt_info>
+		<layer id="3814" name="/up_blocks.3/attentions.0/transformer_blocks.0/ff/net.0/Slice" type="StridedSlice" version="opset1">
+			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask="" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>320</dim>
+					<dim>2560</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>320</dim>
-					<dim>320</dim>
+				<port id="1" precision="I64">
+					<dim>3</dim>
+				</port>
+				<port id="2" precision="I64">
+					<dim>3</dim>
+				</port>
+				<port id="3" precision="I64">
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.223">
+				<port id="4" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/ff/net.0/Slice_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>320</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4905" name="Constant_109614" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
+		<layer id="3815" name="Constant_78890" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4906" name="onnx::Gather_7506" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7506, onnx::Gather_7509, onnx::Gather_7512"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-			</input>
-			<output>
-				<port id="1" precision="I64" names="onnx::Gather_7506,onnx::Gather_7509,onnx::Gather_7512">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4907" name="onnx::Gather_7513" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7513"/>
-			</rt_info>
+		<layer id="3816" name="Constant_78889" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Gather_7513"/>
+				<port id="0" precision="I64">
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="4908" name="Constant_32850" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_32850"/>
-			</rt_info>
+		<layer id="3817" name="Constant_78888" type="Const" version="opset1">
+			<data element_type="i32" shape="1" offset="12970946" size="4" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I32">
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="4909" name="onnx::Div_7514" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_32850, onnx::Div_7514, onnx::Gather_7513"/>
-			</rt_info>
+		<layer id="3818" name="ScatterUpdate_78891" type="ScatterUpdate" version="opset3">
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
+				<port id="1" precision="I64">
+					<dim>1</dim>
+				</port>
+				<port id="2" precision="I64">
+					<dim>1</dim>
+				</port>
+				<port id="3" precision="I32">
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_7514"/>
-			</output>
-		</layer>
-		<layer id="4910" name="onnx::Div_7515" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_7515"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_7515"/>
+				<port id="4" precision="I64">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="4911" name="onnx::Cast_7516" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_7516, onnx::Cast_7517, onnx::Div_7515, onnx::Unsqueeze_7518"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
+		<layer id="3819" name="Constant_78892" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_7516,onnx::Cast_7517,onnx::Unsqueeze_7518"/>
+				<port id="0" precision="I64">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="4912" name="onnx::Unsqueeze_7526" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7526"/>
-			</rt_info>
+		<layer id="3820" name="/up_blocks.3/attentions.0/transformer_blocks.0/ff/net.0/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7526">
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.0/transformer_blocks.0/ff/net.0/Constant_5_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4913" name="onnx::Concat_7527" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7527, onnx::Unsqueeze_7526"/>
-			</rt_info>
+		<layer id="3821" name="/up_blocks.3/attentions.0/transformer_blocks.0/ff/net.0/Mul_1" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64">
+					<dim>1</dim>
+				</port>
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7527">
+				<port id="2" precision="I64" names="/up_blocks.3/attentions.0/transformer_blocks.0/ff/net.0/Mul_1_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4914" name="onnx::Reshape_7528" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_7528"/>
-			</rt_info>
+		<layer id="3822" name="ScatterUpdate_78893" type="ScatterUpdate" version="opset3">
 			<input>
 				<port id="0" precision="I64">
-					<dim>2</dim>
+					<dim>3</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>1</dim>
@@ -82108,2183 +62912,1950 @@
 				<port id="2" precision="I64">
 					<dim>1</dim>
 				</port>
+				<port id="3" precision="I32">
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64">
-					<dim>4</dim>
+				<port id="4" precision="I64">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4915" name="onnx::Transpose_7529" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_7529"/>
-			</rt_info>
+		<layer id="3823" name="Constant_78896" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970950" size="24" />
+			<output>
+				<port id="0" precision="I64">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3824" name="/up_blocks.3/attentions.0/transformer_blocks.0/ff/net.0/Slice_1" type="StridedSlice" version="opset1">
+			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask="" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>320</dim>
+					<dim>2560</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>3</dim>
+				</port>
+				<port id="2" precision="I64">
+					<dim>3</dim>
+				</port>
+				<port id="3" precision="I64">
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_7529">
+				<port id="4" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/ff/net.0/Slice_1_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4916" name="Constant_32963" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_32963"/>
-			</rt_info>
+		<layer id="3825" name="/up_blocks.3/attentions.0/transformer_blocks.0/ff/net.0/Mul_3" type="Gelu" version="opset7">
+			<data approximation_mode="ERF" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/ff/net.0/Mul_3_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4917" name="onnx::Reshape_7530" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7530"/>
-			</rt_info>
+		<layer id="3826" name="/up_blocks.3/attentions.0/transformer_blocks.0/ff/net.0/Mul_4" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_7530">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/ff/net.0/Mul_4_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4918" name="onnx::Gather_7507" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7507"/>
-			</rt_info>
+		<layer id="3827" name="Constant_86020_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 1280" offset="1694293842" size="819200" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Gather_7507"/>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>1280</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="4919" name="Constant_32842" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
+		<layer id="3828" name="Constant_86020" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_32842"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>1280</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="4920" name="onnx::Unsqueeze_7508" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_32842, onnx::Gather_7507, onnx::Unsqueeze_7508"/>
-			</rt_info>
+		<layer id="3829" name="/up_blocks.3/attentions.0/transformer_blocks.0/ff/net.2/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>1280</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_7508"/>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/ff/net.2/MatMul_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="4921" name="onnx::Mul_7531" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7531"/>
-			</rt_info>
+		<layer id="3830" name="/up_blocks.3/attentions.0/transformer_blocks.0/ff/net.2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64" names="onnx::Mul_7531"/>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/ff/net.2/Add_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="4922" name="onnx::Unsqueeze_7532" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7531, onnx::Unsqueeze_7532"/>
-			</rt_info>
+		<layer id="3831" name="/up_blocks.3/attentions.0/transformer_blocks.0/Add_2" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_7532"/>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/transformer_blocks.0/Add_2_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="4923" name="onnx::Unsqueeze_7537" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7537"/>
-			</rt_info>
+		<layer id="3832" name="/up_blocks.3/attentions.0/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="13790174" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7537">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.0/Constant_1_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4924" name="onnx::Concat_7538" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7538, onnx::Unsqueeze_7537"/>
-			</rt_info>
+		<layer id="3833" name="/up_blocks.3/attentions.0/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
+				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7538">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/Reshape_1_output_0">
+					<dim>2</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4925" name="Constant_90246" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_32846, onnx::Concat_7540, onnx::Gather_7510, onnx::Unsqueeze_7511, onnx::Unsqueeze_7539"/>
-			</rt_info>
+		<layer id="3834" name="Constant_29384" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="13790206" size="32" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4926" name="Constant_32846" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_32846"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4927" name="onnx::Unsqueeze_7511" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_32846, onnx::Concat_7540, onnx::Gather_7510, onnx::Unsqueeze_7511, onnx::Unsqueeze_7539"/>
-			</rt_info>
+		<layer id="3835" name="/up_blocks.3/attentions.0/Transpose_1" type="Transpose" version="opset1">
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+					<dim>320</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_7540">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/Transpose_1_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4928" name="onnx::Div_7533" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_7533"/>
-			</rt_info>
+		<layer id="3836" name="up_blocks.3.attentions.0.proj_out.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320, 1, 1" offset="1695113042" size="204800" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_7533"/>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="4929" name="onnx::Cast_7534" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
+		<layer id="3837" name="up_blocks.3.attentions.0.proj_out.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_7534, onnx::Cast_7535, onnx::Div_7533, onnx::Unsqueeze_7536"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_7534,onnx::Cast_7535,onnx::Unsqueeze_7536"/>
-			</output>
-		</layer>
-		<layer id="4930" name="onnx::Unsqueeze_7541" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7541"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7541">
+				<port id="1" precision="FP32" names="up_blocks.3.attentions.0.proj_out.weight">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4931" name="onnx::Concat_7542" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7542, onnx::Unsqueeze_7541"/>
-			</rt_info>
+		<layer id="3838" name="/up_blocks.3/attentions.0/proj_out/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7542">
+				<port id="2" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3839" name="Reshape_29406_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="1695317842" size="640" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4932" name="onnx::Reshape_7543" type="Concat" version="opset1">
-			<data axis="0"/>
+		<layer id="3840" name="Reshape_29406" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7543"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
+					<dim>320</dim>
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_7543">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4933" name="v.227" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="v.227"/>
-			</rt_info>
+		<layer id="3841" name="/up_blocks.3/attentions.0/proj_out/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.227">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/proj_out/Conv_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4934" name="out.111" type="Einsum" version="opset7">
-			<data equation="bij,bjd-&gt;bid"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="out.111"/>
-			</rt_info>
+		<layer id="3842" name="/up_blocks.3/attentions.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>4096</dim>
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="out.111">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.0/Add_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4935" name="onnx::Gather_7550" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7550, onnx::Gather_7553, onnx::Gather_7556"/>
-			</rt_info>
+		<layer id="3843" name="/up_blocks.3/Concat_1" type="Concat" version="opset1">
+			<data axis="1" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_7550,onnx::Gather_7553,onnx::Gather_7556">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/Concat_1_output_0">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4936" name="onnx::Gather_7551" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7551"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_7551"/>
-			</output>
-		</layer>
-		<layer id="4937" name="Constant_33088" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_33088"/>
-			</rt_info>
+		<layer id="3844" name="up_blocks.3.resnets.1.conv_shortcut.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 640, 1, 1" offset="1695318482" size="409600" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="4938" name="onnx::Div_7552" type="Gather" version="opset8">
-			<data batch_dims="0"/>
+		<layer id="3845" name="up_blocks.3.resnets.1.conv_shortcut.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_33088, onnx::Div_7552, onnx::Gather_7551"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_7552"/>
-			</output>
-		</layer>
-		<layer id="4939" name="onnx::Div_7559" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_7559"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_7559"/>
+				<port id="1" precision="FP32" names="up_blocks.3.resnets.1.conv_shortcut.weight">
+					<dim>320</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="4940" name="onnx::Cast_7560" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_7560, onnx::Cast_7561, onnx::Div_7559, onnx::Unsqueeze_7562"/>
-			</rt_info>
+		<layer id="3846" name="/up_blocks.3/resnets.1/conv_shortcut/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_7560,onnx::Cast_7561,onnx::Unsqueeze_7562"/>
+				<port id="2" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="4941" name="onnx::Unsqueeze_7564" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7564"/>
-			</rt_info>
+		<layer id="3847" name="Reshape_29791_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="1695728082" size="640" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7564">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4942" name="onnx::Concat_7565" type="Unsqueeze" version="opset1">
+		<layer id="3848" name="Reshape_29791" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7565, onnx::Unsqueeze_7564"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7565">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4943" name="Constant_90996" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="19464604" size="16"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_7572"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4944" name="Constant_90997" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4945" name="Gather_90998" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_7572"/>
-			</rt_info>
+		<layer id="3849" name="/up_blocks.3/resnets.1/conv_shortcut/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64">
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.1/conv_shortcut/Conv_output_0">
 					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4946" name="onnx::Reshape_7572" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_7572"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>2</dim>
-				</port>
-			</input>
+		<layer id="3850" name="/up_blocks.3/resnets.1/norm1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_7572">
-					<dim>4</dim>
+				<port id="0" precision="I64" names="/up_blocks.3/resnets.1/norm1/Constant_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4947" name="onnx::Transpose_7573" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_7573"/>
-			</rt_info>
+		<layer id="3851" name="/up_blocks.3/resnets.1/norm1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_7573">
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.1/norm1/Reshape_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>32</dim>
+					<dim>81920</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4948" name="Constant_33209" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_33209"/>
-			</rt_info>
+		<layer id="3852" name="Constant_29445" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4949" name="onnx::Reshape_7574" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7574"/>
-			</rt_info>
+		<layer id="3853" name="MVN_29446" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>32</dim>
+					<dim>81920</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_7574">
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.1/norm1/InstanceNormalization_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
+					<dim>32</dim>
+					<dim>81920</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4950" name="onnx::Div_7575" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_7575"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_7575"/>
-			</output>
-		</layer>
-		<layer id="4951" name="onnx::Cast_7576" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_7576, onnx::Cast_7577, onnx::Div_7575, onnx::Unsqueeze_7578"/>
-			</rt_info>
+		<layer id="3854" name="/up_blocks.3/resnets.1/norm1/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_7576,onnx::Cast_7577,onnx::Unsqueeze_7578"/>
-			</output>
-		</layer>
-		<layer id="4952" name="onnx::Unsqueeze_7581" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7581"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7581">
-					<dim>1</dim>
+				<port id="1" precision="I64" names="/up_blocks.3/resnets.1/norm1/Shape_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4953" name="onnx::Concat_7582" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7582, onnx::Unsqueeze_7581"/>
-			</rt_info>
+		<layer id="3855" name="/up_blocks.3/resnets.1/norm1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>81920</dim>
+				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7582">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.1/norm1/Reshape_1_output_0">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4954" name="Constant_90273" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_33092, onnx::Concat_7584, onnx::Gather_7554, onnx::Unsqueeze_7555, onnx::Unsqueeze_7583"/>
-			</rt_info>
+		<layer id="3856" name="Constant_87171_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="1695728722" size="1280" />
 			<output>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4955" name="Constant_33092" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_33092"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4956" name="onnx::Unsqueeze_7555" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_33092, onnx::Concat_7584, onnx::Gather_7554, onnx::Unsqueeze_7555, onnx::Unsqueeze_7583"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
+					<dim>640</dim>
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_7584">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4957" name="onnx::Gather_7557" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7557"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_7557"/>
-			</output>
-		</layer>
-		<layer id="4958" name="Constant_33096" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
+		<layer id="3857" name="Constant_87171" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_33096"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4959" name="onnx::Unsqueeze_7558" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_33096, onnx::Gather_7557, onnx::Unsqueeze_7558"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_7558"/>
-			</output>
-		</layer>
-		<layer id="4960" name="onnx::Mul_7579" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7579"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_7579"/>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="4961" name="onnx::Unsqueeze_7580" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7579, onnx::Unsqueeze_7580"/>
-			</rt_info>
+		<layer id="3858" name="/up_blocks.3/resnets.1/norm1/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_7580"/>
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.1/norm1/Mul_output_0">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="4962" name="onnx::Unsqueeze_7585" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7585"/>
-			</rt_info>
+		<layer id="3859" name="Constant_87172_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="1695730002" size="1280" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7585">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4963" name="onnx::Concat_7586" type="Unsqueeze" version="opset1">
+		<layer id="3860" name="Constant_87172" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7586, onnx::Unsqueeze_7585"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7586">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4964" name="onnx::Reshape_7587" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7587"/>
-			</rt_info>
+		<layer id="3861" name="/up_blocks.3/resnets.1/norm1/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_7587">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.1/norm1/Add_output_0">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4965" name="onnx::MatMul_7588" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_7588"/>
-			</rt_info>
+		<layer id="3862" name="/up_blocks.3/resnets.1/nonlinearity/Mul" type="Swish" version="opset4">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+					<dim>640</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_7588">
+				<port id="1" precision="FP32" names="/up_blocks.3/resnets.1/nonlinearity/Mul_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
+					<dim>640</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3863" name="up_blocks.3.resnets.1.conv1.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 640, 3, 3" offset="1695731282" size="3686400" />
+			<output>
+				<port id="0" precision="FP16">
 					<dim>320</dim>
+					<dim>640</dim>
+					<dim>3</dim>
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4966" name="Constant_148531" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320" offset="3405810224" size="409600"/>
+		<layer id="3864" name="up_blocks.3.resnets.1.conv1.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7590, onnx::MatMul_9129"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
+			<input>
+				<port id="0" precision="FP16">
 					<dim>320</dim>
+					<dim>640</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="up_blocks.3.resnets.1.conv1.weight">
 					<dim>320</dim>
+					<dim>640</dim>
+					<dim>3</dim>
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4967" name="onnx::Add_7590" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7590, onnx::MatMul_9129"/>
-			</rt_info>
+		<layer id="3865" name="/up_blocks.3/resnets.1/conv1/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
+					<dim>640</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>320</dim>
-					<dim>320</dim>
+					<dim>640</dim>
+					<dim>3</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_7590">
+				<port id="2" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3866" name="Reshape_29570_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="1699417682" size="640" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4968" name="input.1052" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3867" name="Reshape_29570" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.1052"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-					<dim>1</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.1052">
-					<dim>2</dim>
-					<dim>4096</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4969" name="input.1056" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.1056"/>
-			</rt_info>
+		<layer id="3868" name="/up_blocks.3/resnets.1/conv1/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
+					<dim>1</dim>
 					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.1056">
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.1/conv1/Conv_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4970" name="Constant_33330" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_33330"/>
-			</rt_info>
+		<layer id="3869" name="up_blocks.3.resnets.1.time_emb_proj.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 1280" offset="1699418322" size="819200" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4971" name="onnx::Mul_7601" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
+		<layer id="3870" name="up_blocks.3.resnets.1.time_emb_proj.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7597, onnx::Div_7600, onnx::Mul_7601, onnx::Pow_7594, onnx::ReduceMean_7596, onnx::Sqrt_7599, onnx::Sub_7593"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
+				<port id="0" precision="FP16">
 					<dim>320</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_7601">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4972" name="Constant_150592" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="3406219824" size="1280"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="1" precision="FP32" names="up_blocks.3.resnets.1.time_emb_proj.weight">
 					<dim>320</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4973" name="onnx::Add_7602" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7602"/>
-			</rt_info>
+		<layer id="3871" name="/up_blocks.3/resnets.1/time_emb_proj/Gemm/WithoutBiases" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
+					<dim>1280</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
 					<dim>320</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_7602">
+				<port id="2" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4974" name="Constant_150593" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="3406221104" size="1280"/>
+		<layer id="3872" name="Constant_87173_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320" offset="1700237522" size="640" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4975" name="onnx::MatMul_7603" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3873" name="Constant_87173" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_7603"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_7603">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4976" name="Constant_148539" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320" offset="3406222384" size="409600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9130, q.231"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>320</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4977" name="q.231" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9130, q.231"/>
-			</rt_info>
+		<layer id="3874" name="/up_blocks.3/resnets.1/time_emb_proj/Gemm" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>320</dim>
+					<dim>1</dim>
 					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.231">
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.1/time_emb_proj/Gemm_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4978" name="Constant_109683" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
+		<layer id="3875" name="/up_blocks.3/resnets.1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
+				<port id="0" precision="I64" names="/up_blocks.3/resnets.1/Constant_output_0">
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4979" name="onnx::Gather_7610" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7610, onnx::Gather_7613, onnx::Gather_7616"/>
-			</rt_info>
+		<layer id="3876" name="/up_blocks.3/resnets.1/Unsqueeze" type="Unsqueeze" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
+				<port id="1" precision="I64">
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_7610,onnx::Gather_7613,onnx::Gather_7616">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.1/Unsqueeze_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4980" name="onnx::Gather_7617" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7617"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_7617"/>
-			</output>
-		</layer>
-		<layer id="4981" name="Constant_33356" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_33356"/>
-			</rt_info>
+		<layer id="3877" name="/up_blocks.3/resnets.1/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="7064752" size="8" />
 			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4982" name="onnx::Div_7618" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_33356, onnx::Div_7618, onnx::Gather_7617"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="I64" names="/up_blocks.3/resnets.1/Constant_1_output_0">
+					<dim>1</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Div_7618"/>
-			</output>
-		</layer>
-		<layer id="4983" name="onnx::Div_7619" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_7619"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_7619"/>
 			</output>
 		</layer>
-		<layer id="4984" name="onnx::Cast_7620" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_7620, onnx::Cast_7621, onnx::Div_7619, onnx::Unsqueeze_7622"/>
-			</rt_info>
+		<layer id="3878" name="/up_blocks.3/resnets.1/Unsqueeze_1" type="Unsqueeze" version="opset1">
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_7620,onnx::Cast_7621,onnx::Unsqueeze_7622"/>
-			</output>
-		</layer>
-		<layer id="4985" name="onnx::Unsqueeze_7630" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7630"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7630">
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
 					<dim>1</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="4986" name="onnx::Concat_7631" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7631, onnx::Unsqueeze_7630"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7631">
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.1/Unsqueeze_1_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4987" name="onnx::Reshape_7632" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_7632"/>
-			</rt_info>
+		<layer id="3879" name="/up_blocks.3/resnets.1/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64">
-					<dim>4</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.1/Add_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4988" name="onnx::Transpose_7633" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_7633"/>
-			</rt_info>
+		<layer id="3880" name="/up_blocks.3/resnets.1/norm2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.3/resnets.1/norm2/Constant_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3881" name="/up_blocks.3/resnets.1/norm2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_7633">
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.1/norm2/Reshape_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
+					<dim>32</dim>
+					<dim>40960</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4989" name="Constant_33469" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_33469"/>
-			</rt_info>
+		<layer id="3882" name="Constant_29618" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4990" name="onnx::Reshape_7634" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7634"/>
-			</rt_info>
+		<layer id="3883" name="MVN_29619" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
+					<dim>32</dim>
+					<dim>40960</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_7634">
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.1/norm2/InstanceNormalization_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>32</dim>
+					<dim>40960</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4991" name="onnx::Gather_7611" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7611"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_7611"/>
-			</output>
-		</layer>
-		<layer id="4992" name="Constant_33348" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_33348"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="4993" name="onnx::Unsqueeze_7612" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_33348, onnx::Gather_7611, onnx::Unsqueeze_7612"/>
-			</rt_info>
+		<layer id="3884" name="/up_blocks.3/resnets.1/norm2/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_7612"/>
-			</output>
-		</layer>
-		<layer id="4994" name="onnx::Mul_7635" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7635"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_7635"/>
+				<port id="1" precision="I64" names="/up_blocks.3/resnets.1/norm2/Shape_output_0">
+					<dim>4</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="4995" name="onnx::Unsqueeze_7636" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7635, onnx::Unsqueeze_7636"/>
-			</rt_info>
+		<layer id="3885" name="/up_blocks.3/resnets.1/norm2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>40960</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_7636"/>
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.1/norm2/Reshape_1_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="4996" name="onnx::Unsqueeze_7641" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7641"/>
-			</rt_info>
+		<layer id="3886" name="Constant_87174_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="1700238162" size="640" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7641">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4997" name="onnx::Concat_7642" type="Unsqueeze" version="opset1">
+		<layer id="3887" name="Constant_87174" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7642, onnx::Unsqueeze_7641"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7642">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
 					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="4998" name="Constant_90300" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_33352, onnx::Concat_7644, onnx::Gather_7614, onnx::Unsqueeze_7615, onnx::Unsqueeze_7643"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="4999" name="Constant_33352" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_33352"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="5000" name="onnx::Unsqueeze_7615" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_33352, onnx::Concat_7644, onnx::Gather_7614, onnx::Unsqueeze_7615, onnx::Unsqueeze_7643"/>
-			</rt_info>
+		<layer id="3888" name="/up_blocks.3/resnets.1/norm2/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_7644">
 					<dim>1</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="5001" name="onnx::Div_7637" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_7637"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_7637"/>
-			</output>
-		</layer>
-		<layer id="5002" name="onnx::Cast_7638" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_7638, onnx::Cast_7639, onnx::Div_7637, onnx::Unsqueeze_7640"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_7638,onnx::Cast_7639,onnx::Unsqueeze_7640"/>
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.1/norm2/Mul_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5003" name="onnx::Unsqueeze_7645" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7645"/>
-			</rt_info>
+		<layer id="3889" name="Constant_87175_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="1700238802" size="640" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7645">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="5004" name="onnx::Concat_7646" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7646, onnx::Unsqueeze_7645"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+					<dim>320</dim>
 					<dim>1</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7646">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5005" name="onnx::Reshape_7647" type="Concat" version="opset1">
-			<data axis="0"/>
+		<layer id="3890" name="Constant_87175" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7647"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
+					<dim>320</dim>
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_7647">
-					<dim>3</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="5006" name="q.235" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="q.235"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
-			<output>
-				<port id="2" precision="FP32" names="q.235">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="5007" name="Constant_148546" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 768" offset="3406631984" size="983040"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.231, onnx::MatMul_9131"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>320</dim>
-					<dim>768</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5008" name="k.231" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.231, onnx::MatMul_9131"/>
-			</rt_info>
+		<layer id="3891" name="/up_blocks.3/resnets.1/norm2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>77</dim>
-					<dim>768</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>320</dim>
-					<dim>768</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.231">
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.1/norm2/Add_output_0">
 					<dim>2</dim>
-					<dim>77</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5009" name="onnx::Transpose_7662" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_7662"/>
-			</rt_info>
+		<layer id="3892" name="/up_blocks.3/resnets.1/nonlinearity_1/Mul" type="Swish" version="opset4">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>77</dim>
 					<dim>320</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_7662">
+				<port id="1" precision="FP32" names="/up_blocks.3/resnets.1/nonlinearity_1/Mul_output_0">
 					<dim>2</dim>
-					<dim>77</dim>
-					<dim>8</dim>
-					<dim>40</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="5010" name="Constant_33590" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_33590"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
-		</layer>
-		<layer id="5011" name="onnx::Reshape_7663" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7663"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>77</dim>
-					<dim>8</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-			</input>
+		</layer>
+		<layer id="3893" name="up_blocks.3.resnets.1.conv2.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320, 3, 3" offset="1700239442" size="1843200" />
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_7663">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>77</dim>
-					<dim>40</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>3</dim>
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5012" name="k.235" type="Reshape" version="opset1">
-			<data special_zero="true"/>
+		<layer id="3894" name="up_blocks.3.resnets.1.conv2.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="k.235"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>77</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>3</dim>
 					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.235">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>40</dim>
+				<port id="1" precision="FP32" names="up_blocks.3.resnets.1.conv2.weight">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>3</dim>
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5013" name="onnx::Mul_7699" type="Einsum" version="opset7">
-			<data equation="bid,bjd-&gt;bij"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7699"/>
-			</rt_info>
+		<layer id="3895" name="/up_blocks.3/resnets.1/conv2/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>40</dim>
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>3</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_7699">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>77</dim>
+				<port id="2" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5014" name="Constant_150594" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1" offset="19054992" size="4"/>
+		<layer id="3896" name="Reshape_29743_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="1702082642" size="640" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>320</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5015" name="onnx::Softmax_7701" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3897" name="Reshape_29743" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Softmax_7701"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>77</dim>
-				</port>
-				<port id="1" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>320</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Softmax_7701">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>77</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5016" name="attn.115" type="SoftMax" version="opset8">
-			<data axis="-1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="attn.115"/>
-			</rt_info>
+		<layer id="3898" name="/up_blocks.3/resnets.1/conv2/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>77</dim>
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
-			</input>
-			<output>
-				<port id="1" precision="FP32" names="attn.115">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>77</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="5017" name="Constant_148553" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 768" offset="3407615024" size="983040"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9132, v.231"/>
-			</rt_info>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.1/conv2/Conv_output_0">
+					<dim>2</dim>
 					<dim>320</dim>
-					<dim>768</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5018" name="v.231" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9132, v.231"/>
-			</rt_info>
+		<layer id="3899" name="/up_blocks.3/resnets.1/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>77</dim>
-					<dim>768</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
+					<dim>2</dim>
 					<dim>320</dim>
-					<dim>768</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.231">
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.1/Add_1_output_0,/up_blocks.3/resnets.1/Div_output_0">
 					<dim>2</dim>
-					<dim>77</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5019" name="onnx::Transpose_7687" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_7687"/>
-			</rt_info>
+		<layer id="3900" name="/up_blocks.3/attentions.1/norm/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.1/norm/Constant_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3901" name="/up_blocks.3/attentions.1/norm/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>77</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_7687">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/norm/Reshape_output_0">
 					<dim>2</dim>
-					<dim>77</dim>
-					<dim>8</dim>
-					<dim>40</dim>
+					<dim>32</dim>
+					<dim>40960</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5020" name="Constant_33598" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_33598"/>
-			</rt_info>
+		<layer id="3902" name="Constant_29831" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5021" name="onnx::Reshape_7688" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7688"/>
-			</rt_info>
+		<layer id="3903" name="MVN_29832" type="MVN" version="opset6">
+			<data eps="9.9999999747524271e-07" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>77</dim>
-					<dim>8</dim>
-					<dim>40</dim>
+					<dim>32</dim>
+					<dim>40960</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_7688">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/norm/InstanceNormalization_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>77</dim>
-					<dim>40</dim>
+					<dim>32</dim>
+					<dim>40960</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5022" name="v.235" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="v.235"/>
-			</rt_info>
+		<layer id="3904" name="/up_blocks.3/attentions.1/norm/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>77</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.235">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>40</dim>
+				<port id="1" precision="I64" names="/up_blocks.3/attentions.1/norm/Shape_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5023" name="out.115" type="Einsum" version="opset7">
-			<data equation="bij,bjd-&gt;bid"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="out.115"/>
-			</rt_info>
+		<layer id="3905" name="/up_blocks.3/attentions.1/norm/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>77</dim>
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>40960</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>40</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="out.115">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/norm/Reshape_1_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5024" name="onnx::Gather_7704" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7704, onnx::Gather_7707, onnx::Gather_7710"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-			</input>
+		<layer id="3906" name="Constant_87176_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="1702083282" size="640" />
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_7704,onnx::Gather_7707,onnx::Gather_7710">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5025" name="onnx::Gather_7705" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7705"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_7705"/>
-			</output>
-		</layer>
-		<layer id="5026" name="Constant_33610" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
+		<layer id="3907" name="Constant_87176" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_33610"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="5027" name="onnx::Div_7706" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_33610, onnx::Div_7706, onnx::Gather_7705"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_7706"/>
-			</output>
-		</layer>
-		<layer id="5028" name="onnx::Div_7713" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_7713"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_7713"/>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5029" name="onnx::Cast_7714" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_7714, onnx::Cast_7715, onnx::Div_7713, onnx::Unsqueeze_7716"/>
-			</rt_info>
+		<layer id="3908" name="/up_blocks.3/attentions.1/norm/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_7714,onnx::Cast_7715,onnx::Unsqueeze_7716"/>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/norm/Mul_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5030" name="onnx::Unsqueeze_7718" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7718"/>
-			</rt_info>
+		<layer id="3909" name="Constant_87177_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="1702083922" size="640" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7718">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5031" name="onnx::Concat_7719" type="Unsqueeze" version="opset1">
+		<layer id="3910" name="Constant_87177" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7719, onnx::Unsqueeze_7718"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7719">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5032" name="Constant_91006" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="19464604" size="16"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_7726"/>
-			</rt_info>
+		<layer id="3911" name="/up_blocks.3/attentions.1/norm/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/norm/Add_output_0">
 					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5033" name="Constant_91007" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
+		<layer id="3912" name="up_blocks.3.attentions.1.proj_in.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320, 1, 1" offset="1702084562" size="204800" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5034" name="Gather_91008" type="Gather" version="opset8">
-			<data batch_dims="0"/>
+		<layer id="3913" name="up_blocks.3.attentions.1.proj_in.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_7726"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
-				<port id="1" precision="I64">
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="up_blocks.3.attentions.1.proj_in.weight">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3914" name="/up_blocks.3/attentions.1/proj_in/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
+			<input>
+				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64">
+				<port id="2" precision="FP32">
 					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3915" name="Reshape_29954_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="1702289362" size="640" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5035" name="onnx::Reshape_7726" type="Concat" version="opset1">
-			<data axis="0"/>
+		<layer id="3916" name="Reshape_29954" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_7726"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
 					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>2</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_7726">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5036" name="onnx::Transpose_7727" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_7727"/>
-			</rt_info>
+		<layer id="3917" name="/up_blocks.3/attentions.1/proj_in/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_7727">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/proj_in/Conv_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5037" name="Constant_33731" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_33731"/>
-			</rt_info>
+		<layer id="3918" name="Constant_29982" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9116600" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5038" name="onnx::Reshape_7728" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7728"/>
-			</rt_info>
+		<layer id="3919" name="/up_blocks.3/attentions.1/Transpose" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_7728">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/Transpose_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5039" name="onnx::Div_7729" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_7729"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_7729"/>
-			</output>
-		</layer>
-		<layer id="5040" name="onnx::Cast_7730" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_7730, onnx::Cast_7731, onnx::Div_7729, onnx::Unsqueeze_7732"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_7730,onnx::Cast_7731,onnx::Unsqueeze_7732"/>
-			</output>
-		</layer>
-		<layer id="5041" name="onnx::Unsqueeze_7735" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7735"/>
-			</rt_info>
+		<layer id="3920" name="/up_blocks.3/attentions.1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="9116632" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7735">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.1/Constant_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5042" name="onnx::Concat_7736" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7736, onnx::Unsqueeze_7735"/>
-			</rt_info>
+		<layer id="3921" name="/up_blocks.3/attentions.1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+					<dim>320</dim>
+				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7736">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/Reshape_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5043" name="Constant_90327" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_33614, onnx::Concat_7738, onnx::Gather_7708, onnx::Unsqueeze_7709, onnx::Unsqueeze_7737"/>
-			</rt_info>
+		<layer id="3922" name="Constant_29991" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5044" name="Constant_33614" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_33614"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="5045" name="onnx::Unsqueeze_7709" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_33614, onnx::Concat_7738, onnx::Gather_7708, onnx::Unsqueeze_7709, onnx::Unsqueeze_7737"/>
-			</rt_info>
+		<layer id="3923" name="/up_blocks.3/attentions.1/transformer_blocks.0/norm1/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_7738">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/norm1/Div_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5046" name="onnx::Gather_7711" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7711"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_7711"/>
-			</output>
-		</layer>
-		<layer id="5047" name="Constant_33618" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_33618"/>
-			</rt_info>
+		<layer id="3924" name="Constant_87178_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="1702290002" size="640" />
 			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="5048" name="onnx::Unsqueeze_7712" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_33618, onnx::Gather_7711, onnx::Unsqueeze_7712"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_7712"/>
-			</output>
-		</layer>
-		<layer id="5049" name="onnx::Mul_7733" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7733"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_7733"/>
 			</output>
 		</layer>
-		<layer id="5050" name="onnx::Unsqueeze_7734" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3925" name="Constant_87178" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7733, onnx::Unsqueeze_7734"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_7734"/>
-			</output>
-		</layer>
-		<layer id="5051" name="onnx::Unsqueeze_7739" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7739"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7739">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5052" name="onnx::Concat_7740" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7740, onnx::Unsqueeze_7739"/>
-			</rt_info>
+		<layer id="3926" name="/up_blocks.3/attentions.1/transformer_blocks.0/norm1/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7740">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/norm1/Mul_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3927" name="Constant_87179_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="1702290642" size="640" />
+			<output>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5053" name="onnx::Reshape_7741" type="Concat" version="opset1">
-			<data axis="0"/>
+		<layer id="3928" name="Constant_87179" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7741"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_7741">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5054" name="onnx::MatMul_7742" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_7742"/>
-			</rt_info>
+		<layer id="3929" name="/up_blocks.3/attentions.1/transformer_blocks.0/norm1/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
+					<dim>320</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_7742">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/norm1/Add_1_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5055" name="Constant_148560" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320" offset="3408598064" size="409600"/>
+		<layer id="3930" name="Constant_86030_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320" offset="1702291282" size="204800" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3931" name="Constant_86030" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7744, onnx::MatMul_9153"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>320</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5056" name="onnx::Add_7744" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7744, onnx::MatMul_9153"/>
-			</rt_info>
+		<layer id="3932" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/to_q/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -84297,147 +64868,129 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_7744">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/to_q/MatMul_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5057" name="input.1060" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.1060"/>
-			</rt_info>
+		<layer id="3933" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322744" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Constant_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3934" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="FP32">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.1060">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Reshape_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>320</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5058" name="input.1064" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.1064"/>
-			</rt_info>
+		<layer id="3935" name="Constant_30014" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
+			<output>
+				<port id="0" precision="I64">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3936" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Transpose" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>320</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.1064">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Transpose_output_0">
 					<dim>2</dim>
+					<dim>8</dim>
 					<dim>4096</dim>
-					<dim>320</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5059" name="Constant_33852" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_33852"/>
-			</rt_info>
+		<layer id="3937" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="9322808" size="24" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Constant_1_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5060" name="onnx::Mul_7755" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7751, onnx::Div_7754, onnx::Mul_7755, onnx::Pow_7748, onnx::ReduceMean_7750, onnx::Sqrt_7753, onnx::Sub_7747"/>
-			</rt_info>
+		<layer id="3938" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>8</dim>
 					<dim>4096</dim>
-					<dim>320</dim>
+					<dim>40</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_7755">
-					<dim>2</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Reshape_1_output_0">
+					<dim>16</dim>
 					<dim>4096</dim>
-					<dim>320</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5061" name="Constant_150596" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="3409007664" size="1280"/>
+		<layer id="3939" name="Constant_86037_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320" offset="1702496082" size="204800" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5062" name="onnx::Add_7756" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3940" name="Constant_86037" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7756"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
+				<port id="0" precision="FP16">
 					<dim>320</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
 					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_7756">
-					<dim>2</dim>
-					<dim>4096</dim>
+				<port id="1" precision="FP32">
 					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="5063" name="Constant_150597" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="3409008944" size="1280"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5064" name="onnx::MatMul_7757" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_7757"/>
-			</rt_info>
+		<layer id="3941" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/to_k/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -84445,554 +64998,490 @@
 					<dim>320</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>320</dim>
 					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_7757">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/to_k/MatMul_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5065" name="Constant_148568" type="Const" version="opset1">
-			<data element_type="f32" shape="2560, 320" offset="3409010224" size="3276800"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7759, onnx::MatMul_9154"/>
-			</rt_info>
+		<layer id="3942" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322744" size="32" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>2560</dim>
-					<dim>320</dim>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Constant_2_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5066" name="onnx::Add_7759" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7759, onnx::MatMul_9154"/>
-			</rt_info>
+		<layer id="3943" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Reshape_2" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>2560</dim>
-					<dim>320</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_7759">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Reshape_2_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>2560</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5067" name="onnx::Shape_7760" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Shape_7760"/>
-			</rt_info>
+		<layer id="3944" name="Constant_30030" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
+			<output>
+				<port id="0" precision="I64">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3945" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Transpose_1" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>2560</dim>
-				</port>
-				<port id="1" precision="FP32">
 					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>2560</dim>
+					<dim>8</dim>
+					<dim>40</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Shape_7760">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Transpose_1_output_0">
 					<dim>2</dim>
+					<dim>8</dim>
 					<dim>4096</dim>
-					<dim>2560</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="5068" name="Constant_128481" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_33880, onnx::Gather_7762, onnx::Mul_7771"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5069" name="Constant_128482" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_33880, onnx::Gather_7762, onnx::Mul_7771"/>
-			</rt_info>
+		<layer id="3946" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="9322808" size="24" />
 			<output>
-				<port id="0" precision="I64">
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Constant_3_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5070" name="Constant_128478" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_33880, onnx::Gather_7762, onnx::Mul_7771"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="5071" name="onnx::Gather_7761" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7761"/>
-			</rt_info>
+		<layer id="3947" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Reshape_3" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>8</dim>
 					<dim>4096</dim>
-					<dim>2560</dim>
+					<dim>40</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_7761">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Reshape_3_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5072" name="onnx::Gather_7762" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7762"/>
-			</rt_info>
+		<layer id="3948" name="Constant_87180_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1" offset="9527632" size="2" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Gather_7762">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5073" name="Constant_33869" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_33869"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="5074" name="onnx::Add_7763" type="Gather" version="opset8">
-			<data batch_dims="0"/>
+		<layer id="3949" name="Constant_87180" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_33869, onnx::Add_7763, onnx::Gather_7762"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Add_7763">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="5075" name="onnx::Add_7765" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7765"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Add_7765">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5076" name="onnx::Div_7766" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7765, onnx::Div_7766"/>
-			</rt_info>
+		<layer id="3950" name="Multiply_86231" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Div_7766">
-					<dim>1</dim>
+				<port id="2" precision="FP32">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5077" name="onnx::Div_7767" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_7767"/>
-			</rt_info>
+		<layer id="3951" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Mul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_7767">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Add_output_0,/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Mul_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>4096</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5078" name="onnx::Mul_7768" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_7767, onnx::Mul_7768, onnx::Mul_7769, onnx::Slice_7770"/>
-			</rt_info>
+		<layer id="3952" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Softmax" type="SoftMax" version="opset8">
+			<data axis="-1" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>4096</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Mul_7768,onnx::Slice_7770">
-					<dim>1</dim>
+				<port id="1" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Cast_output_0,/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Softmax_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>4096</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5079" name="Constant_128477" type="Const" version="opset1">
-			<data element_type="i32" shape="1" offset="25941500" size="4"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_33880, onnx::Gather_7762, onnx::Mul_7771"/>
-			</rt_info>
+		<layer id="3953" name="Constant_86044_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320" offset="1702700882" size="204800" />
 			<output>
-				<port id="0" precision="I32">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5080" name="ScatterUpdate_128483" type="ScatterUpdate" version="opset3">
+		<layer id="3954" name="Constant_86044" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_33880, onnx::Gather_7762, onnx::Mul_7771"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
+			</output>
+		</layer>
+		<layer id="3955" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/to_v/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
-				<port id="3" precision="I32">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="I64">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/to_v/MatMul_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5081" name="Constant_128486" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941504" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_33880, onnx::Gather_7762, onnx::Mul_7771"/>
-			</rt_info>
+		<layer id="3956" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Constant_4" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322744" size="32" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Constant_4_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5082" name="onnx::Mul_7771" type="StridedSlice" version="opset1">
-			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask=""/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_33880, onnx::Gather_7762, onnx::Mul_7771"/>
-			</rt_info>
+		<layer id="3957" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Reshape_4" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>2560</dim>
+					<dim>320</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="3" precision="I64">
-					<dim>3</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="FP32" names="onnx::Mul_7771">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Reshape_4_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>1280</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="5083" name="Constant_128550" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_33949, onnx::Div_7774, onnx::Gather_7762"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5084" name="Constant_128549" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_33949, onnx::Div_7774, onnx::Gather_7762"/>
-			</rt_info>
+		<layer id="3958" name="Constant_30046" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="5085" name="Constant_128548" type="Const" version="opset1">
-			<data element_type="i32" shape="1" offset="25941500" size="4"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_33949, onnx::Div_7774, onnx::Gather_7762"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I32">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5086" name="ScatterUpdate_128551" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_33949, onnx::Div_7774, onnx::Gather_7762"/>
-			</rt_info>
+		<layer id="3959" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Transpose_2" type="Transpose" version="opset1">
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="3" precision="I32">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="I64">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Transpose_2_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5087" name="Constant_128552" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_33949, onnx::Div_7774, onnx::Gather_7762"/>
-			</rt_info>
+		<layer id="3960" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="9322808" size="24" />
 			<output>
-				<port id="0" precision="I64">
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Constant_5_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5088" name="onnx::Mul_7772" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7772"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_7772">
-					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="5089" name="onnx::Slice_7773" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7772, onnx::Slice_7773"/>
-			</rt_info>
+		<layer id="3961" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Reshape_5" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Slice_7773">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Reshape_5_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5090" name="ScatterUpdate_128553" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_33949, onnx::Div_7774, onnx::Gather_7762"/>
-			</rt_info>
+		<layer id="3962" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/MatMul_1" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="false" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>4096</dim>
 				</port>
-				<port id="3" precision="I32">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="I64">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/MatMul_1_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5091" name="Constant_128556" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941504" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_33949, onnx::Div_7774, onnx::Gather_7762"/>
-			</rt_info>
+		<layer id="3963" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Constant_8" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9732434" size="32" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Constant_8_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5092" name="onnx::Div_7774" type="StridedSlice" version="opset1">
-			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask=""/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_33949, onnx::Div_7774, onnx::Gather_7762"/>
-			</rt_info>
+		<layer id="3964" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Reshape_6" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>2</dim>
+					<dim>16</dim>
 					<dim>4096</dim>
-					<dim>2560</dim>
+					<dim>40</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="3" precision="I64">
-					<dim>3</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="FP32" names="onnx::Div_7774">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Reshape_6_output_0">
 					<dim>2</dim>
+					<dim>8</dim>
 					<dim>4096</dim>
-					<dim>1280</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5093" name="onnx::Mul_7782" type="Gelu" version="opset7">
-			<data approximation_mode="ERF"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7777, onnx::Erf_7776, onnx::Mul_7779, onnx::Mul_7780, onnx::Mul_7782"/>
-			</rt_info>
+		<layer id="3965" name="Constant_30072" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
+			<output>
+				<port id="0" precision="I64">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3966" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Transpose_4" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>8</dim>
 					<dim>4096</dim>
-					<dim>1280</dim>
+					<dim>40</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="onnx::Mul_7782">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Transpose_4_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>1280</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5094" name="input.1068" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.1068"/>
-			</rt_info>
+		<layer id="3967" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Constant_9" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="9116632" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Constant_9_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3968" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Reshape_7" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>1280</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>1280</dim>
+				<port id="1" precision="I64">
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.1068">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/Reshape_7_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>1280</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5095" name="Constant_148576" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 1280" offset="3412287024" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7785, onnx::MatMul_9155"/>
-			</rt_info>
+		<layer id="3969" name="Constant_86051_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320" offset="1702905682" size="204800" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
+					<dim>320</dim>
 					<dim>320</dim>
-					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5096" name="onnx::Add_7785" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="3970" name="Constant_86051" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7785, onnx::MatMul_9155"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3971" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/to_out.0/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>1280</dim>
+					<dim>320</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>320</dim>
-					<dim>1280</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_7785">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/to_out.0/MatMul_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5097" name="onnx::Add_7786" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7786"/>
-			</rt_info>
+		<layer id="3972" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/to_out.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>1</dim>
@@ -85006,18 +65495,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_7786">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn1/to_out.0/Add_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5098" name="onnx::Reshape_7787" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7787"/>
-			</rt_info>
+		<layer id="3973" name="/up_blocks.3/attentions.1/transformer_blocks.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -85031,55 +65517,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_7787">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/Add_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5099" name="Constant_91019" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18233080" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7796"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="5100" name="Constant_91020" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
+		<layer id="3974" name="Constant_30084" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="5101" name="Gather_91021" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7796"/>
-			</rt_info>
-			<input>
 				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_7796">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5102" name="onnx::Transpose_7797" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_7797"/>
-			</rt_info>
+		<layer id="3975" name="/up_blocks.3/attentions.1/transformer_blocks.0/norm2/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -85087,1615 +65541,1474 @@
 					<dim>320</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_7797">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/norm2/Div_output_0">
 					<dim>2</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5103" name="Constant_34114" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="27579960" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_34114"/>
-			</rt_info>
+		<layer id="3976" name="Constant_87182_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="1703110482" size="640" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5104" name="input.1072" type="Transpose" version="opset1">
+		<layer id="3977" name="Constant_87182" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.1072"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3978" name="/up_blocks.3/attentions.1/transformer_blocks.0/norm2/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.1072">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/norm2/Mul_output_0">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5105" name="m.up_blocks.3.attentions.1.proj_out.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320, 1, 1" offset="3413925424" size="409600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.3.attentions.1.proj_out.weight"/>
-			</rt_info>
+		<layer id="3979" name="Constant_87183_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="1703111122" size="640" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.3.attentions.1.proj_out.weight">
-					<dim>320</dim>
-					<dim>320</dim>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5106" name="Convolution_34116" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
+		<layer id="3980" name="Constant_87183" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_34116"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
+			</input>
+			<output>
 				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3981" name="/up_blocks.3/attentions.1/transformer_blocks.0/norm2/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
+				</port>
+				<port id="1" precision="FP32">
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/norm2/Add_1_output_0">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5107" name="Reshape_34136" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="3414335024" size="1280"/>
+		<layer id="3982" name="Constant_86059_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320" offset="1703111762" size="204800" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
 					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5108" name="onnx::Add_7799" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3983" name="Constant_86059" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_34135, Reshape_34136, onnx::Add_7799"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
 					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_7799">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5109" name="onnx::Concat_7800" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7800"/>
-			</rt_info>
+		<layer id="3984" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/to_q/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>2</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Concat_7800">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/to_q/MatMul_output_0">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5110" name="onnx::Cast_7801" type="Concat" version="opset1">
-			<data axis="1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.1076, onnx::Cast_7801"/>
-			</rt_info>
+		<layer id="3985" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322744" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Constant_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3986" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.1076,onnx::Cast_7801">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Reshape_output_0">
 					<dim>2</dim>
-					<dim>640</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>4096</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5111" name="m.up_blocks.3.resnets.2.conv_shortcut.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 640, 1, 1" offset="3414336304" size="819200"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.3.resnets.2.conv_shortcut.weight"/>
-			</rt_info>
+		<layer id="3987" name="Constant_30107" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.3.resnets.2.conv_shortcut.weight">
-					<dim>320</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="0" precision="I64">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5112" name="Convolution_34506" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_34506"/>
-			</rt_info>
+		<layer id="3988" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Transpose" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>640</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>4096</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>320</dim>
-					<dim>640</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Transpose_output_0">
 					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>8</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5113" name="Reshape_34526" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="3415155504" size="1280"/>
+		<layer id="3989" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="9322808" size="24" />
 			<output>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Constant_1_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3990" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
+			<input>
 				<port id="0" precision="FP32">
-					<dim>1</dim>
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Reshape_1_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3991" name="Constant_86066_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 768" offset="1703316562" size="491520" />
+			<output>
+				<port id="0" precision="FP16">
 					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5114" name="onnx::Add_7846" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="3992" name="Constant_86066" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_34525, Reshape_34526, onnx::Add_7846"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>768</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>768</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="3993" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/to_k/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>77</dim>
+					<dim>768</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
 					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>768</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_7846">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/to_k/MatMul_output_0">
 					<dim>2</dim>
+					<dim>77</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5115" name="onnx::Reshape_7803" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7803"/>
-			</rt_info>
+		<layer id="3994" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="10634866" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_7803">
-					<dim>3</dim>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Constant_2_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5116" name="onnx::InstanceNormalization_7804" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_7804"/>
-			</rt_info>
+		<layer id="3995" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Reshape_2" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>640</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>77</dim>
+					<dim>320</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>3</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_7804">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Reshape_2_output_0">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>81920</dim>
+					<dim>77</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5117" name="Constant_34176" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_34176"/>
-			</rt_info>
+		<layer id="3996" name="Constant_30123" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5118" name="MVN_34177" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_34196, Concat_34241, MVN_34177, Multiply_34224, Reshape_34197, Reshape_34242, onnx::Reshape_7807"/>
-			</rt_info>
+		<layer id="3997" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Transpose_1" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>81920</dim>
+					<dim>77</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_7807">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Transpose_1_output_0">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>81920</dim>
+					<dim>8</dim>
+					<dim>77</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5119" name="onnx::Reshape_7808" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7808"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</input>
+		<layer id="3998" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="10634898" size="24" />
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_7808">
-					<dim>4</dim>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Constant_3_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5120" name="onnx::Mul_7809" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7809"/>
-			</rt_info>
+		<layer id="3999" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Reshape_3" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>81920</dim>
+					<dim>8</dim>
+					<dim>77</dim>
+					<dim>40</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_7809">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Reshape_3_output_0">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5121" name="Constant_150600" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="3415156784" size="2560"/>
+		<layer id="4000" name="Constant_87184_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1" offset="9527632" size="2" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-					<dim>640</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5122" name="onnx::Add_7812" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="4001" name="Constant_87184" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7812"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-					<dim>640</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_7812">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="5123" name="Constant_150601" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 640, 1, 1" offset="3415159344" size="2560"/>
-			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
-					<dim>640</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5124" name="onnx::Cast_7815" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.1080, onnx::Cast_7815"/>
-			</rt_info>
+		<layer id="4002" name="Multiply_86233" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>40</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>1</dim>
-					<dim>640</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.1080,onnx::Cast_7815">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+				<port id="2" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5125" name="input.1084" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.1084, onnx::Mul_7817"/>
-			</rt_info>
+		<layer id="4003" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Mul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
-			</input>
-			<output>
-				<port id="1" precision="FP32" names="input.1084">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+				<port id="1" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>40</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="5126" name="m.up_blocks.3.resnets.2.conv1.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 640, 3, 3" offset="3415161904" size="7372800"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.3.resnets.2.conv1.weight"/>
-			</rt_info>
+			</input>
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.3.resnets.2.conv1.weight">
-					<dim>320</dim>
-					<dim>640</dim>
-					<dim>3</dim>
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Add_output_0,/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Mul_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>77</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5127" name="Convolution_34282" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_34282"/>
-			</rt_info>
+		<layer id="4004" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Softmax" type="SoftMax" version="opset8">
+			<data axis="-1" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>640</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>320</dim>
-					<dim>640</dim>
-					<dim>3</dim>
-					<dim>3</dim>
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>77</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+				<port id="1" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Cast_output_0,/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Softmax_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>77</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5128" name="Reshape_34302" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="3422534704" size="1280"/>
+		<layer id="4005" name="Constant_86073_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 768" offset="1703808082" size="491520" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
 					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5129" name="onnx::Add_7819" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="4006" name="Constant_86073" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_34301, Reshape_34302, onnx::Add_7819"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
 					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>768</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_7819">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5130" name="onnx::Gemm_7821" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gemm_7821, onnx::Mul_7820"/>
-			</rt_info>
+		<layer id="4007" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/to_v/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1280</dim>
+					<dim>77</dim>
+					<dim>768</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>768</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="onnx::Gemm_7821">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/to_v/MatMul_output_0">
 					<dim>2</dim>
-					<dim>1280</dim>
+					<dim>77</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5131" name="m.up_blocks.3.resnets.2.time_emb_proj.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 1280" offset="3422535984" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.3.resnets.2.time_emb_proj.weight"/>
-			</rt_info>
+		<layer id="4008" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Constant_4" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="10634866" size="32" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.3.resnets.2.time_emb_proj.weight">
-					<dim>320</dim>
-					<dim>1280</dim>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Constant_4_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5132" name="MatMul_34334" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="MatMul_34334"/>
-			</rt_info>
+		<layer id="4009" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Reshape_4" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>1280</dim>
-				</port>
-				<port id="1" precision="FP32">
+					<dim>77</dim>
 					<dim>320</dim>
-					<dim>1280</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Reshape_4_output_0">
 					<dim>2</dim>
-					<dim>320</dim>
+					<dim>77</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5133" name="Constant_150602" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320" offset="3424174384" size="1280"/>
+		<layer id="4010" name="Constant_30139" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
+				<port id="0" precision="I64">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5134" name="onnx::Unsqueeze_7822" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Multiply_34335, onnx::Unsqueeze_7822"/>
-			</rt_info>
+		<layer id="4011" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Transpose_2" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>320</dim>
+					<dim>77</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_7822">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Transpose_2_output_0">
 					<dim>2</dim>
-					<dim>320</dim>
+					<dim>8</dim>
+					<dim>77</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5135" name="onnx::Unsqueeze_7823" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7823"/>
-			</rt_info>
+		<layer id="4012" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="10634898" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7823">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Constant_5_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5136" name="onnx::Unsqueeze_7824" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7824"/>
-			</rt_info>
+		<layer id="4013" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Reshape_5" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>320</dim>
+					<dim>8</dim>
+					<dim>77</dim>
+					<dim>40</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Unsqueeze_7824">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Reshape_5_output_0">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5137" name="onnx::Unsqueeze_7825" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="14129392" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7825"/>
-			</rt_info>
+		<layer id="4014" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/MatMul_1" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="false" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>77</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>40</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7825">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/MatMul_1_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5138" name="onnx::Add_7826" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7826"/>
-			</rt_info>
+		<layer id="4015" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Constant_8" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9732434" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Constant_8_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="4016" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Reshape_6" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>1</dim>
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_7826">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Reshape_6_output_0">
 					<dim>2</dim>
-					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>8</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5139" name="onnx::Cast_7827" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.1088, onnx::Cast_7827"/>
-			</rt_info>
+		<layer id="4017" name="Constant_30165" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
+			<output>
+				<port id="0" precision="I64">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="4018" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Transpose_4" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>8</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.1088,onnx::Cast_7827">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Transpose_4_output_0">
 					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>4096</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5140" name="onnx::Reshape_7829" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7829"/>
-			</rt_info>
+		<layer id="4019" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Constant_9" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="9116632" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_7829">
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Constant_9_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5141" name="onnx::InstanceNormalization_7830" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_7830"/>
-			</rt_info>
+		<layer id="4020" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Reshape_7" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>4096</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_7830">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/Reshape_7_output_0">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>40960</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5142" name="Constant_34352" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
+		<layer id="4021" name="Constant_86080_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320" offset="1704299602" size="204800" />
+			<output>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="4022" name="Constant_86080" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_34352"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5143" name="MVN_34353" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_34372, Concat_34417, MVN_34353, Multiply_34400, Reshape_34373, Reshape_34418, onnx::Reshape_7833"/>
-			</rt_info>
+		<layer id="4023" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/to_out.0/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>40960</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_7833">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/to_out.0/MatMul_output_0">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>40960</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5144" name="onnx::Reshape_7834" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7834"/>
-			</rt_info>
+		<layer id="4024" name="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/to_out.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
+				<port id="1" precision="FP32">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_7834">
-					<dim>4</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/attn2/to_out.0/Add_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5145" name="onnx::Mul_7835" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7835"/>
-			</rt_info>
+		<layer id="4025" name="/up_blocks.3/attentions.1/transformer_blocks.0/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>40960</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_7835">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/Add_1_output_0">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5146" name="Constant_150603" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="3424175664" size="1280"/>
+		<layer id="4026" name="Constant_30177" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
-					<dim>1</dim>
+				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5147" name="onnx::Add_7838" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7838"/>
-			</rt_info>
+		<layer id="4027" name="/up_blocks.3/attentions.1/transformer_blocks.0/norm3/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
-					<dim>1</dim>
+				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_7838">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/norm3/Div_output_0">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5148" name="Constant_150604" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="3424176944" size="1280"/>
+		<layer id="4028" name="Constant_87186_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="1704504402" size="640" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5149" name="onnx::Cast_7841" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="4029" name="Constant_87186" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.1092, onnx::Cast_7841"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.1092,onnx::Cast_7841">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5150" name="input.1096" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.1096, onnx::Mul_7843"/>
-			</rt_info>
+		<layer id="4030" name="/up_blocks.3/attentions.1/transformer_blocks.0/norm3/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="input.1096">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/norm3/Mul_output_0">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5151" name="m.up_blocks.3.resnets.2.conv2.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320, 3, 3" offset="3424178224" size="3686400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.3.resnets.2.conv2.weight"/>
-			</rt_info>
+		<layer id="4031" name="Constant_87187_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="1704505042" size="640" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.3.resnets.2.conv2.weight">
-					<dim>320</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>320</dim>
-					<dim>3</dim>
-					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5152" name="Convolution_34458" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
+		<layer id="4032" name="Constant_87187" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_34458"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="4033" name="/up_blocks.3/attentions.1/transformer_blocks.0/norm3/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
 					<dim>320</dim>
-					<dim>320</dim>
-					<dim>3</dim>
-					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/norm3/Add_1_output_0">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5153" name="Reshape_34478" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="3427864624" size="1280"/>
+		<layer id="4034" name="Constant_86088_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="2560, 320" offset="1704505682" size="1638400" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>2560</dim>
 					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5154" name="onnx::Add_7845" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="4035" name="Constant_86088" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_34477, Reshape_34478, onnx::Add_7845"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>2560</dim>
 					<dim>320</dim>
-					<dim>1</dim>
-					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_7845">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
+					<dim>2560</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5155" name="onnx::Div_7847" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.1100, onnx::Div_7847"/>
-			</rt_info>
+		<layer id="4036" name="/up_blocks.3/attentions.1/transformer_blocks.0/ff/net.0/proj/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>2</dim>
+					<dim>2560</dim>
 					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.1100,onnx::Div_7847">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/ff/net.0/proj/MatMul_output_0">
 					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="5156" name="onnx::Reshape_7862" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7862"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_7862">
-					<dim>3</dim>
+					<dim>4096</dim>
+					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5157" name="onnx::InstanceNormalization_7863" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_7863"/>
-			</rt_info>
+		<layer id="4037" name="/up_blocks.3/attentions.1/transformer_blocks.0/ff/net.0/proj/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>2560</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>2560</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_7863">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/ff/net.0/proj/Add_output_0">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>40960</dim>
+					<dim>4096</dim>
+					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5158" name="Constant_34582" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_34582"/>
-			</rt_info>
+		<layer id="4038" name="Constant_79022" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>1</dim>
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5159" name="MVN_34583" type="MVN" version="opset6">
-			<data eps="9.9999999747524271e-07" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_34602, Concat_34647, MVN_34583, Multiply_34630, Reshape_34603, Reshape_34648, onnx::Reshape_7866"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>40960</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
+		<layer id="4039" name="Constant_79023" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_7866">
-					<dim>2</dim>
-					<dim>32</dim>
-					<dim>40960</dim>
+				<port id="0" precision="I64">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5160" name="onnx::Reshape_7867" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7850, onnx::Gather_7853, onnx::Gather_7856, onnx::Gather_7859, onnx::Reshape_7867"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-			</input>
+		<layer id="4040" name="Constant_79019" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_7850,onnx::Gather_7853,onnx::Gather_7856,onnx::Gather_7859,onnx::Reshape_7867">
-					<dim>4</dim>
+				<port id="0" precision="I64">
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5161" name="onnx::Mul_7868" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7868"/>
-			</rt_info>
+		<layer id="4041" name="/up_blocks.3/attentions.1/transformer_blocks.0/ff/net.0/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>32</dim>
-					<dim>40960</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>4096</dim>
+					<dim>2560</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_7868">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+				<port id="1" precision="I64" names="/up_blocks.3/attentions.1/transformer_blocks.0/ff/net.0/Shape_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5162" name="Constant_150605" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="3427865904" size="1280"/>
+		<layer id="4042" name="/up_blocks.3/attentions.1/transformer_blocks.0/ff/net.0/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.1/transformer_blocks.0/ff/net.0/Constant_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5163" name="onnx::Add_7871" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7871"/>
-			</rt_info>
+		<layer id="4043" name="Constant_30194" type="Const" version="opset1">
+			<data element_type="i64" shape="" offset="2143392" size="8" />
+			<output>
+				<port id="0" precision="I64" />
+			</output>
+		</layer>
+		<layer id="4044" name="/up_blocks.3/attentions.1/transformer_blocks.0/ff/net.0/Gather" type="Gather" version="opset8">
+			<data batch_dims="0" />
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+				<port id="0" precision="I64">
+					<dim>3</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
-					<dim>1</dim>
+				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
+				<port id="2" precision="I64" />
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_7871">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+				<port id="3" precision="I64" names="/up_blocks.3/attentions.1/transformer_blocks.0/ff/net.0/Gather_output_0">
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5164" name="Constant_150606" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="3427867184" size="1280"/>
+		<layer id="4045" name="/up_blocks.3/attentions.1/transformer_blocks.0/ff/net.0/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="2143400" size="8" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.1/transformer_blocks.0/ff/net.0/Constant_2_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5165" name="input.1104" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.1104"/>
-			</rt_info>
+		<layer id="4046" name="/up_blocks.3/attentions.1/transformer_blocks.0/ff/net.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
+				<port id="0" precision="I64">
 					<dim>1</dim>
+				</port>
+				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.1104">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+				<port id="2" precision="I64" names="/up_blocks.3/attentions.1/transformer_blocks.0/ff/net.0/Add_output_0">
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5166" name="m.up_blocks.3.attentions.2.proj_in.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320, 1, 1" offset="3427868464" size="409600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.3.attentions.2.proj_in.weight"/>
-			</rt_info>
+		<layer id="4047" name="/up_blocks.3/attentions.1/transformer_blocks.0/ff/net.0/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.3.attentions.2.proj_in.weight">
-					<dim>320</dim>
-					<dim>320</dim>
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.1/transformer_blocks.0/ff/net.0/Constant_3_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5167" name="Convolution_34685" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_34685"/>
-			</rt_info>
+		<layer id="4048" name="/up_blocks.3/attentions.1/transformer_blocks.0/ff/net.0/Div" type="Divide" version="opset1">
+			<data auto_broadcast="numpy" m_pythondiv="true" />
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>320</dim>
-					<dim>320</dim>
+				<port id="0" precision="I64">
 					<dim>1</dim>
+				</port>
+				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+				<port id="2" precision="I64" names="/up_blocks.3/attentions.1/transformer_blocks.0/ff/net.0/Div_output_0,/up_blocks.3/attentions.1/transformer_blocks.0/ff/net.0/Mul_output_0">
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5168" name="Reshape_34705" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="3428278064" size="1280"/>
+		<layer id="4049" name="Constant_79018" type="Const" version="opset1">
+			<data element_type="i32" shape="1" offset="12970946" size="4" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>320</dim>
-					<dim>1</dim>
+				<port id="0" precision="I32">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5169" name="onnx::Transpose_7875" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_34704, Reshape_34705, onnx::Transpose_7875"/>
-			</rt_info>
+		<layer id="4050" name="ScatterUpdate_79024" type="ScatterUpdate" version="opset3">
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+				<port id="0" precision="I64">
+					<dim>3</dim>
 				</port>
-				<port id="1" precision="FP32">
+				<port id="1" precision="I64">
 					<dim>1</dim>
-					<dim>320</dim>
+				</port>
+				<port id="2" precision="I64">
 					<dim>1</dim>
+				</port>
+				<port id="3" precision="I32">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_7875">
-					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+				<port id="4" precision="I64">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5170" name="Constant_34733" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18233080" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_34733"/>
-			</rt_info>
+		<layer id="4051" name="Constant_79027" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970950" size="24" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>4</dim>
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5171" name="onnx::Reshape_7876" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7876"/>
-			</rt_info>
+		<layer id="4052" name="/up_blocks.3/attentions.1/transformer_blocks.0/ff/net.0/Slice" type="StridedSlice" version="opset1">
+			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask="" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>320</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>4096</dim>
+					<dim>2560</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>3</dim>
+				</port>
+				<port id="2" precision="I64">
+					<dim>3</dim>
+				</port>
+				<port id="3" precision="I64">
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_7876">
+				<port id="4" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/ff/net.0/Slice_output_0">
 					<dim>2</dim>
-					<dim>64</dim>
-					<dim>64</dim>
-					<dim>320</dim>
+					<dim>4096</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5172" name="Constant_90372" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_34559, onnx::Concat_7879, onnx::Gather_7851, onnx::Unsqueeze_7852, onnx::Unsqueeze_7878"/>
-			</rt_info>
+		<layer id="4053" name="Constant_79091" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
+			<output>
+				<port id="0" precision="I64">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="4054" name="Constant_79090" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5173" name="Constant_34559" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_34559"/>
-			</rt_info>
+		<layer id="4055" name="Constant_79089" type="Const" version="opset1">
+			<data element_type="i32" shape="1" offset="12970946" size="4" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I32">
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5174" name="onnx::Unsqueeze_7852" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_34559, onnx::Concat_7879, onnx::Gather_7851, onnx::Unsqueeze_7852, onnx::Unsqueeze_7878"/>
-			</rt_info>
+		<layer id="4056" name="ScatterUpdate_79092" type="ScatterUpdate" version="opset3">
 			<input>
 				<port id="0" precision="I64">
-					<dim>4</dim>
+					<dim>3</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
+				<port id="2" precision="I64">
+					<dim>1</dim>
+				</port>
+				<port id="3" precision="I32">
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_7879">
-					<dim>1</dim>
+				<port id="4" precision="I64">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5175" name="onnx::Gather_7857" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7857"/>
-			</rt_info>
+		<layer id="4057" name="Constant_79093" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Gather_7857"/>
+				<port id="0" precision="I64">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5176" name="Constant_34567" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_34567"/>
-			</rt_info>
+		<layer id="4058" name="/up_blocks.3/attentions.1/transformer_blocks.0/ff/net.0/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.1/transformer_blocks.0/ff/net.0/Constant_5_output_0">
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5177" name="onnx::Mul_7858" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_34567, onnx::Gather_7857, onnx::Mul_7858"/>
-			</rt_info>
+		<layer id="4059" name="/up_blocks.3/attentions.1/transformer_blocks.0/ff/net.0/Mul_1" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>1</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Mul_7858"/>
+				<port id="2" precision="I64" names="/up_blocks.3/attentions.1/transformer_blocks.0/ff/net.0/Mul_1_output_0">
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5178" name="onnx::Gather_7860" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="14129392" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7860"/>
-			</rt_info>
+		<layer id="4060" name="ScatterUpdate_79094" type="ScatterUpdate" version="opset3">
+			<input>
+				<port id="0" precision="I64">
+					<dim>3</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>1</dim>
+				</port>
+				<port id="2" precision="I64">
+					<dim>1</dim>
+				</port>
+				<port id="3" precision="I32">
+					<dim>1</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64" names="onnx::Gather_7860"/>
+				<port id="4" precision="I64">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5179" name="Constant_34571" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_34571"/>
-			</rt_info>
+		<layer id="4061" name="Constant_79097" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970950" size="24" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5180" name="onnx::Mul_7861" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_34571, onnx::Gather_7860, onnx::Mul_7861"/>
-			</rt_info>
+		<layer id="4062" name="/up_blocks.3/attentions.1/transformer_blocks.0/ff/net.0/Slice_1" type="StridedSlice" version="opset1">
+			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask="" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>2560</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>3</dim>
+				</port>
+				<port id="2" precision="I64">
+					<dim>3</dim>
+				</port>
+				<port id="3" precision="I64">
+					<dim>3</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Mul_7861"/>
+				<port id="4" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/ff/net.0/Slice_1_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>1280</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5181" name="onnx::Unsqueeze_7877" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7877"/>
-			</rt_info>
+		<layer id="4063" name="/up_blocks.3/attentions.1/transformer_blocks.0/ff/net.0/Mul_3" type="Gelu" version="opset7">
+			<data approximation_mode="ERF" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>1280</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_7877"/>
-			</output>
-		</layer>
-		<layer id="5182" name="onnx::Unsqueeze_7880" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7880"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7880">
-					<dim>1</dim>
+				<port id="1" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/ff/net.0/Mul_3_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5183" name="onnx::Concat_7881" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7881, onnx::Unsqueeze_7880"/>
-			</rt_info>
+		<layer id="4064" name="/up_blocks.3/attentions.1/transformer_blocks.0/ff/net.0/Mul_4" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>1280</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>1280</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7881">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/ff/net.0/Mul_4_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5184" name="Constant_90381" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_34563, onnx::Concat_7883, onnx::Gather_7854, onnx::Unsqueeze_7855, onnx::Unsqueeze_7882"/>
-			</rt_info>
+		<layer id="4065" name="Constant_86096_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 1280" offset="1706144082" size="819200" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5185" name="Constant_34563" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
+		<layer id="4066" name="Constant_86096" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_34563"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>1280</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5186" name="onnx::Unsqueeze_7855" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_34563, onnx::Concat_7883, onnx::Gather_7854, onnx::Unsqueeze_7855, onnx::Unsqueeze_7882"/>
-			</rt_info>
+		<layer id="4067" name="/up_blocks.3/attentions.1/transformer_blocks.0/ff/net.2/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_7883">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/ff/net.2/MatMul_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5187" name="onnx::Reshape_7884" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7884"/>
-			</rt_info>
+		<layer id="4068" name="/up_blocks.3/attentions.1/transformer_blocks.0/ff/net.2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_7884">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/ff/net.2/Add_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5188" name="input.1108" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.1108"/>
-			</rt_info>
+		<layer id="4069" name="/up_blocks.3/attentions.1/transformer_blocks.0/Add_2" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>64</dim>
-					<dim>64</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.1108">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/transformer_blocks.0/Add_2_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5189" name="Constant_34818" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_34818"/>
-			</rt_info>
+		<layer id="4070" name="/up_blocks.3/attentions.1/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="13790174" size="32" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.1/Constant_1_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5190" name="onnx::Mul_7894" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7890, onnx::Div_7893, onnx::Mul_7894, onnx::Pow_7887, onnx::ReduceMean_7889, onnx::Sqrt_7892, onnx::Sub_7886"/>
-			</rt_info>
+		<layer id="4071" name="/up_blocks.3/attentions.1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -86703,1728 +67016,1820 @@
 					<dim>320</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_7894">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/Reshape_1_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5191" name="Constant_150607" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="3428279344" size="1280"/>
+		<layer id="4072" name="Constant_30359" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="13790206" size="32" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>320</dim>
+				<port id="0" precision="I64">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5192" name="onnx::Add_7895" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_7895"/>
-			</rt_info>
+		<layer id="4073" name="/up_blocks.3/attentions.1/Transpose_1" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 					<dim>320</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>320</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_7895">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/Transpose_1_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5193" name="Constant_150608" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="3428280624" size="1280"/>
+		<layer id="4074" name="up_blocks.3.attentions.1.proj_out.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320, 1, 1" offset="1706963282" size="204800" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5194" name="onnx::MatMul_7896" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="4075" name="up_blocks.3.attentions.1.proj_out.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_7896"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="up_blocks.3.attentions.1.proj_out.weight">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="4076" name="/up_blocks.3/attentions.1/proj_out/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
 					<dim>1</dim>
 					<dim>1</dim>
-					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_7896">
+				<port id="2" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5195" name="Constant_148587" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320" offset="3428281904" size="409600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9162, q.239"/>
-			</rt_info>
+		<layer id="4077" name="Reshape_30381_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="1707168082" size="640" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>320</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5196" name="q.239" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="4078" name="Reshape_30381" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9162, q.239"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="4079" name="/up_blocks.3/attentions.1/proj_out/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>320</dim>
-					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.239">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/proj_out/Conv_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5197" name="Constant_109752" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
+		<layer id="4080" name="/up_blocks.3/attentions.1/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.1/Add_output_0">
 					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5198" name="onnx::Gather_7903" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7903, onnx::Gather_7906, onnx::Gather_7909"/>
-			</rt_info>
+		<layer id="4081" name="/up_blocks.3/Concat_2" type="Concat" version="opset1">
+			<data axis="1" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_7903,onnx::Gather_7906,onnx::Gather_7909">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/Concat_2_output_0">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5199" name="onnx::Gather_7910" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7910"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_7910"/>
-			</output>
-		</layer>
-		<layer id="5200" name="Constant_34844" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_34844"/>
-			</rt_info>
+		<layer id="4082" name="up_blocks.3.resnets.2.conv_shortcut.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 640, 1, 1" offset="1707168722" size="409600" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5201" name="onnx::Div_7911" type="Gather" version="opset8">
-			<data batch_dims="0"/>
+		<layer id="4083" name="up_blocks.3.resnets.2.conv_shortcut.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_34844, onnx::Div_7911, onnx::Gather_7910"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_7911"/>
-			</output>
-		</layer>
-		<layer id="5202" name="onnx::Div_7912" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_7912"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_7912"/>
+				<port id="1" precision="FP32" names="up_blocks.3.resnets.2.conv_shortcut.weight">
+					<dim>320</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5203" name="onnx::Cast_7913" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_7913, onnx::Cast_7914, onnx::Div_7912, onnx::Unsqueeze_7915"/>
-			</rt_info>
+		<layer id="4084" name="/up_blocks.3/resnets.2/conv_shortcut/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>640</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_7913,onnx::Cast_7914,onnx::Unsqueeze_7915"/>
+				<port id="2" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5204" name="onnx::Unsqueeze_7923" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7923"/>
-			</rt_info>
+		<layer id="4085" name="Reshape_30766_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="1707578322" size="640" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7923">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5205" name="onnx::Concat_7924" type="Unsqueeze" version="opset1">
+		<layer id="4086" name="Reshape_30766" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7924, onnx::Unsqueeze_7923"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7924">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5206" name="onnx::Reshape_7925" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_7925"/>
-			</rt_info>
+		<layer id="4087" name="/up_blocks.3/resnets.2/conv_shortcut/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64">
-					<dim>4</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.2/conv_shortcut/Conv_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5207" name="onnx::Transpose_7926" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_7926"/>
-			</rt_info>
+		<layer id="4088" name="/up_blocks.3/resnets.2/norm1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.3/resnets.2/norm1/Constant_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="4089" name="/up_blocks.3/resnets.2/norm1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
+					<dim>640</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_7926">
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.2/norm1/Reshape_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
+					<dim>32</dim>
+					<dim>81920</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5208" name="Constant_34957" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_34957"/>
-			</rt_info>
+		<layer id="4090" name="Constant_30420" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5209" name="onnx::Reshape_7927" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7927"/>
-			</rt_info>
+		<layer id="4091" name="MVN_30421" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
+					<dim>32</dim>
+					<dim>81920</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_7927">
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.2/norm1/InstanceNormalization_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>32</dim>
+					<dim>81920</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5210" name="onnx::Gather_7904" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7904"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_7904"/>
-			</output>
-		</layer>
-		<layer id="5211" name="Constant_34836" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_34836"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="5212" name="onnx::Unsqueeze_7905" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_34836, onnx::Gather_7904, onnx::Unsqueeze_7905"/>
-			</rt_info>
+		<layer id="4092" name="/up_blocks.3/resnets.2/norm1/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_7905"/>
-			</output>
-		</layer>
-		<layer id="5213" name="onnx::Mul_7928" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7928"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_7928"/>
+				<port id="1" precision="I64" names="/up_blocks.3/resnets.2/norm1/Shape_output_0">
+					<dim>4</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5214" name="onnx::Unsqueeze_7929" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7928, onnx::Unsqueeze_7929"/>
-			</rt_info>
+		<layer id="4093" name="/up_blocks.3/resnets.2/norm1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>81920</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_7929"/>
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.2/norm1/Reshape_1_output_0">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5215" name="onnx::Unsqueeze_7934" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7934"/>
-			</rt_info>
+		<layer id="4094" name="Constant_87190_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="1707578962" size="1280" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7934">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5216" name="onnx::Concat_7935" type="Unsqueeze" version="opset1">
+		<layer id="4095" name="Constant_87190" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7935, onnx::Unsqueeze_7934"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7935">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
 					<dim>1</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="5217" name="Constant_90408" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_34840, onnx::Concat_7937, onnx::Gather_7907, onnx::Unsqueeze_7908, onnx::Unsqueeze_7936"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5218" name="Constant_34840" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_34840"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="5219" name="onnx::Unsqueeze_7908" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_34840, onnx::Concat_7937, onnx::Gather_7907, onnx::Unsqueeze_7908, onnx::Unsqueeze_7936"/>
-			</rt_info>
+		<layer id="4096" name="/up_blocks.3/resnets.2/norm1/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Concat_7937">
 					<dim>1</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="5220" name="onnx::Div_7930" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_7930"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_7930"/>
-			</output>
-		</layer>
-		<layer id="5221" name="onnx::Cast_7931" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_7931, onnx::Cast_7932, onnx::Div_7930, onnx::Unsqueeze_7933"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_7931,onnx::Cast_7932,onnx::Unsqueeze_7933"/>
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.2/norm1/Mul_output_0">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5222" name="onnx::Unsqueeze_7938" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7938"/>
-			</rt_info>
+		<layer id="4097" name="Constant_87191_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 640, 1, 1" offset="1707580242" size="1280" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7938">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5223" name="onnx::Concat_7939" type="Unsqueeze" version="opset1">
+		<layer id="4098" name="Constant_87191" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7939, onnx::Unsqueeze_7938"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7939">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5224" name="onnx::Reshape_7940" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7940"/>
-			</rt_info>
+		<layer id="4099" name="/up_blocks.3/resnets.2/norm1/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>640</dim>
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_7940">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.2/norm1/Add_output_0">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5225" name="q.243" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="q.243"/>
-			</rt_info>
+		<layer id="4100" name="/up_blocks.3/resnets.2/nonlinearity/Mul" type="Swish" version="opset4">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+					<dim>640</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.243">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+				<port id="1" precision="FP32" names="/up_blocks.3/resnets.2/nonlinearity/Mul_output_0">
+					<dim>2</dim>
+					<dim>640</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5226" name="Constant_148594" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320" offset="3428691504" size="409600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.239, onnx::MatMul_9163"/>
-			</rt_info>
+		<layer id="4101" name="up_blocks.3.resnets.2.conv1.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 640, 3, 3" offset="1707581522" size="3686400" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>320</dim>
+				<port id="0" precision="FP16">
 					<dim>320</dim>
+					<dim>640</dim>
+					<dim>3</dim>
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5227" name="k.239" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="4102" name="up_blocks.3.resnets.2.conv1.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="k.239, onnx::MatMul_9163"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>640</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="up_blocks.3.resnets.2.conv1.weight">
+					<dim>320</dim>
+					<dim>640</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="4103" name="/up_blocks.3/resnets.2/conv1/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
+					<dim>640</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>320</dim>
-					<dim>320</dim>
+					<dim>640</dim>
+					<dim>3</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.239">
+				<port id="2" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5228" name="Constant_109821" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
+		<layer id="4104" name="Reshape_30545_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="1711267922" size="640" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5229" name="onnx::Gather_7942" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
+		<layer id="4105" name="Reshape_30545" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7942, onnx::Gather_7945, onnx::Gather_7948"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_7942,onnx::Gather_7945,onnx::Gather_7948">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5230" name="onnx::Gather_7949" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7949"/>
-			</rt_info>
+		<layer id="4106" name="/up_blocks.3/resnets.2/conv1/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64" names="onnx::Gather_7949"/>
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.2/conv1/Conv_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5231" name="Constant_35085" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_35085"/>
-			</rt_info>
+		<layer id="4107" name="up_blocks.3.resnets.2.time_emb_proj.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 1280" offset="1711268562" size="819200" />
 			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="5232" name="onnx::Div_7950" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_35085, onnx::Div_7950, onnx::Gather_7949"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>1280</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Div_7950"/>
 			</output>
 		</layer>
-		<layer id="5233" name="onnx::Div_7951" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
+		<layer id="4108" name="up_blocks.3.resnets.2.time_emb_proj.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_7951"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_7951"/>
+				<port id="1" precision="FP32" names="up_blocks.3.resnets.2.time_emb_proj.weight">
+					<dim>320</dim>
+					<dim>1280</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5234" name="onnx::Cast_7952" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_7952, onnx::Cast_7953, onnx::Div_7951, onnx::Unsqueeze_7954"/>
-			</rt_info>
+		<layer id="4109" name="/up_blocks.3/resnets.2/time_emb_proj/Gemm/WithoutBiases" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>1280</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>1280</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_7952,onnx::Cast_7953,onnx::Unsqueeze_7954"/>
+				<port id="2" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5235" name="onnx::Unsqueeze_7962" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7962"/>
-			</rt_info>
+		<layer id="4110" name="Constant_87192_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320" offset="1712087762" size="640" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7962">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5236" name="onnx::Concat_7963" type="Unsqueeze" version="opset1">
+		<layer id="4111" name="Constant_87192" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7963, onnx::Unsqueeze_7962"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7963">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5237" name="onnx::Reshape_7964" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_7964"/>
-			</rt_info>
+		<layer id="4112" name="/up_blocks.3/resnets.2/time_emb_proj/Gemm" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>320</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64">
-					<dim>4</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.2/time_emb_proj/Gemm_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5238" name="onnx::Transpose_7965" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_7965"/>
-			</rt_info>
+		<layer id="4113" name="/up_blocks.3/resnets.2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.3/resnets.2/Constant_output_0">
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="4114" name="/up_blocks.3/resnets.2/Unsqueeze" type="Unsqueeze" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_7965">
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.2/Unsqueeze_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5239" name="Constant_35198" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_35198"/>
-			</rt_info>
+		<layer id="4115" name="/up_blocks.3/resnets.2/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="7064752" size="8" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="I64" names="/up_blocks.3/resnets.2/Constant_1_output_0">
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5240" name="onnx::Reshape_7966" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7966"/>
-			</rt_info>
+		<layer id="4116" name="/up_blocks.3/resnets.2/Unsqueeze_1" type="Unsqueeze" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_7966">
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.2/Unsqueeze_1_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5241" name="onnx::Gather_7943" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7943"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_7943"/>
-			</output>
-		</layer>
-		<layer id="5242" name="Constant_35077" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_35077"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="5243" name="onnx::Unsqueeze_7944" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_35077, onnx::Gather_7943, onnx::Unsqueeze_7944"/>
-			</rt_info>
+		<layer id="4117" name="/up_blocks.3/resnets.2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_7944"/>
-			</output>
-		</layer>
-		<layer id="5244" name="onnx::Mul_7967" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7967"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_7967"/>
-			</output>
-		</layer>
-		<layer id="5245" name="onnx::Unsqueeze_7968" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_7967, onnx::Unsqueeze_7968"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_7968"/>
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.2/Add_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5246" name="onnx::Unsqueeze_7973" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7973"/>
-			</rt_info>
+		<layer id="4118" name="/up_blocks.3/resnets.2/norm2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7973">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.3/resnets.2/norm2/Constant_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5247" name="onnx::Concat_7974" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7974, onnx::Unsqueeze_7973"/>
-			</rt_info>
+		<layer id="4119" name="/up_blocks.3/resnets.2/norm2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7974">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.2/norm2/Reshape_output_0">
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>40960</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5248" name="Constant_90435" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_35081, onnx::Concat_7976, onnx::Gather_7946, onnx::Unsqueeze_7947, onnx::Unsqueeze_7975"/>
-			</rt_info>
+		<layer id="4120" name="Constant_30593" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5249" name="Constant_35081" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_35081"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="5250" name="onnx::Unsqueeze_7947" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_35081, onnx::Concat_7976, onnx::Gather_7946, onnx::Unsqueeze_7947, onnx::Unsqueeze_7975"/>
-			</rt_info>
+		<layer id="4121" name="MVN_30594" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>40960</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_7976">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.2/norm2/InstanceNormalization_output_0">
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>40960</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5251" name="onnx::Div_7969" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_7969"/>
-			</rt_info>
+		<layer id="4122" name="/up_blocks.3/resnets.2/norm2/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_7969"/>
+				<port id="1" precision="I64" names="/up_blocks.3/resnets.2/norm2/Shape_output_0">
+					<dim>4</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5252" name="onnx::Cast_7970" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_7970, onnx::Cast_7971, onnx::Div_7969, onnx::Unsqueeze_7972"/>
-			</rt_info>
+		<layer id="4123" name="/up_blocks.3/resnets.2/norm2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>40960</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_7970,onnx::Cast_7971,onnx::Unsqueeze_7972"/>
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.2/norm2/Reshape_1_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5253" name="onnx::Unsqueeze_7977" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_7977"/>
-			</rt_info>
+		<layer id="4124" name="Constant_87193_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="1712088402" size="640" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_7977">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5254" name="onnx::Concat_7978" type="Unsqueeze" version="opset1">
+		<layer id="4125" name="Constant_87193" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_7978, onnx::Unsqueeze_7977"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_7978">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5255" name="onnx::Reshape_7979" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_7979"/>
-			</rt_info>
+		<layer id="4126" name="/up_blocks.3/resnets.2/norm2/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_7979">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.2/norm2/Mul_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5256" name="k.243" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.243"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
+		<layer id="4127" name="Constant_87194_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="1712089042" size="640" />
 			<output>
-				<port id="2" precision="FP32" names="k.243">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5257" name="onnx::Mul_8020" type="Einsum" version="opset7">
-			<data equation="bid,bjd-&gt;bij"/>
+		<layer id="4128" name="Constant_87194" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_8020"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_8020">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>4096</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="5258" name="Constant_150609" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1" offset="19054992" size="4"/>
-			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
+					<dim>320</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5259" name="onnx::Softmax_8022" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Softmax_8022"/>
-			</rt_info>
+		<layer id="4129" name="/up_blocks.3/resnets.2/norm2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>4096</dim>
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>1</dim>
+					<dim>320</dim>
 					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Softmax_8022">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>4096</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.2/norm2/Add_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5260" name="attn.119" type="SoftMax" version="opset8">
-			<data axis="-1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="attn.119"/>
-			</rt_info>
+		<layer id="4130" name="/up_blocks.3/resnets.2/nonlinearity_1/Mul" type="Swish" version="opset4">
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>4096</dim>
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="attn.119">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>4096</dim>
+				<port id="1" precision="FP32" names="/up_blocks.3/resnets.2/nonlinearity_1/Mul_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5261" name="Constant_148601" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320" offset="3429101104" size="409600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9164, v.239"/>
-			</rt_info>
+		<layer id="4131" name="up_blocks.3.resnets.2.conv2.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320, 3, 3" offset="1712089682" size="1843200" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>320</dim>
 					<dim>320</dim>
+					<dim>3</dim>
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5262" name="v.239" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="4132" name="up_blocks.3.resnets.2.conv2.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9164, v.239"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="up_blocks.3.resnets.2.conv2.weight">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="4133" name="/up_blocks.3/resnets.2/conv2/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 				<port id="1" precision="FP32">
 					<dim>320</dim>
 					<dim>320</dim>
+					<dim>3</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.239">
+				<port id="2" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5263" name="Constant_109890" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
+		<layer id="4134" name="Reshape_30718_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="1713932882" size="640" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5264" name="onnx::Gather_7981" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
+		<layer id="4135" name="Reshape_30718" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7981, onnx::Gather_7984, onnx::Gather_7987"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_7981,onnx::Gather_7984,onnx::Gather_7987">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5265" name="onnx::Gather_7988" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7988"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_7988"/>
-			</output>
-		</layer>
-		<layer id="5266" name="Constant_35326" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_35326"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="5267" name="onnx::Div_7989" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_35326, onnx::Div_7989, onnx::Gather_7988"/>
-			</rt_info>
+		<layer id="4136" name="/up_blocks.3/resnets.2/conv2/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_7989"/>
-			</output>
-		</layer>
-		<layer id="5268" name="onnx::Div_7990" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_7990"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_7990"/>
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.2/conv2/Conv_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5269" name="onnx::Cast_7991" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_7991, onnx::Cast_7992, onnx::Div_7990, onnx::Unsqueeze_7993"/>
-			</rt_info>
+		<layer id="4137" name="/up_blocks.3/resnets.2/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_7991,onnx::Cast_7992,onnx::Unsqueeze_7993"/>
+				<port id="2" precision="FP32" names="/up_blocks.3/resnets.2/Add_1_output_0,/up_blocks.3/resnets.2/Div_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5270" name="onnx::Unsqueeze_8001" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_8001"/>
-			</rt_info>
+		<layer id="4138" name="/up_blocks.3/attentions.2/norm/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_8001">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.2/norm/Constant_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5271" name="onnx::Concat_8002" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8002, onnx::Unsqueeze_8001"/>
-			</rt_info>
+		<layer id="4139" name="/up_blocks.3/attentions.2/norm/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_8002">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/norm/Reshape_output_0">
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>40960</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="4140" name="Constant_30806" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
+			<output>
+				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5272" name="onnx::Reshape_8003" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_8003"/>
-			</rt_info>
+		<layer id="4141" name="MVN_30807" type="MVN" version="opset6">
+			<data eps="9.9999999747524271e-07" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>32</dim>
+					<dim>40960</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64">
-					<dim>4</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/norm/InstanceNormalization_output_0">
+					<dim>2</dim>
+					<dim>32</dim>
+					<dim>40960</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5273" name="onnx::Transpose_8004" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_8004"/>
-			</rt_info>
+		<layer id="4142" name="/up_blocks.3/attentions.2/norm/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>320</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_8004">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="5274" name="Constant_35439" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_35439"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
+				<port id="1" precision="I64" names="/up_blocks.3/attentions.2/norm/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5275" name="onnx::Reshape_8005" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_8005"/>
-			</rt_info>
+		<layer id="4143" name="/up_blocks.3/attentions.2/norm/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>8</dim>
-					<dim>40</dim>
+					<dim>32</dim>
+					<dim>40960</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_8005">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/norm/Reshape_1_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5276" name="onnx::Gather_7982" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_7982"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_7982"/>
-			</output>
-		</layer>
-		<layer id="5277" name="Constant_35318" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_35318"/>
-			</rt_info>
+		<layer id="4144" name="Constant_87195_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="1713933522" size="640" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5278" name="onnx::Unsqueeze_7983" type="Gather" version="opset8">
-			<data batch_dims="0"/>
+		<layer id="4145" name="Constant_87195" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_35318, onnx::Gather_7982, onnx::Unsqueeze_7983"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_7983"/>
-			</output>
-		</layer>
-		<layer id="5279" name="onnx::Mul_8006" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_8006"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_8006"/>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5280" name="onnx::Unsqueeze_8007" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_8006, onnx::Unsqueeze_8007"/>
-			</rt_info>
+		<layer id="4146" name="/up_blocks.3/attentions.2/norm/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_8007"/>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/norm/Mul_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5281" name="onnx::Unsqueeze_8012" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_8012"/>
-			</rt_info>
+		<layer id="4147" name="Constant_87196_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="1713934162" size="640" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_8012">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5282" name="onnx::Concat_8013" type="Unsqueeze" version="opset1">
+		<layer id="4148" name="Constant_87196" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8013, onnx::Unsqueeze_8012"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_8013">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5283" name="Constant_90462" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_35322, onnx::Concat_8015, onnx::Gather_7985, onnx::Unsqueeze_7986, onnx::Unsqueeze_8014"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
+		<layer id="4149" name="/up_blocks.3/attentions.2/norm/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
+			</input>
+			<output>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/norm/Add_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5284" name="Constant_35322" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_35322"/>
-			</rt_info>
+		<layer id="4150" name="up_blocks.3.attentions.2.proj_in.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320, 1, 1" offset="1713934802" size="204800" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5285" name="onnx::Unsqueeze_7986" type="Gather" version="opset8">
-			<data batch_dims="0"/>
+		<layer id="4151" name="up_blocks.3.attentions.2.proj_in.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_35322, onnx::Concat_8015, onnx::Gather_7985, onnx::Unsqueeze_7986, onnx::Unsqueeze_8014"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_8015">
+				<port id="1" precision="FP32" names="up_blocks.3.attentions.2.proj_in.weight">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5286" name="onnx::Div_8008" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_8008"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_8008"/>
-			</output>
-		</layer>
-		<layer id="5287" name="onnx::Cast_8009" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_8009, onnx::Cast_8010, onnx::Div_8008, onnx::Unsqueeze_8011"/>
-			</rt_info>
+		<layer id="4152" name="/up_blocks.3/attentions.2/proj_in/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_8009,onnx::Cast_8010,onnx::Unsqueeze_8011"/>
+				<port id="2" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5288" name="onnx::Unsqueeze_8016" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_8016"/>
-			</rt_info>
+		<layer id="4153" name="Reshape_30929_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="1714139602" size="640" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_8016">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5289" name="onnx::Concat_8017" type="Unsqueeze" version="opset1">
+		<layer id="4154" name="Reshape_30929" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8017, onnx::Unsqueeze_8016"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_8017">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5290" name="onnx::Reshape_8018" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_8018"/>
-			</rt_info>
+		<layer id="4155" name="/up_blocks.3/attentions.2/proj_in/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
-				<port id="1" precision="I64">
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_8018">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/proj_in/Conv_output_0">
+					<dim>2</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5291" name="v.243" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="v.243"/>
-			</rt_info>
+		<layer id="4156" name="Constant_30957" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9116600" size="32" />
+			<output>
+				<port id="0" precision="I64">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="4157" name="/up_blocks.3/attentions.2/Transpose" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>320</dim>
+					<dim>64</dim>
+					<dim>64</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>3</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.243">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/Transpose_output_0">
+					<dim>2</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5292" name="out.119" type="Einsum" version="opset7">
-			<data equation="bij,bjd-&gt;bid"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="out.119"/>
-			</rt_info>
+		<layer id="4158" name="/up_blocks.3/attentions.2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="9116632" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.2/Constant_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="4159" name="/up_blocks.3/attentions.2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>4096</dim>
+					<dim>2</dim>
+					<dim>64</dim>
+					<dim>64</dim>
+					<dim>320</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+				<port id="1" precision="I64">
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="out.119">
-					<dim>16</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/Reshape_output_0">
+					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5293" name="onnx::Gather_8025" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_8025, onnx::Gather_8028, onnx::Gather_8031"/>
-			</rt_info>
+		<layer id="4160" name="Constant_30966" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
+			<output>
+				<port id="0" precision="I64">
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="4161" name="/up_blocks.3/attentions.2/transformer_blocks.0/norm1/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
+					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>320</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_8025,onnx::Gather_8028,onnx::Gather_8031">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/norm1/Div_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5294" name="onnx::Gather_8026" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_8026"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_8026"/>
-			</output>
-		</layer>
-		<layer id="5295" name="Constant_35564" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_35564"/>
-			</rt_info>
+		<layer id="4162" name="Constant_87197_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="1714140242" size="640" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5296" name="onnx::Div_8027" type="Gather" version="opset8">
-			<data batch_dims="0"/>
+		<layer id="4163" name="Constant_87197" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_35564, onnx::Div_8027, onnx::Gather_8026"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_8027"/>
-			</output>
-		</layer>
-		<layer id="5297" name="onnx::Div_8034" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_8034"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_8034"/>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5298" name="onnx::Cast_8035" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_8035, onnx::Cast_8036, onnx::Div_8034, onnx::Unsqueeze_8037"/>
-			</rt_info>
+		<layer id="4164" name="/up_blocks.3/attentions.2/transformer_blocks.0/norm1/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_8035,onnx::Cast_8036,onnx::Unsqueeze_8037"/>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/norm1/Mul_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5299" name="onnx::Unsqueeze_8039" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_8039"/>
-			</rt_info>
+		<layer id="4165" name="Constant_87198_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="1714140882" size="640" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_8039">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5300" name="onnx::Concat_8040" type="Unsqueeze" version="opset1">
+		<layer id="4166" name="Constant_87198" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8040, onnx::Unsqueeze_8039"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
+					<dim>1</dim>
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_8040">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5301" name="Constant_91038" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="19464604" size="16"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_8047"/>
-			</rt_info>
+		<layer id="4167" name="/up_blocks.3/attentions.2/transformer_blocks.0/norm1/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="I64">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/norm1/Add_1_output_0">
 					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5302" name="Constant_91039" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
+		<layer id="4168" name="Constant_86106_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320" offset="1714141522" size="204800" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5303" name="Gather_91040" type="Gather" version="opset8">
-			<data batch_dims="0"/>
+		<layer id="4169" name="Constant_86106" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_8047"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5304" name="onnx::Reshape_8047" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_8047"/>
-			</rt_info>
+		<layer id="4170" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/to_q/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
-				<port id="2" precision="I64">
+			</input>
+			<output>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/to_q/MatMul_output_0">
 					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
-			</input>
+			</output>
+		</layer>
+		<layer id="4171" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322744" size="32" />
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_8047">
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Constant_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5305" name="onnx::Transpose_8048" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_8048"/>
-			</rt_info>
+		<layer id="4172" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
+					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>320</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_8048">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Reshape_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>4096</dim>
+					<dim>8</dim>
 					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5306" name="Constant_35685" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_35685"/>
-			</rt_info>
+		<layer id="4173" name="Constant_30989" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5307" name="onnx::Reshape_8049" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_8049"/>
-			</rt_info>
+		<layer id="4174" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Transpose" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>4096</dim>
+					<dim>8</dim>
 					<dim>40</dim>
 				</port>
 				<port id="1" precision="I64">
@@ -88432,218 +68837,164 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_8049">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Transpose_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>8</dim>
+					<dim>4096</dim>
 					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5308" name="onnx::Div_8050" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_8050"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_8050"/>
-			</output>
-		</layer>
-		<layer id="5309" name="onnx::Cast_8051" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_8051, onnx::Cast_8052, onnx::Div_8050, onnx::Unsqueeze_8053"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Cast_8051,onnx::Cast_8052,onnx::Unsqueeze_8053"/>
-			</output>
-		</layer>
-		<layer id="5310" name="onnx::Unsqueeze_8056" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_8056"/>
-			</rt_info>
+		<layer id="4175" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="9322808" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_8056">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Constant_1_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5311" name="onnx::Concat_8057" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8057, onnx::Unsqueeze_8056"/>
-			</rt_info>
+		<layer id="4176" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_8057">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Reshape_1_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5312" name="Constant_90489" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_35568, onnx::Concat_8059, onnx::Gather_8029, onnx::Unsqueeze_8030, onnx::Unsqueeze_8058"/>
-			</rt_info>
+		<layer id="4177" name="Constant_86113_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320" offset="1714346322" size="204800" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5313" name="Constant_35568" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_35568"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="5314" name="onnx::Unsqueeze_8030" type="Gather" version="opset8">
-			<data batch_dims="0"/>
+		<layer id="4178" name="Constant_86113" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_35568, onnx::Concat_8059, onnx::Gather_8029, onnx::Unsqueeze_8030, onnx::Unsqueeze_8058"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_8059">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5315" name="onnx::Gather_8032" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_8032"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_8032"/>
-			</output>
-		</layer>
-		<layer id="5316" name="Constant_35572" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_35572"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="5317" name="onnx::Unsqueeze_8033" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_35572, onnx::Gather_8032, onnx::Unsqueeze_8033"/>
-			</rt_info>
+		<layer id="4179" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/to_k/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_8033"/>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/to_k/MatMul_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5318" name="onnx::Mul_8054" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_8054"/>
-			</rt_info>
+		<layer id="4180" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322744" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Mul_8054"/>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Constant_2_output_0">
+					<dim>4</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5319" name="onnx::Unsqueeze_8055" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_8054, onnx::Unsqueeze_8055"/>
-			</rt_info>
+		<layer id="4181" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Reshape_2" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_8055"/>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Reshape_2_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>8</dim>
+					<dim>40</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5320" name="onnx::Unsqueeze_8060" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_8060"/>
-			</rt_info>
+		<layer id="4182" name="Constant_31005" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_8060">
-					<dim>1</dim>
+				<port id="0" precision="I64">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5321" name="onnx::Concat_8061" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8061, onnx::Unsqueeze_8060"/>
-			</rt_info>
+		<layer id="4183" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Transpose_1" type="Transpose" version="opset1">
 			<input>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>8</dim>
+					<dim>40</dim>
+				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_8061">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Transpose_1_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5322" name="onnx::Reshape_8062" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_8062"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
+		<layer id="4184" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="9322808" size="24" />
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_8062">
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Constant_3_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5323" name="onnx::MatMul_8063" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8063"/>
-			</rt_info>
+		<layer id="4185" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Reshape_3" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>8</dim>
+					<dim>4096</dim>
 					<dim>40</dim>
 				</port>
 				<port id="1" precision="I64">
@@ -88651,183 +69002,133 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_8063">
-					<dim>2</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Reshape_3_output_0">
+					<dim>16</dim>
 					<dim>4096</dim>
-					<dim>320</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5324" name="Constant_148608" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320" offset="3429510704" size="409600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_8065, onnx::MatMul_9169"/>
-			</rt_info>
+		<layer id="4186" name="Constant_87199_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1" offset="9527632" size="2" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>320</dim>
-					<dim>320</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5325" name="onnx::Add_8065" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="4187" name="Constant_87199" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_8065, onnx::MatMul_9169"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>320</dim>
-					<dim>320</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_8065">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5326" name="input.1112" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.1112"/>
-			</rt_info>
+		<layer id="4188" name="Multiply_86235" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>320</dim>
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
-					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.1112">
-					<dim>2</dim>
+				<port id="2" precision="FP32">
+					<dim>16</dim>
 					<dim>4096</dim>
-					<dim>320</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5327" name="input.1116" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.1116"/>
-			</rt_info>
+		<layer id="4189" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Mul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>2</dim>
+					<dim>16</dim>
 					<dim>4096</dim>
-					<dim>320</dim>
+					<dim>40</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>2</dim>
+					<dim>16</dim>
 					<dim>4096</dim>
-					<dim>320</dim>
+					<dim>40</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.1116">
-					<dim>2</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Add_output_0,/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Mul_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
 					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="5328" name="Constant_35806" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_35806"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5329" name="onnx::Mul_8076" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_8072, onnx::Div_8075, onnx::Mul_8076, onnx::Pow_8069, onnx::ReduceMean_8071, onnx::Sqrt_8074, onnx::Sub_8068"/>
-			</rt_info>
+		<layer id="4190" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Softmax" type="SoftMax" version="opset8">
+			<data axis="-1" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>2</dim>
+					<dim>16</dim>
+					<dim>4096</dim>
 					<dim>4096</dim>
-					<dim>320</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_8076">
-					<dim>2</dim>
+				<port id="1" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Cast_output_0,/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Softmax_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
 					<dim>4096</dim>
-					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5330" name="Constant_150611" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="3429920304" size="1280"/>
+		<layer id="4191" name="Constant_86120_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320" offset="1714551122" size="204800" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5331" name="onnx::Add_8077" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="4192" name="Constant_86120" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_8077"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>4096</dim>
+				<port id="0" precision="FP16">
 					<dim>320</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
 					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_8077">
-					<dim>2</dim>
-					<dim>4096</dim>
+				<port id="1" precision="FP32">
 					<dim>320</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="5332" name="Constant_150612" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="3429921584" size="1280"/>
-			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5333" name="onnx::MatMul_8078" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8078"/>
-			</rt_info>
+		<layer id="4193" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/to_v/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -88835,203 +69136,178 @@
 					<dim>320</dim>
 				</port>
 				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
+					<dim>320</dim>
 					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_8078">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/to_v/MatMul_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5334" name="Constant_148616" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320" offset="3429922864" size="409600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9170, q.247"/>
-			</rt_info>
+		<layer id="4194" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Constant_4" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322744" size="32" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>320</dim>
-					<dim>320</dim>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Constant_4_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5335" name="q.247" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9170, q.247"/>
-			</rt_info>
+		<layer id="4195" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Reshape_4" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>320</dim>
-					<dim>320</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.247">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Reshape_4_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>320</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5336" name="Constant_109959" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="4287456" size="16"/>
+		<layer id="4196" name="Constant_31021" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
-					<dim>2</dim>
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5337" name="onnx::Gather_8085" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_8085, onnx::Gather_8088, onnx::Gather_8091"/>
-			</rt_info>
+		<layer id="4197" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Transpose_2" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>320</dim>
+					<dim>8</dim>
+					<dim>40</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_8085,onnx::Gather_8088,onnx::Gather_8091">
-					<dim>3</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Transpose_2_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5338" name="onnx::Gather_8092" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_8092"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_8092"/>
-			</output>
-		</layer>
-		<layer id="5339" name="Constant_35832" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_35832"/>
-			</rt_info>
+		<layer id="4198" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="9322808" size="24" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Constant_5_output_0">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5340" name="onnx::Div_8093" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_35832, onnx::Div_8093, onnx::Gather_8092"/>
-			</rt_info>
+		<layer id="4199" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Reshape_5" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64">
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
+				<port id="1" precision="I64">
 					<dim>3</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_8093"/>
-			</output>
-		</layer>
-		<layer id="5341" name="onnx::Div_8094" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_8094"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_8094"/>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Reshape_5_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5342" name="onnx::Cast_8095" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_8095, onnx::Cast_8096, onnx::Div_8094, onnx::Unsqueeze_8097"/>
-			</rt_info>
+		<layer id="4200" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/MatMul_1" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="false" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>4096</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_8095,onnx::Cast_8096,onnx::Unsqueeze_8097"/>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/MatMul_1_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5343" name="onnx::Unsqueeze_8105" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_8105"/>
-			</rt_info>
+		<layer id="4201" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Constant_8" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9732434" size="32" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_8105">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Constant_8_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5344" name="onnx::Concat_8106" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8106, onnx::Unsqueeze_8105"/>
-			</rt_info>
+		<layer id="4202" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Reshape_6" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_8106">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Reshape_6_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5345" name="onnx::Reshape_8107" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_8107"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>2</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
+		<layer id="4203" name="Constant_31047" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="3" precision="I64">
+				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5346" name="onnx::Transpose_8108" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_8108"/>
-			</rt_info>
+		<layer id="4204" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Transpose_4" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>8</dim>
 					<dim>4096</dim>
-					<dim>320</dim>
+					<dim>40</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_8108">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Transpose_4_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>8</dim>
@@ -89039,21 +69315,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="5347" name="Constant_35945" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_35945"/>
-			</rt_info>
+		<layer id="4205" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Constant_9" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="9116632" size="24" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Constant_9_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5348" name="onnx::Reshape_8109" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_8109"/>
-			</rt_info>
+		<layer id="4206" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Reshape_7" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -89062,452 +69333,405 @@
 					<dim>40</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>4</dim>
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_8109">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/Reshape_7_output_0">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5349" name="onnx::Gather_8086" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_8086"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_8086"/>
-			</output>
-		</layer>
-		<layer id="5350" name="Constant_35824" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_35824"/>
-			</rt_info>
+		<layer id="4207" name="Constant_86127_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320" offset="1714755922" size="204800" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5351" name="onnx::Unsqueeze_8087" type="Gather" version="opset8">
-			<data batch_dims="0"/>
+		<layer id="4208" name="Constant_86127" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_35824, onnx::Gather_8086, onnx::Unsqueeze_8087"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_8087"/>
-			</output>
-		</layer>
-		<layer id="5352" name="onnx::Mul_8110" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_8110"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_8110"/>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5353" name="onnx::Unsqueeze_8111" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_8110, onnx::Unsqueeze_8111"/>
-			</rt_info>
+		<layer id="4209" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/to_out.0/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_8111"/>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/to_out.0/MatMul_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5354" name="onnx::Unsqueeze_8116" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_8116"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_8116">
+		<layer id="4210" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/to_out.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
+			<input>
+				<port id="0" precision="FP32">
+					<dim>1</dim>
 					<dim>1</dim>
+					<dim>320</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
+				</port>
+			</input>
+			<output>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn1/to_out.0/Add_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5355" name="onnx::Concat_8117" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8117, onnx::Unsqueeze_8116"/>
-			</rt_info>
+		<layer id="4211" name="/up_blocks.3/attentions.2/transformer_blocks.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_8117">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/Add_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5356" name="Constant_90516" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_35828, onnx::Concat_8119, onnx::Gather_8089, onnx::Unsqueeze_8090, onnx::Unsqueeze_8118"/>
-			</rt_info>
+		<layer id="4212" name="Constant_31059" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5357" name="Constant_35828" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_35828"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="5358" name="onnx::Unsqueeze_8090" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_35828, onnx::Concat_8119, onnx::Gather_8089, onnx::Unsqueeze_8090, onnx::Unsqueeze_8118"/>
-			</rt_info>
+		<layer id="4213" name="/up_blocks.3/attentions.2/transformer_blocks.0/norm2/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_8119">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/norm2/Div_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5359" name="onnx::Div_8112" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_8112"/>
-			</rt_info>
+		<layer id="4214" name="Constant_87201_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="1714960722" size="640" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_8112"/>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5360" name="onnx::Cast_8113" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
+		<layer id="4215" name="Constant_87201" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_8113, onnx::Cast_8114, onnx::Div_8112, onnx::Unsqueeze_8115"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_8113,onnx::Cast_8114,onnx::Unsqueeze_8115"/>
-			</output>
-		</layer>
-		<layer id="5361" name="onnx::Unsqueeze_8120" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_8120"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_8120">
+				<port id="1" precision="FP32">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5362" name="onnx::Concat_8121" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8121, onnx::Unsqueeze_8120"/>
-			</rt_info>
+		<layer id="4216" name="/up_blocks.3/attentions.2/transformer_blocks.0/norm2/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_8121">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/norm2/Mul_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="4217" name="Constant_87202_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="1714961362" size="640" />
+			<output>
+				<port id="0" precision="FP16">
 					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5363" name="onnx::Reshape_8122" type="Concat" version="opset1">
-			<data axis="0"/>
+		<layer id="4218" name="Constant_87202" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_8122"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
 					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_8122">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5364" name="q.251" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="q.251"/>
-			</rt_info>
+		<layer id="4219" name="/up_blocks.3/attentions.2/transformer_blocks.0/norm2/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>8</dim>
 					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>320</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="q.251">
-					<dim>16</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/norm2/Add_1_output_0">
+					<dim>2</dim>
 					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5365" name="Constant_148623" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 768" offset="3430332464" size="983040"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.247, onnx::MatMul_9171"/>
-			</rt_info>
+		<layer id="4220" name="Constant_86135_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320" offset="1714962002" size="204800" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
+					<dim>320</dim>
 					<dim>320</dim>
-					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5366" name="k.247" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="4221" name="Constant_86135" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="k.247, onnx::MatMul_9171"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>77</dim>
-					<dim>768</dim>
-				</port>
-				<port id="1" precision="FP32">
+				<port id="0" precision="FP16">
+					<dim>320</dim>
 					<dim>320</dim>
-					<dim>768</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="k.247">
-					<dim>2</dim>
-					<dim>77</dim>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5367" name="onnx::Transpose_8137" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_8137"/>
-			</rt_info>
+		<layer id="4222" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/to_q/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>77</dim>
+					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_8137">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/to_q/MatMul_output_0">
 					<dim>2</dim>
-					<dim>77</dim>
-					<dim>8</dim>
-					<dim>40</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5368" name="Constant_36066" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_36066"/>
-			</rt_info>
+		<layer id="4223" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322744" size="32" />
 			<output>
-				<port id="0" precision="I64">
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Constant_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5369" name="onnx::Reshape_8138" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_8138"/>
-			</rt_info>
+		<layer id="4224" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
-					<dim>77</dim>
-					<dim>8</dim>
-					<dim>40</dim>
+					<dim>4096</dim>
+					<dim>320</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_8138">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Reshape_output_0">
 					<dim>2</dim>
+					<dim>4096</dim>
 					<dim>8</dim>
-					<dim>77</dim>
 					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5370" name="k.251" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="k.251"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>2</dim>
-					<dim>8</dim>
-					<dim>77</dim>
-					<dim>40</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>3</dim>
-				</port>
-			</input>
+		<layer id="4225" name="Constant_31082" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="2" precision="FP32" names="k.251">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>40</dim>
+				<port id="0" precision="I64">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5371" name="onnx::Mul_8174" type="Einsum" version="opset7">
-			<data equation="bid,bjd-&gt;bij"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_8174"/>
-			</rt_info>
+		<layer id="4226" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Transpose" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
+					<dim>2</dim>
 					<dim>4096</dim>
+					<dim>8</dim>
 					<dim>40</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>40</dim>
+				<port id="1" precision="I64">
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_8174">
-					<dim>16</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Transpose_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
 					<dim>4096</dim>
-					<dim>77</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5372" name="Constant_150613" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 1" offset="19054992" size="4"/>
+		<layer id="4227" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="9322808" size="24" />
 			<output>
-				<port id="0" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Constant_1_output_0">
+					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5373" name="onnx::Softmax_8176" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Softmax_8176"/>
-			</rt_info>
+		<layer id="4228" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
+					<dim>2</dim>
+					<dim>8</dim>
 					<dim>4096</dim>
-					<dim>77</dim>
+					<dim>40</dim>
 				</port>
-				<port id="1" precision="FP32">
-					<dim>1</dim>
-					<dim>1</dim>
-					<dim>1</dim>
+				<port id="1" precision="I64">
+					<dim>3</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Softmax_8176">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Reshape_1_output_0">
 					<dim>16</dim>
 					<dim>4096</dim>
-					<dim>77</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5374" name="attn.123" type="SoftMax" version="opset8">
-			<data axis="-1"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="attn.123"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>77</dim>
-				</port>
-			</input>
+		<layer id="4229" name="Constant_86142_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 768" offset="1715166802" size="491520" />
 			<output>
-				<port id="1" precision="FP32" names="attn.123">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>77</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5375" name="Constant_148630" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 768" offset="3431315504" size="983040"/>
+		<layer id="4230" name="Constant_86142" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9172, v.247"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>768</dim>
+				</port>
+			</input>
 			<output>
-				<port id="0" precision="FP32">
+				<port id="1" precision="FP32">
 					<dim>320</dim>
 					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5376" name="v.247" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_9172, v.247"/>
-			</rt_info>
+		<layer id="4231" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/to_k/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -89520,18 +69744,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.247">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/to_k/MatMul_output_0">
 					<dim>2</dim>
 					<dim>77</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5377" name="onnx::Transpose_8162" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_8162"/>
-			</rt_info>
+		<layer id="4232" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="10634866" size="32" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Constant_2_output_0">
+					<dim>4</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="4233" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Reshape_2" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -89543,7 +69772,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_8162">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Reshape_2_output_0">
 					<dim>2</dim>
 					<dim>77</dim>
 					<dim>8</dim>
@@ -89551,21 +69780,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="5378" name="Constant_36074" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_36074"/>
-			</rt_info>
+		<layer id="4234" name="Constant_31098" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5379" name="onnx::Reshape_8163" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_8163"/>
-			</rt_info>
+		<layer id="4235" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Transpose_1" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -89578,7 +69801,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_8163">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Transpose_1_output_0">
 					<dim>2</dim>
 					<dim>8</dim>
 					<dim>77</dim>
@@ -89586,11 +69809,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="5380" name="v.251" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="v.251"/>
-			</rt_info>
+		<layer id="4236" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="10634898" size="24" />
+			<output>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Constant_3_output_0">
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="4237" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Reshape_3" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -89603,242 +69831,195 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="v.251">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Reshape_3_output_0">
 					<dim>16</dim>
 					<dim>77</dim>
 					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5381" name="out.123" type="Einsum" version="opset7">
-			<data equation="bij,bjd-&gt;bid"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="out.123"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>77</dim>
-				</port>
-				<port id="1" precision="FP32">
-					<dim>16</dim>
-					<dim>77</dim>
-					<dim>40</dim>
-				</port>
-			</input>
+		<layer id="4238" name="Constant_87203_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 1" offset="9527632" size="2" />
 			<output>
-				<port id="2" precision="FP32" names="out.123">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5382" name="onnx::Gather_8179" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
+		<layer id="4239" name="Constant_87203" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_8179, onnx::Gather_8182, onnx::Gather_8185"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_8179,onnx::Gather_8182,onnx::Gather_8185">
-					<dim>3</dim>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5383" name="onnx::Gather_8180" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_8180"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_8180"/>
-			</output>
-		</layer>
-		<layer id="5384" name="Constant_36086" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_36086"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="5385" name="onnx::Div_8181" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_36086, onnx::Div_8181, onnx::Gather_8180"/>
-			</rt_info>
+		<layer id="4240" name="Multiply_86237" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>40</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>1</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Div_8181"/>
-			</output>
-		</layer>
-		<layer id="5386" name="onnx::Div_8188" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_8188"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Div_8188"/>
+				<port id="2" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>40</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5387" name="onnx::Cast_8189" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_8189, onnx::Cast_8190, onnx::Div_8188, onnx::Unsqueeze_8191"/>
-			</rt_info>
+		<layer id="4241" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Mul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>40</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_8189,onnx::Cast_8190,onnx::Unsqueeze_8191"/>
-			</output>
-		</layer>
-		<layer id="5388" name="onnx::Unsqueeze_8193" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_8193"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_8193">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Add_output_0,/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Mul_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>77</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5389" name="onnx::Concat_8194" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8194, onnx::Unsqueeze_8193"/>
-			</rt_info>
+		<layer id="4242" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Softmax" type="SoftMax" version="opset8">
+			<data axis="-1" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>77</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_8194">
-					<dim>1</dim>
+				<port id="1" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Cast_output_0,/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Softmax_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>77</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5390" name="Constant_91048" type="Const" version="opset1">
-			<data element_type="i64" shape="2" offset="19464604" size="16"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_8201"/>
-			</rt_info>
+		<layer id="4243" name="Constant_86149_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 768" offset="1715658322" size="491520" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5391" name="Constant_91049" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="5392" name="Gather_91050" type="Gather" version="opset8">
-			<data batch_dims="0"/>
+		<layer id="4244" name="Constant_86149" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_8201"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>2</dim>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>768</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64">
-					<dim>2</dim>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>768</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5393" name="onnx::Reshape_8201" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8343, onnx::Reshape_8201"/>
-			</rt_info>
+		<layer id="4245" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/to_v/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>768</dim>
 				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>768</dim>
 				</port>
-				<port id="2" precision="I64">
+			</input>
+			<output>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/to_v/MatMul_output_0">
 					<dim>2</dim>
+					<dim>77</dim>
+					<dim>320</dim>
 				</port>
-			</input>
+			</output>
+		</layer>
+		<layer id="4246" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Constant_4" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="10634866" size="32" />
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_8201">
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Constant_4_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5394" name="onnx::Transpose_8202" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_8202"/>
-			</rt_info>
+		<layer id="4247" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Reshape_4" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
-					<dim>16</dim>
-					<dim>4096</dim>
-					<dim>40</dim>
+					<dim>2</dim>
+					<dim>77</dim>
+					<dim>320</dim>
 				</port>
 				<port id="1" precision="I64">
 					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_8202">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Reshape_4_output_0">
 					<dim>2</dim>
+					<dim>77</dim>
 					<dim>8</dim>
-					<dim>4096</dim>
 					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5395" name="Constant_36207" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18645336" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_36207"/>
-			</rt_info>
+		<layer id="4248" name="Constant_31114" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5396" name="onnx::Reshape_8203" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_8203"/>
-			</rt_info>
+		<layer id="4249" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Transpose_2" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
+					<dim>77</dim>
 					<dim>8</dim>
-					<dim>4096</dim>
 					<dim>40</dim>
 				</port>
 				<port id="1" precision="I64">
@@ -89846,213 +70027,133 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_8203">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Transpose_2_output_0">
 					<dim>2</dim>
-					<dim>4096</dim>
 					<dim>8</dim>
+					<dim>77</dim>
 					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5397" name="onnx::Div_8204" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_8204"/>
-			</rt_info>
+		<layer id="4250" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="10634898" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_8204"/>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Constant_5_output_0">
+					<dim>3</dim>
+				</port>
 			</output>
 		</layer>
-		<layer id="5398" name="onnx::Cast_8205" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Cast_8205, onnx::Cast_8206, onnx::Div_8204, onnx::Unsqueeze_8207"/>
-			</rt_info>
+		<layer id="4251" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Reshape_5" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>77</dim>
+					<dim>40</dim>
+				</port>
+				<port id="1" precision="I64">
+					<dim>3</dim>
+				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Cast_8205,onnx::Cast_8206,onnx::Unsqueeze_8207"/>
-			</output>
-		</layer>
-		<layer id="5399" name="onnx::Unsqueeze_8210" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_8210"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_8210">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Reshape_5_output_0">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5400" name="onnx::Concat_8211" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8211, onnx::Unsqueeze_8210"/>
-			</rt_info>
+		<layer id="4252" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/MatMul_1" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="false" />
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>77</dim>
+				</port>
+				<port id="1" precision="FP32">
+					<dim>16</dim>
+					<dim>77</dim>
+					<dim>40</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_8211">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/MatMul_1_output_0">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5401" name="Constant_90543" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_36090, onnx::Concat_8213, onnx::Gather_8183, onnx::Unsqueeze_8184, onnx::Unsqueeze_8212"/>
-			</rt_info>
+		<layer id="4253" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Constant_8" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9732434" size="32" />
 			<output>
-				<port id="0" precision="I64">
-					<dim>1</dim>
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Constant_8_output_0">
+					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5402" name="Constant_36090" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_36090"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="5403" name="onnx::Unsqueeze_8184" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_36090, onnx::Concat_8213, onnx::Gather_8183, onnx::Unsqueeze_8184, onnx::Unsqueeze_8212"/>
-			</rt_info>
+		<layer id="4254" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Reshape_6" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
-				<port id="0" precision="I64">
-					<dim>3</dim>
+				<port id="0" precision="FP32">
+					<dim>16</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
-				<port id="2" precision="I64"/>
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Concat_8213">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Reshape_6_output_0">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5404" name="onnx::Gather_8186" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_8186"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Gather_8186"/>
-			</output>
-		</layer>
-		<layer id="5405" name="Constant_36094" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_36094"/>
-			</rt_info>
+		<layer id="4255" name="Constant_31140" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="9322776" size="32" />
 			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="5406" name="onnx::Unsqueeze_8187" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_36094, onnx::Gather_8186, onnx::Unsqueeze_8187"/>
-			</rt_info>
-			<input>
 				<port id="0" precision="I64">
-					<dim>3</dim>
+					<dim>4</dim>
 				</port>
-				<port id="1" precision="I64"/>
-				<port id="2" precision="I64"/>
-			</input>
-			<output>
-				<port id="3" precision="I64" names="onnx::Unsqueeze_8187"/>
-			</output>
-		</layer>
-		<layer id="5407" name="onnx::Mul_8208" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="19464596" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_8208"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Mul_8208"/>
 			</output>
 		</layer>
-		<layer id="5408" name="onnx::Unsqueeze_8209" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_8208, onnx::Unsqueeze_8209"/>
-			</rt_info>
+		<layer id="4256" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Transpose_4" type="Transpose" version="opset1">
 			<input>
-				<port id="0" precision="I64"/>
-				<port id="1" precision="I64"/>
-			</input>
-			<output>
-				<port id="2" precision="I64" names="onnx::Unsqueeze_8209"/>
-			</output>
-		</layer>
-		<layer id="5409" name="onnx::Unsqueeze_8214" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Unsqueeze_8214"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64" names="onnx::Unsqueeze_8214">
-					<dim>1</dim>
+				<port id="0" precision="FP32">
+					<dim>2</dim>
+					<dim>8</dim>
+					<dim>4096</dim>
+					<dim>40</dim>
 				</port>
-			</output>
-		</layer>
-		<layer id="5410" name="onnx::Concat_8215" type="Unsqueeze" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Concat_8215, onnx::Unsqueeze_8214"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64"/>
 				<port id="1" precision="I64">
-					<dim>1</dim>
+					<dim>4</dim>
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Concat_8215">
-					<dim>1</dim>
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Transpose_4_output_0">
+					<dim>2</dim>
+					<dim>4096</dim>
+					<dim>8</dim>
+					<dim>40</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5411" name="onnx::Reshape_8216" type="Concat" version="opset1">
-			<data axis="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_8216"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>1</dim>
-				</port>
-				<port id="2" precision="I64">
-					<dim>1</dim>
-				</port>
-			</input>
+		<layer id="4257" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Constant_9" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="9116632" size="24" />
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_8216">
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Constant_9_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5412" name="onnx::MatMul_8217" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8217"/>
-			</rt_info>
+		<layer id="4258" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Reshape_7" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -90065,30 +70166,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_8217">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/Reshape_7_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5413" name="Constant_148637" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320" offset="3432298544" size="409600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_8219, onnx::MatMul_9193"/>
-			</rt_info>
+		<layer id="4259" name="Constant_86156_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320" offset="1716149842" size="204800" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>320</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5414" name="onnx::Add_8219" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="4260" name="Constant_86156" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_8219, onnx::MatMul_9193"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="4261" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/to_out.0/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -90101,18 +70214,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_8219">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/to_out.0/MatMul_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5415" name="input.1120" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.1120"/>
-			</rt_info>
+		<layer id="4262" name="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/to_out.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>1</dim>
@@ -90126,18 +70236,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.1120">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/attn2/to_out.0/Add_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5416" name="input.1124" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.1124"/>
-			</rt_info>
+		<layer id="4263" name="/up_blocks.3/attentions.2/transformer_blocks.0/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -90151,29 +70258,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.1124">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/Add_1_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5417" name="Constant_36328" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_36328"/>
-			</rt_info>
+		<layer id="4264" name="Constant_31152" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5418" name="onnx::Mul_8230" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_8226, onnx::Div_8229, onnx::Mul_8230, onnx::Pow_8223, onnx::ReduceMean_8225, onnx::Sqrt_8228, onnx::Sub_8222"/>
-			</rt_info>
+		<layer id="4265" name="/up_blocks.3/attentions.2/transformer_blocks.0/norm3/Div" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -90185,28 +70286,45 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_8230">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/norm3/Div_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5419" name="Constant_150615" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="3432708144" size="1280"/>
+		<layer id="4266" name="Constant_87205_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="1716354642" size="640" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5420" name="onnx::Add_8231" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="4267" name="Constant_87205" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_8231"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="4268" name="/up_blocks.3/attentions.2/transformer_blocks.0/norm3/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -90220,28 +70338,45 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_8231">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/norm3/Mul_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5421" name="Constant_150616" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 1, 320" offset="3432709424" size="1280"/>
+		<layer id="4269" name="Constant_87206_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 1, 320" offset="1716355282" size="640" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>1</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5422" name="onnx::MatMul_8232" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="4270" name="Constant_87206" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::MatMul_8232"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>1</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="4271" name="/up_blocks.3/attentions.2/transformer_blocks.0/norm3/Add_1" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -90255,30 +70390,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::MatMul_8232">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/norm3/Add_1_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5423" name="Constant_148645" type="Const" version="opset1">
-			<data element_type="f32" shape="2560, 320" offset="3432710704" size="3276800"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_8234, onnx::MatMul_9194"/>
-			</rt_info>
+		<layer id="4272" name="Constant_86164_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="2560, 320" offset="1716355922" size="1638400" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>2560</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5424" name="onnx::Add_8234" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="4273" name="Constant_86164" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_8234, onnx::MatMul_9194"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>2560</dim>
+					<dim>320</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>2560</dim>
+					<dim>320</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="4274" name="/up_blocks.3/attentions.2/transformer_blocks.0/ff/net.0/proj/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -90291,18 +70438,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_8234">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/ff/net.0/proj/MatMul_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5425" name="onnx::Shape_8235" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Shape_8235"/>
-			</rt_info>
+		<layer id="4275" name="/up_blocks.3/attentions.2/transformer_blocks.0/ff/net.0/proj/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>1</dim>
@@ -90316,51 +70460,39 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Shape_8235">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/ff/net.0/proj/Add_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>2560</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5426" name="Constant_128682" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_36356, onnx::Gather_8237, onnx::Mul_8246"/>
-			</rt_info>
+		<layer id="4276" name="Constant_79223" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5427" name="Constant_128683" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_36356, onnx::Gather_8237, onnx::Mul_8246"/>
-			</rt_info>
+		<layer id="4277" name="Constant_79224" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5428" name="Constant_128679" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_36356, onnx::Gather_8237, onnx::Mul_8246"/>
-			</rt_info>
+		<layer id="4278" name="Constant_79220" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5429" name="onnx::Gather_8236" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_8236"/>
-			</rt_info>
+		<layer id="4279" name="/up_blocks.3/attentions.2/transformer_blocks.0/ff/net.0/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -90369,36 +70501,27 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Gather_8236">
+				<port id="1" precision="I64" names="/up_blocks.3/attentions.2/transformer_blocks.0/ff/net.0/Shape_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5430" name="onnx::Gather_8237" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="18233136" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Gather_8237"/>
-			</rt_info>
+		<layer id="4280" name="/up_blocks.3/attentions.2/transformer_blocks.0/ff/net.0/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="9116656" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Gather_8237">
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.2/transformer_blocks.0/ff/net.0/Constant_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5431" name="Constant_36345" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_36345"/>
-			</rt_info>
+		<layer id="4281" name="Constant_31169" type="Const" version="opset1">
+			<data element_type="i64" shape="" offset="2143392" size="8" />
 			<output>
-				<port id="0" precision="I64"/>
+				<port id="0" precision="I64" />
 			</output>
 		</layer>
-		<layer id="5432" name="onnx::Add_8238" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_36345, onnx::Add_8238, onnx::Gather_8237"/>
-			</rt_info>
+		<layer id="4282" name="/up_blocks.3/attentions.2/transformer_blocks.0/ff/net.0/Gather" type="Gather" version="opset8">
+			<data batch_dims="0" />
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
@@ -90406,30 +70529,24 @@
 				<port id="1" precision="I64">
 					<dim>1</dim>
 				</port>
-				<port id="2" precision="I64"/>
+				<port id="2" precision="I64" />
 			</input>
 			<output>
-				<port id="3" precision="I64" names="onnx::Add_8238">
+				<port id="3" precision="I64" names="/up_blocks.3/attentions.2/transformer_blocks.0/ff/net.0/Gather_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5433" name="onnx::Add_8240" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="4286760" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_8240"/>
-			</rt_info>
+		<layer id="4283" name="/up_blocks.3/attentions.2/transformer_blocks.0/ff/net.0/Constant_2" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="2143400" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Add_8240">
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.2/transformer_blocks.0/ff/net.0/Constant_2_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5434" name="onnx::Div_8241" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_8240, onnx::Div_8241"/>
-			</rt_info>
+		<layer id="4284" name="/up_blocks.3/attentions.2/transformer_blocks.0/ff/net.0/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="I64">
 					<dim>1</dim>
@@ -90439,27 +70556,21 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Div_8241">
+				<port id="2" precision="I64" names="/up_blocks.3/attentions.2/transformer_blocks.0/ff/net.0/Add_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5435" name="onnx::Div_8242" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_8242"/>
-			</rt_info>
+		<layer id="4285" name="/up_blocks.3/attentions.2/transformer_blocks.0/ff/net.0/Constant_3" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Div_8242">
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.2/transformer_blocks.0/ff/net.0/Constant_3_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5436" name="onnx::Mul_8243" type="Divide" version="opset1">
-			<data auto_broadcast="numpy" m_pythondiv="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Div_8242, onnx::Mul_8243, onnx::Mul_8244, onnx::Slice_8245"/>
-			</rt_info>
+		<layer id="4286" name="/up_blocks.3/attentions.2/transformer_blocks.0/ff/net.0/Div" type="Divide" version="opset1">
+			<data auto_broadcast="numpy" m_pythondiv="true" />
 			<input>
 				<port id="0" precision="I64">
 					<dim>1</dim>
@@ -90469,26 +70580,20 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Mul_8243,onnx::Slice_8245">
+				<port id="2" precision="I64" names="/up_blocks.3/attentions.2/transformer_blocks.0/ff/net.0/Div_output_0,/up_blocks.3/attentions.2/transformer_blocks.0/ff/net.0/Mul_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5437" name="Constant_128678" type="Const" version="opset1">
-			<data element_type="i32" shape="1" offset="25941500" size="4"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_36356, onnx::Gather_8237, onnx::Mul_8246"/>
-			</rt_info>
+		<layer id="4287" name="Constant_79219" type="Const" version="opset1">
+			<data element_type="i32" shape="1" offset="12970946" size="4" />
 			<output>
 				<port id="0" precision="I32">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5438" name="ScatterUpdate_128684" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_36356, onnx::Gather_8237, onnx::Mul_8246"/>
-			</rt_info>
+		<layer id="4288" name="ScatterUpdate_79225" type="ScatterUpdate" version="opset3">
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
@@ -90509,22 +70614,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="5439" name="Constant_128687" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941504" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_36356, onnx::Gather_8237, onnx::Mul_8246"/>
-			</rt_info>
+		<layer id="4289" name="Constant_79228" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970950" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5440" name="onnx::Mul_8246" type="StridedSlice" version="opset1">
-			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask=""/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_36356, onnx::Gather_8237, onnx::Mul_8246"/>
-			</rt_info>
+		<layer id="4290" name="/up_blocks.3/attentions.2/transformer_blocks.0/ff/net.0/Slice" type="StridedSlice" version="opset1">
+			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask="" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -90542,50 +70641,38 @@
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="FP32" names="onnx::Mul_8246">
+				<port id="4" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/ff/net.0/Slice_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5441" name="Constant_128751" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_36425, onnx::Div_8249, onnx::Gather_8237"/>
-			</rt_info>
+		<layer id="4291" name="Constant_79292" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5442" name="Constant_128750" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_36425, onnx::Div_8249, onnx::Gather_8237"/>
-			</rt_info>
+		<layer id="4292" name="Constant_79291" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5443" name="Constant_128749" type="Const" version="opset1">
-			<data element_type="i32" shape="1" offset="25941500" size="4"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_36425, onnx::Div_8249, onnx::Gather_8237"/>
-			</rt_info>
+		<layer id="4293" name="Constant_79290" type="Const" version="opset1">
+			<data element_type="i32" shape="1" offset="12970946" size="4" />
 			<output>
 				<port id="0" precision="I32">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5444" name="ScatterUpdate_128752" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_36425, onnx::Div_8249, onnx::Gather_8237"/>
-			</rt_info>
+		<layer id="4294" name="ScatterUpdate_79293" type="ScatterUpdate" version="opset3">
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
@@ -90606,33 +70693,24 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="5445" name="Constant_128753" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941476" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_36425, onnx::Div_8249, onnx::Gather_8237"/>
-			</rt_info>
+		<layer id="4295" name="Constant_79294" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970922" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5446" name="onnx::Mul_8247" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_8247"/>
-			</rt_info>
+		<layer id="4296" name="/up_blocks.3/attentions.2/transformer_blocks.0/ff/net.0/Constant_5" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Mul_8247">
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.2/transformer_blocks.0/ff/net.0/Constant_5_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5447" name="onnx::Slice_8248" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_8247, onnx::Slice_8248"/>
-			</rt_info>
+		<layer id="4297" name="/up_blocks.3/attentions.2/transformer_blocks.0/ff/net.0/Mul_1" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="I64">
 					<dim>1</dim>
@@ -90642,15 +70720,12 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="I64" names="onnx::Slice_8248">
+				<port id="2" precision="I64" names="/up_blocks.3/attentions.2/transformer_blocks.0/ff/net.0/Mul_1_output_0">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5448" name="ScatterUpdate_128754" type="ScatterUpdate" version="opset3">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_36425, onnx::Div_8249, onnx::Gather_8237"/>
-			</rt_info>
+		<layer id="4298" name="ScatterUpdate_79295" type="ScatterUpdate" version="opset3">
 			<input>
 				<port id="0" precision="I64">
 					<dim>3</dim>
@@ -90671,22 +70746,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="5449" name="Constant_128757" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="25941504" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_36425, onnx::Div_8249, onnx::Gather_8237"/>
-			</rt_info>
+		<layer id="4299" name="Constant_79298" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="12970950" size="24" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5450" name="onnx::Div_8249" type="StridedSlice" version="opset1">
-			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask=""/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Broadcast_36425, onnx::Div_8249, onnx::Gather_8237"/>
-			</rt_info>
+		<layer id="4300" name="/up_blocks.3/attentions.2/transformer_blocks.0/ff/net.0/Slice_1" type="StridedSlice" version="opset1">
+			<data begin_mask="1, 1, 0" end_mask="1, 1, 0" new_axis_mask="" shrink_axis_mask="" ellipsis_mask="" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -90704,18 +70773,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="4" precision="FP32" names="onnx::Div_8249">
+				<port id="4" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/ff/net.0/Slice_1_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5451" name="onnx::Mul_8257" type="Gelu" version="opset7">
-			<data approximation_mode="ERF"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_8252, onnx::Erf_8251, onnx::Mul_8254, onnx::Mul_8255, onnx::Mul_8257"/>
-			</rt_info>
+		<layer id="4301" name="/up_blocks.3/attentions.2/transformer_blocks.0/ff/net.0/Mul_3" type="Gelu" version="opset7">
+			<data approximation_mode="ERF" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -90724,18 +70790,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="onnx::Mul_8257">
+				<port id="1" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/ff/net.0/Mul_3_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5452" name="input.1128" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.1128"/>
-			</rt_info>
+		<layer id="4302" name="/up_blocks.3/attentions.2/transformer_blocks.0/ff/net.0/Mul_4" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -90749,30 +70812,42 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.1128">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/ff/net.0/Mul_4_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5453" name="Constant_148653" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 1280" offset="3435987504" size="1638400"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_8260, onnx::MatMul_9195"/>
-			</rt_info>
+		<layer id="4303" name="Constant_86172_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 1280" offset="1717994322" size="819200" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>320</dim>
 					<dim>1280</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5454" name="onnx::Add_8260" type="MatMul" version="opset1">
-			<data transpose_a="false" transpose_b="true"/>
+		<layer id="4304" name="Constant_86172" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_8260, onnx::MatMul_9195"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>1280</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>320</dim>
+					<dim>1280</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="4305" name="/up_blocks.3/attentions.2/transformer_blocks.0/ff/net.2/MatMul" type="MatMul" version="opset1">
+			<data transpose_a="false" transpose_b="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -90785,18 +70860,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_8260">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/ff/net.2/MatMul_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5455" name="onnx::Add_8261" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_8261"/>
-			</rt_info>
+		<layer id="4306" name="/up_blocks.3/attentions.2/transformer_blocks.0/ff/net.2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>1</dim>
@@ -90810,18 +70882,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_8261">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/ff/net.2/Add_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5456" name="onnx::Reshape_8262" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_8262"/>
-			</rt_info>
+		<layer id="4307" name="/up_blocks.3/attentions.2/transformer_blocks.0/Add_2" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -90835,55 +70904,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_8262">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/transformer_blocks.0/Add_2_output_0">
 					<dim>2</dim>
 					<dim>4096</dim>
 					<dim>320</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5457" name="Constant_91061" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="18233080" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_8271"/>
-			</rt_info>
-			<output>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-			</output>
-		</layer>
-		<layer id="5458" name="Constant_91062" type="Const" version="opset1">
-			<data element_type="i64" shape="" offset="4286752" size="8"/>
-			<output>
-				<port id="0" precision="I64"/>
-			</output>
-		</layer>
-		<layer id="5459" name="Gather_91063" type="Gather" version="opset8">
-			<data batch_dims="0"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_8271"/>
-			</rt_info>
-			<input>
-				<port id="0" precision="I64">
-					<dim>4</dim>
-				</port>
-				<port id="1" precision="I64">
-					<dim>4</dim>
-				</port>
-				<port id="2" precision="I64"/>
-			</input>
+		<layer id="4308" name="/up_blocks.3/attentions.2/Constant_1" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="13790174" size="32" />
 			<output>
-				<port id="3" precision="I64" names="onnx::Reshape_8271">
+				<port id="0" precision="I64" names="/up_blocks.3/attentions.2/Constant_1_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5460" name="onnx::Transpose_8272" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Transpose_8272"/>
-			</rt_info>
+		<layer id="4309" name="/up_blocks.3/attentions.2/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -90895,7 +70932,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Transpose_8272">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>64</dim>
 					<dim>64</dim>
@@ -90903,21 +70940,15 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="5461" name="Constant_36590" type="Const" version="opset1">
-			<data element_type="i64" shape="4" offset="27579960" size="32"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_36590"/>
-			</rt_info>
+		<layer id="4310" name="Constant_31334" type="Const" version="opset1">
+			<data element_type="i64" shape="4" offset="13790206" size="32" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5462" name="input.1132" type="Transpose" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.1132"/>
-			</rt_info>
+		<layer id="4311" name="/up_blocks.3/attentions.2/Transpose_1" type="Transpose" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -90930,7 +70961,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.1132">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/Transpose_1_output_0">
 					<dim>2</dim>
 					<dim>320</dim>
 					<dim>64</dim>
@@ -90938,13 +70969,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="5463" name="m.up_blocks.3.attentions.2.proj_out.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="320, 320, 1, 1" offset="3437625904" size="409600"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.up_blocks.3.attentions.2.proj_out.weight"/>
-			</rt_info>
+		<layer id="4312" name="up_blocks.3.attentions.2.proj_out.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="320, 320, 1, 1" offset="1718813522" size="204800" />
 			<output>
-				<port id="0" precision="FP32" names="m.up_blocks.3.attentions.2.proj_out.weight">
+				<port id="0" precision="FP16">
 					<dim>320</dim>
 					<dim>320</dim>
 					<dim>1</dim>
@@ -90952,11 +70980,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="5464" name="Convolution_36592" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit"/>
+		<layer id="4313" name="up_blocks.3.attentions.2.proj_out.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_36592"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="up_blocks.3.attentions.2.proj_out.weight">
+					<dim>320</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="4314" name="/up_blocks.3/attentions.2/proj_out/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -90980,10 +71027,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="5465" name="Reshape_36612" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="3438035504" size="1280"/>
+		<layer id="4315" name="Reshape_31356_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="1719018322" size="640" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>320</dim>
 					<dim>1</dim>
@@ -90991,11 +71038,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="5466" name="onnx::Add_8274" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="4316" name="Reshape_31356" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_36611, Reshape_36612, onnx::Add_8274"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="4317" name="/up_blocks.3/attentions.2/proj_out/Conv" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -91011,7 +71077,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_8274">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/proj_out/Conv_output_0">
 					<dim>2</dim>
 					<dim>320</dim>
 					<dim>64</dim>
@@ -91019,11 +71085,8 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="5467" name="onnx::Cast_8275" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.1136, onnx::Cast_8275"/>
-			</rt_info>
+		<layer id="4318" name="/up_blocks.3/attentions.2/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -91039,7 +71102,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.1136,onnx::Cast_8275">
+				<port id="2" precision="FP32" names="/up_blocks.3/attentions.2/Add_output_0">
 					<dim>2</dim>
 					<dim>320</dim>
 					<dim>64</dim>
@@ -91047,22 +71110,16 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="5468" name="onnx::Reshape_8277" type="Const" version="opset1">
-			<data element_type="i64" shape="3" offset="596480" size="24"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_8277"/>
-			</rt_info>
+		<layer id="4319" name="/conv_norm_out/Constant" type="Const" version="opset1">
+			<data element_type="i64" shape="3" offset="298240" size="24" />
 			<output>
-				<port id="0" precision="I64" names="onnx::Reshape_8277">
+				<port id="0" precision="I64" names="/conv_norm_out/Constant_output_0">
 					<dim>3</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5469" name="onnx::InstanceNormalization_8278" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::InstanceNormalization_8278"/>
-			</rt_info>
+		<layer id="4320" name="/conv_norm_out/Reshape" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -91075,29 +71132,23 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::InstanceNormalization_8278">
+				<port id="2" precision="FP32" names="/conv_norm_out/Reshape_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>40960</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5470" name="Constant_36651" type="Const" version="opset1">
-			<data element_type="i64" shape="1" offset="596504" size="8"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Constant_36651"/>
-			</rt_info>
+		<layer id="4321" name="Constant_31394" type="Const" version="opset1">
+			<data element_type="i64" shape="1" offset="298264" size="8" />
 			<output>
 				<port id="0" precision="I64">
 					<dim>1</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5471" name="MVN_36652" type="MVN" version="opset6">
-			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_36671, Concat_36716, MVN_36652, Multiply_36699, Reshape_36672, Reshape_36717, onnx::Reshape_8281"/>
-			</rt_info>
+		<layer id="4322" name="MVN_31395" type="MVN" version="opset6">
+			<data eps="9.9999997473787516e-06" normalize_variance="true" eps_mode="INSIDE_SQRT" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -91109,18 +71160,15 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Reshape_8281">
+				<port id="2" precision="FP32" names="/conv_norm_out/InstanceNormalization_output_0">
 					<dim>2</dim>
 					<dim>32</dim>
 					<dim>40960</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5472" name="onnx::Reshape_8282" type="ShapeOf" version="opset3">
-			<data output_type="i64"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Reshape_8282"/>
-			</rt_info>
+		<layer id="4323" name="/conv_norm_out/Shape" type="ShapeOf" version="opset3">
+			<data output_type="i64" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -91130,16 +71178,13 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="I64" names="onnx::Reshape_8282">
+				<port id="1" precision="I64" names="/conv_norm_out/Shape_output_0">
 					<dim>4</dim>
 				</port>
 			</output>
 		</layer>
-		<layer id="5473" name="onnx::Mul_8283" type="Reshape" version="opset1">
-			<data special_zero="true"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Mul_8283"/>
-			</rt_info>
+		<layer id="4324" name="/conv_norm_out/Reshape_1" type="Reshape" version="opset1">
+			<data special_zero="true" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -91151,7 +71196,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Mul_8283">
+				<port id="2" precision="FP32" names="/conv_norm_out/Reshape_1_output_0">
 					<dim>2</dim>
 					<dim>320</dim>
 					<dim>64</dim>
@@ -91159,10 +71204,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="5474" name="Constant_150619" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="3438036784" size="1280"/>
+		<layer id="4325" name="Constant_87209_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="1719018962" size="640" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>320</dim>
 					<dim>1</dim>
@@ -91170,11 +71215,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="5475" name="onnx::Add_8286" type="Multiply" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="4326" name="Constant_87209" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="onnx::Add_8286"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="4327" name="/conv_norm_out/Mul" type="Multiply" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -91190,7 +71254,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="onnx::Add_8286">
+				<port id="2" precision="FP32" names="/conv_norm_out/Mul_output_0">
 					<dim>2</dim>
 					<dim>320</dim>
 					<dim>64</dim>
@@ -91198,10 +71262,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="5476" name="Constant_150620" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 320, 1, 1" offset="3438038064" size="1280"/>
+		<layer id="4328" name="Constant_87210_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 320, 1, 1" offset="1719019602" size="640" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>320</dim>
 					<dim>1</dim>
@@ -91209,11 +71273,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="5477" name="onnx::Cast_8289" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="4329" name="Constant_87210" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="input.1140, onnx::Cast_8289"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>320</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="4330" name="/conv_norm_out/Add" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -91229,7 +71312,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="input.1140,onnx::Cast_8289">
+				<port id="2" precision="FP32" names="/conv_norm_out/Add_output_0">
 					<dim>2</dim>
 					<dim>320</dim>
 					<dim>64</dim>
@@ -91237,10 +71320,7 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="5478" name="input.1144" type="Swish" version="opset4">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="input.1144, onnx::Mul_8291"/>
-			</rt_info>
+		<layer id="4331" name="/conv_act/Mul" type="Swish" version="opset4">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -91250,7 +71330,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="1" precision="FP32" names="input.1144">
+				<port id="1" precision="FP32" names="/conv_act/Mul_output_0">
 					<dim>2</dim>
 					<dim>320</dim>
 					<dim>64</dim>
@@ -91258,13 +71338,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="5479" name="m.conv_out.weight" type="Const" version="opset1">
-			<data element_type="f32" shape="4, 320, 3, 3" offset="3438039344" size="46080"/>
-			<rt_info>
-				<attribute name="fused_names" version="0" value="m.conv_out.weight"/>
-			</rt_info>
+		<layer id="4332" name="conv_out.weight_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="4, 320, 3, 3" offset="1719020242" size="23040" />
 			<output>
-				<port id="0" precision="FP32" names="m.conv_out.weight">
+				<port id="0" precision="FP16">
 					<dim>4</dim>
 					<dim>320</dim>
 					<dim>3</dim>
@@ -91272,11 +71349,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="5480" name="Convolution_36757" type="Convolution" version="opset1">
-			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
+		<layer id="4333" name="conv_out.weight" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Convolution_36757"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>4</dim>
+					<dim>320</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32" names="conv_out.weight">
+					<dim>4</dim>
+					<dim>320</dim>
+					<dim>3</dim>
+					<dim>3</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="4334" name="/conv_out/Conv/WithoutBiases" type="Convolution" version="opset1">
+			<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -91300,10 +71396,10 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="5481" name="Reshape_36777" type="Const" version="opset1">
-			<data element_type="f32" shape="1, 4, 1, 1" offset="3438085424" size="16"/>
+		<layer id="4335" name="Reshape_31519_compressed" type="Const" version="opset1">
+			<data element_type="f16" shape="1, 4, 1, 1" offset="1719043282" size="8" />
 			<output>
-				<port id="0" precision="FP32">
+				<port id="0" precision="FP16">
 					<dim>1</dim>
 					<dim>4</dim>
 					<dim>1</dim>
@@ -91311,11 +71407,30 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="5482" name="noise_pred" type="Add" version="opset1">
-			<data auto_broadcast="numpy"/>
+		<layer id="4336" name="Reshape_31519" type="Convert" version="opset1">
+			<data destination_type="f32" />
 			<rt_info>
-				<attribute name="fused_names" version="0" value="Concat_36776, Reshape_36777, noise_pred"/>
+				<attribute name="decompression" version="0" />
 			</rt_info>
+			<input>
+				<port id="0" precision="FP16">
+					<dim>1</dim>
+					<dim>4</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</input>
+			<output>
+				<port id="1" precision="FP32">
+					<dim>1</dim>
+					<dim>4</dim>
+					<dim>1</dim>
+					<dim>1</dim>
+				</port>
+			</output>
+		</layer>
+		<layer id="4337" name="out_sample" type="Add" version="opset1">
+			<data auto_broadcast="numpy" />
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -91331,7 +71446,7 @@
 				</port>
 			</input>
 			<output>
-				<port id="2" precision="FP32" names="noise_pred">
+				<port id="2" precision="FP32" names="out_sample">
 					<dim>2</dim>
 					<dim>4</dim>
 					<dim>64</dim>
@@ -91339,10 +71454,7 @@
 				</port>
 			</output>
 		</layer>
-		<layer id="5483" name="noise_pred/sink_port_0" type="Result" version="opset1">
-			<rt_info>
-				<attribute name="fused_names" version="0" value="noise_pred/sink_port_0"/>
-			</rt_info>
+		<layer id="4338" name="out_sample/sink_port_0" type="Result" version="opset1">
 			<input>
 				<port id="0" precision="FP32">
 					<dim>2</dim>
@@ -91354,6433 +71466,4699 @@
 		</layer>
 	</layers>
 	<edges>
-		<edge from-layer="0" from-port="0" to-layer="797" to-port="0"/>
-		<edge from-layer="0" from-port="0" to-layer="4283" to-port="0"/>
-		<edge from-layer="0" from-port="0" to-layer="2852" to-port="0"/>
-		<edge from-layer="0" from-port="0" to-layer="4293" to-port="0"/>
-		<edge from-layer="0" from-port="0" to-layer="2842" to-port="0"/>
-		<edge from-layer="0" from-port="0" to-layer="4650" to-port="0"/>
-		<edge from-layer="0" from-port="0" to-layer="4660" to-port="0"/>
-		<edge from-layer="0" from-port="0" to-layer="5008" to-port="0"/>
-		<edge from-layer="0" from-port="0" to-layer="5018" to-port="0"/>
-		<edge from-layer="0" from-port="0" to-layer="537" to-port="0"/>
-		<edge from-layer="0" from-port="0" to-layer="5366" to-port="0"/>
-		<edge from-layer="0" from-port="0" to-layer="5376" to-port="0"/>
-		<edge from-layer="0" from-port="0" to-layer="265" to-port="0"/>
-		<edge from-layer="0" from-port="0" to-layer="809" to-port="0"/>
-		<edge from-layer="0" from-port="0" to-layer="1059" to-port="0"/>
-		<edge from-layer="0" from-port="0" to-layer="2494" to-port="0"/>
-		<edge from-layer="0" from-port="0" to-layer="2484" to-port="0"/>
-		<edge from-layer="0" from-port="0" to-layer="1069" to-port="0"/>
-		<edge from-layer="0" from-port="0" to-layer="1329" to-port="0"/>
-		<edge from-layer="0" from-port="0" to-layer="1341" to-port="0"/>
-		<edge from-layer="0" from-port="0" to-layer="1591" to-port="0"/>
-		<edge from-layer="0" from-port="0" to-layer="1949" to-port="0"/>
-		<edge from-layer="0" from-port="0" to-layer="1939" to-port="0"/>
-		<edge from-layer="0" from-port="0" to-layer="1601" to-port="0"/>
-		<edge from-layer="0" from-port="0" to-layer="3210" to-port="0"/>
-		<edge from-layer="0" from-port="0" to-layer="3200" to-port="0"/>
-		<edge from-layer="0" from-port="0" to-layer="3925" to-port="0"/>
-		<edge from-layer="0" from-port="0" to-layer="527" to-port="0"/>
-		<edge from-layer="0" from-port="0" to-layer="3567" to-port="0"/>
-		<edge from-layer="0" from-port="0" to-layer="3577" to-port="0"/>
-		<edge from-layer="0" from-port="0" to-layer="277" to-port="0"/>
-		<edge from-layer="0" from-port="0" to-layer="3935" to-port="0"/>
-		<edge from-layer="1" from-port="0" to-layer="87" to-port="0"/>
-		<edge from-layer="2" from-port="0" to-layer="68" to-port="0"/>
-		<edge from-layer="3" from-port="0" to-layer="5455" to-port="0"/>
-		<edge from-layer="4" from-port="0" to-layer="5425" to-port="0"/>
-		<edge from-layer="5" from-port="0" to-layer="5415" to-port="0"/>
-		<edge from-layer="6" from-port="0" to-layer="5326" to-port="0"/>
-		<edge from-layer="7" from-port="0" to-layer="5097" to-port="0"/>
-		<edge from-layer="8" from-port="0" to-layer="5067" to-port="0"/>
-		<edge from-layer="9" from-port="0" to-layer="5057" to-port="0"/>
-		<edge from-layer="10" from-port="0" to-layer="4968" to-port="0"/>
-		<edge from-layer="11" from-port="0" to-layer="4739" to-port="0"/>
-		<edge from-layer="12" from-port="0" to-layer="4709" to-port="0"/>
-		<edge from-layer="13" from-port="0" to-layer="4699" to-port="0"/>
-		<edge from-layer="14" from-port="0" to-layer="4610" to-port="0"/>
-		<edge from-layer="15" from-port="0" to-layer="4372" to-port="0"/>
-		<edge from-layer="16" from-port="0" to-layer="4342" to-port="0"/>
-		<edge from-layer="17" from-port="0" to-layer="4332" to-port="0"/>
-		<edge from-layer="18" from-port="0" to-layer="4243" to-port="0"/>
-		<edge from-layer="19" from-port="0" to-layer="4014" to-port="0"/>
-		<edge from-layer="20" from-port="0" to-layer="3984" to-port="0"/>
-		<edge from-layer="21" from-port="0" to-layer="3974" to-port="0"/>
-		<edge from-layer="22" from-port="0" to-layer="3885" to-port="0"/>
-		<edge from-layer="23" from-port="0" to-layer="3656" to-port="0"/>
-		<edge from-layer="24" from-port="0" to-layer="3626" to-port="0"/>
-		<edge from-layer="25" from-port="0" to-layer="3616" to-port="0"/>
-		<edge from-layer="26" from-port="0" to-layer="3527" to-port="0"/>
-		<edge from-layer="27" from-port="0" to-layer="3289" to-port="0"/>
-		<edge from-layer="28" from-port="0" to-layer="3259" to-port="0"/>
-		<edge from-layer="29" from-port="0" to-layer="3249" to-port="0"/>
-		<edge from-layer="30" from-port="0" to-layer="3160" to-port="0"/>
-		<edge from-layer="31" from-port="0" to-layer="2931" to-port="0"/>
-		<edge from-layer="32" from-port="0" to-layer="2901" to-port="0"/>
-		<edge from-layer="33" from-port="0" to-layer="2891" to-port="0"/>
-		<edge from-layer="34" from-port="0" to-layer="2802" to-port="0"/>
-		<edge from-layer="35" from-port="0" to-layer="2573" to-port="0"/>
-		<edge from-layer="36" from-port="0" to-layer="2543" to-port="0"/>
-		<edge from-layer="37" from-port="0" to-layer="2533" to-port="0"/>
-		<edge from-layer="38" from-port="0" to-layer="2444" to-port="0"/>
-		<edge from-layer="39" from-port="0" to-layer="2028" to-port="0"/>
-		<edge from-layer="40" from-port="0" to-layer="1998" to-port="0"/>
-		<edge from-layer="41" from-port="0" to-layer="1988" to-port="0"/>
-		<edge from-layer="42" from-port="0" to-layer="1899" to-port="0"/>
-		<edge from-layer="43" from-port="0" to-layer="1680" to-port="0"/>
-		<edge from-layer="44" from-port="0" to-layer="1650" to-port="0"/>
-		<edge from-layer="45" from-port="0" to-layer="1640" to-port="0"/>
-		<edge from-layer="46" from-port="0" to-layer="1551" to-port="0"/>
-		<edge from-layer="47" from-port="0" to-layer="1420" to-port="0"/>
-		<edge from-layer="48" from-port="0" to-layer="1390" to-port="0"/>
-		<edge from-layer="49" from-port="0" to-layer="1380" to-port="0"/>
-		<edge from-layer="50" from-port="0" to-layer="1289" to-port="0"/>
-		<edge from-layer="51" from-port="0" to-layer="1148" to-port="0"/>
-		<edge from-layer="52" from-port="0" to-layer="1118" to-port="0"/>
-		<edge from-layer="53" from-port="0" to-layer="1108" to-port="0"/>
-		<edge from-layer="54" from-port="0" to-layer="1019" to-port="0"/>
-		<edge from-layer="55" from-port="0" to-layer="888" to-port="0"/>
-		<edge from-layer="56" from-port="0" to-layer="858" to-port="0"/>
-		<edge from-layer="57" from-port="0" to-layer="848" to-port="0"/>
-		<edge from-layer="58" from-port="0" to-layer="757" to-port="0"/>
-		<edge from-layer="59" from-port="0" to-layer="616" to-port="0"/>
-		<edge from-layer="60" from-port="0" to-layer="586" to-port="0"/>
-		<edge from-layer="61" from-port="0" to-layer="576" to-port="0"/>
-		<edge from-layer="62" from-port="0" to-layer="487" to-port="0"/>
-		<edge from-layer="63" from-port="0" to-layer="356" to-port="0"/>
-		<edge from-layer="64" from-port="0" to-layer="326" to-port="0"/>
-		<edge from-layer="65" from-port="0" to-layer="316" to-port="0"/>
-		<edge from-layer="66" from-port="0" to-layer="225" to-port="0"/>
-		<edge from-layer="67" from-port="0" to-layer="68" to-port="1"/>
-		<edge from-layer="68" from-port="2" to-layer="70" to-port="0"/>
-		<edge from-layer="69" from-port="0" to-layer="70" to-port="1"/>
-		<edge from-layer="70" from-port="2" to-layer="5110" to-port="1"/>
-		<edge from-layer="70" from-port="2" to-layer="141" to-port="0"/>
-		<edge from-layer="70" from-port="2" to-layer="75" to-port="0"/>
-		<edge from-layer="70" from-port="2" to-layer="72" to-port="0"/>
-		<edge from-layer="71" from-port="0" to-layer="72" to-port="1"/>
-		<edge from-layer="72" from-port="2" to-layer="74" to-port="0"/>
-		<edge from-layer="73" from-port="0" to-layer="74" to-port="1"/>
-		<edge from-layer="74" from-port="2" to-layer="76" to-port="0"/>
-		<edge from-layer="75" from-port="1" to-layer="76" to-port="1"/>
-		<edge from-layer="76" from-port="2" to-layer="78" to-port="0"/>
-		<edge from-layer="77" from-port="0" to-layer="78" to-port="1"/>
-		<edge from-layer="78" from-port="2" to-layer="80" to-port="0"/>
-		<edge from-layer="79" from-port="0" to-layer="80" to-port="1"/>
-		<edge from-layer="80" from-port="2" to-layer="81" to-port="0"/>
-		<edge from-layer="81" from-port="1" to-layer="83" to-port="0"/>
-		<edge from-layer="82" from-port="0" to-layer="83" to-port="1"/>
-		<edge from-layer="83" from-port="2" to-layer="85" to-port="0"/>
-		<edge from-layer="84" from-port="0" to-layer="85" to-port="1"/>
-		<edge from-layer="85" from-port="2" to-layer="125" to-port="0"/>
-		<edge from-layer="86" from-port="0" to-layer="87" to-port="1"/>
-		<edge from-layer="87" from-port="2" to-layer="89" to-port="0"/>
-		<edge from-layer="88" from-port="0" to-layer="89" to-port="1"/>
-		<edge from-layer="89" from-port="2" to-layer="91" to-port="0"/>
-		<edge from-layer="90" from-port="0" to-layer="91" to-port="1"/>
-		<edge from-layer="91" from-port="2" to-layer="92" to-port="0"/>
-		<edge from-layer="92" from-port="1" to-layer="94" to-port="0"/>
-		<edge from-layer="93" from-port="0" to-layer="94" to-port="1"/>
-		<edge from-layer="94" from-port="2" to-layer="96" to-port="0"/>
-		<edge from-layer="94" from-port="2" to-layer="95" to-port="0"/>
-		<edge from-layer="95" from-port="1" to-layer="97" to-port="0"/>
-		<edge from-layer="96" from-port="1" to-layer="97" to-port="1"/>
-		<edge from-layer="97" from-port="2" to-layer="101" to-port="0"/>
-		<edge from-layer="97" from-port="2" to-layer="105" to-port="0"/>
-		<edge from-layer="98" from-port="0" to-layer="101" to-port="1"/>
-		<edge from-layer="99" from-port="0" to-layer="101" to-port="2"/>
-		<edge from-layer="100" from-port="0" to-layer="101" to-port="3"/>
-		<edge from-layer="101" from-port="4" to-layer="106" to-port="0"/>
-		<edge from-layer="102" from-port="0" to-layer="105" to-port="1"/>
-		<edge from-layer="103" from-port="0" to-layer="105" to-port="2"/>
-		<edge from-layer="104" from-port="0" to-layer="105" to-port="3"/>
-		<edge from-layer="105" from-port="4" to-layer="106" to-port="1"/>
-		<edge from-layer="106" from-port="2" to-layer="108" to-port="0"/>
-		<edge from-layer="107" from-port="0" to-layer="108" to-port="1"/>
-		<edge from-layer="108" from-port="2" to-layer="110" to-port="0"/>
-		<edge from-layer="109" from-port="0" to-layer="110" to-port="1"/>
-		<edge from-layer="110" from-port="2" to-layer="111" to-port="0"/>
-		<edge from-layer="111" from-port="1" to-layer="113" to-port="0"/>
-		<edge from-layer="112" from-port="0" to-layer="113" to-port="1"/>
-		<edge from-layer="113" from-port="2" to-layer="115" to-port="0"/>
-		<edge from-layer="114" from-port="0" to-layer="115" to-port="1"/>
-		<edge from-layer="115" from-port="2" to-layer="3689" to-port="0"/>
-		<edge from-layer="115" from-port="2" to-layer="1709" to-port="0"/>
-		<edge from-layer="115" from-port="2" to-layer="3331" to-port="0"/>
-		<edge from-layer="115" from-port="2" to-layer="2964" to-port="0"/>
-		<edge from-layer="115" from-port="2" to-layer="1446" to-port="0"/>
-		<edge from-layer="115" from-port="2" to-layer="1181" to-port="0"/>
-		<edge from-layer="115" from-port="2" to-layer="914" to-port="0"/>
-		<edge from-layer="115" from-port="2" to-layer="649" to-port="0"/>
-		<edge from-layer="115" from-port="2" to-layer="382" to-port="0"/>
-		<edge from-layer="115" from-port="2" to-layer="4047" to-port="0"/>
-		<edge from-layer="115" from-port="2" to-layer="2054" to-port="0"/>
-		<edge from-layer="115" from-port="2" to-layer="2100" to-port="0"/>
-		<edge from-layer="115" from-port="2" to-layer="2146" to-port="0"/>
-		<edge from-layer="115" from-port="2" to-layer="2192" to-port="0"/>
-		<edge from-layer="115" from-port="2" to-layer="1750" to-port="0"/>
-		<edge from-layer="115" from-port="2" to-layer="2248" to-port="0"/>
-		<edge from-layer="115" from-port="2" to-layer="116" to-port="0"/>
-		<edge from-layer="115" from-port="2" to-layer="2606" to-port="0"/>
-		<edge from-layer="115" from-port="2" to-layer="1791" to-port="0"/>
-		<edge from-layer="115" from-port="2" to-layer="5130" to-port="0"/>
-		<edge from-layer="115" from-port="2" to-layer="4772" to-port="0"/>
-		<edge from-layer="115" from-port="2" to-layer="4414" to-port="0"/>
-		<edge from-layer="116" from-port="1" to-layer="118" to-port="0"/>
-		<edge from-layer="117" from-port="0" to-layer="118" to-port="1"/>
-		<edge from-layer="118" from-port="2" to-layer="120" to-port="0"/>
-		<edge from-layer="119" from-port="0" to-layer="120" to-port="1"/>
-		<edge from-layer="120" from-port="2" to-layer="122" to-port="0"/>
-		<edge from-layer="121" from-port="0" to-layer="122" to-port="1"/>
-		<edge from-layer="122" from-port="2" to-layer="124" to-port="0"/>
-		<edge from-layer="123" from-port="0" to-layer="124" to-port="1"/>
-		<edge from-layer="124" from-port="2" to-layer="125" to-port="1"/>
-		<edge from-layer="125" from-port="2" to-layer="127" to-port="0"/>
-		<edge from-layer="125" from-port="2" to-layer="130" to-port="0"/>
-		<edge from-layer="126" from-port="0" to-layer="127" to-port="1"/>
-		<edge from-layer="127" from-port="2" to-layer="129" to-port="0"/>
-		<edge from-layer="128" from-port="0" to-layer="129" to-port="1"/>
-		<edge from-layer="129" from-port="2" to-layer="131" to-port="0"/>
-		<edge from-layer="130" from-port="1" to-layer="131" to-port="1"/>
-		<edge from-layer="131" from-port="2" to-layer="133" to-port="0"/>
-		<edge from-layer="132" from-port="0" to-layer="133" to-port="1"/>
-		<edge from-layer="133" from-port="2" to-layer="135" to-port="0"/>
-		<edge from-layer="134" from-port="0" to-layer="135" to-port="1"/>
-		<edge from-layer="135" from-port="2" to-layer="136" to-port="0"/>
-		<edge from-layer="136" from-port="1" to-layer="138" to-port="0"/>
-		<edge from-layer="137" from-port="0" to-layer="138" to-port="1"/>
-		<edge from-layer="138" from-port="2" to-layer="140" to-port="0"/>
-		<edge from-layer="139" from-port="0" to-layer="140" to-port="1"/>
-		<edge from-layer="140" from-port="2" to-layer="141" to-port="1"/>
-		<edge from-layer="141" from-port="2" to-layer="366" to-port="1"/>
-		<edge from-layer="141" from-port="2" to-layer="143" to-port="0"/>
-		<edge from-layer="141" from-port="2" to-layer="146" to-port="0"/>
-		<edge from-layer="142" from-port="0" to-layer="143" to-port="1"/>
-		<edge from-layer="143" from-port="2" to-layer="145" to-port="0"/>
-		<edge from-layer="144" from-port="0" to-layer="145" to-port="1"/>
-		<edge from-layer="145" from-port="2" to-layer="147" to-port="0"/>
-		<edge from-layer="146" from-port="1" to-layer="147" to-port="1"/>
-		<edge from-layer="147" from-port="2" to-layer="149" to-port="0"/>
-		<edge from-layer="148" from-port="0" to-layer="149" to-port="1"/>
-		<edge from-layer="149" from-port="2" to-layer="151" to-port="0"/>
-		<edge from-layer="150" from-port="0" to-layer="151" to-port="1"/>
-		<edge from-layer="151" from-port="2" to-layer="153" to-port="0"/>
-		<edge from-layer="152" from-port="0" to-layer="153" to-port="1"/>
-		<edge from-layer="153" from-port="2" to-layer="155" to-port="0"/>
-		<edge from-layer="154" from-port="0" to-layer="155" to-port="1"/>
-		<edge from-layer="155" from-port="2" to-layer="157" to-port="0"/>
-		<edge from-layer="156" from-port="0" to-layer="157" to-port="1"/>
-		<edge from-layer="157" from-port="2" to-layer="159" to-port="0"/>
-		<edge from-layer="158" from-port="0" to-layer="424" to-port="1"/>
-		<edge from-layer="158" from-port="0" to-layer="159" to-port="1"/>
-		<edge from-layer="159" from-port="2" to-layer="161" to-port="0"/>
-		<edge from-layer="159" from-port="2" to-layer="226" to-port="1"/>
-		<edge from-layer="160" from-port="0" to-layer="161" to-port="1"/>
-		<edge from-layer="161" from-port="2" to-layer="163" to-port="0"/>
-		<edge from-layer="162" from-port="0" to-layer="163" to-port="1"/>
-		<edge from-layer="163" from-port="2" to-layer="165" to-port="0"/>
-		<edge from-layer="164" from-port="0" to-layer="165" to-port="1"/>
-		<edge from-layer="165" from-port="2" to-layer="185" to-port="0"/>
-		<edge from-layer="165" from-port="2" to-layer="167" to-port="0"/>
-		<edge from-layer="165" from-port="2" to-layer="175" to-port="0"/>
-		<edge from-layer="166" from-port="0" to-layer="167" to-port="1"/>
-		<edge from-layer="167" from-port="2" to-layer="169" to-port="0"/>
-		<edge from-layer="168" from-port="0" to-layer="169" to-port="1"/>
-		<edge from-layer="168" from-port="0" to-layer="433" to-port="1"/>
-		<edge from-layer="168" from-port="0" to-layer="439" to-port="1"/>
-		<edge from-layer="168" from-port="0" to-layer="176" to-port="1"/>
-		<edge from-layer="168" from-port="0" to-layer="449" to-port="1"/>
-		<edge from-layer="168" from-port="0" to-layer="186" to-port="1"/>
-		<edge from-layer="169" from-port="2" to-layer="171" to-port="0"/>
-		<edge from-layer="170" from-port="0" to-layer="171" to-port="1"/>
-		<edge from-layer="171" from-port="2" to-layer="173" to-port="0"/>
-		<edge from-layer="172" from-port="0" to-layer="173" to-port="1"/>
-		<edge from-layer="172" from-port="0" to-layer="179" to-port="1"/>
-		<edge from-layer="172" from-port="0" to-layer="436" to-port="1"/>
-		<edge from-layer="172" from-port="0" to-layer="442" to-port="1"/>
-		<edge from-layer="172" from-port="0" to-layer="452" to-port="1"/>
-		<edge from-layer="172" from-port="0" to-layer="189" to-port="1"/>
-		<edge from-layer="173" from-port="2" to-layer="180" to-port="0"/>
-		<edge from-layer="174" from-port="0" to-layer="175" to-port="1"/>
-		<edge from-layer="175" from-port="2" to-layer="176" to-port="0"/>
-		<edge from-layer="176" from-port="2" to-layer="178" to-port="0"/>
-		<edge from-layer="177" from-port="0" to-layer="178" to-port="1"/>
-		<edge from-layer="178" from-port="2" to-layer="179" to-port="0"/>
-		<edge from-layer="179" from-port="2" to-layer="180" to-port="1"/>
-		<edge from-layer="180" from-port="2" to-layer="182" to-port="0"/>
-		<edge from-layer="181" from-port="0" to-layer="182" to-port="1"/>
-		<edge from-layer="182" from-port="2" to-layer="183" to-port="0"/>
-		<edge from-layer="183" from-port="1" to-layer="190" to-port="0"/>
-		<edge from-layer="184" from-port="0" to-layer="185" to-port="1"/>
-		<edge from-layer="185" from-port="2" to-layer="186" to-port="0"/>
-		<edge from-layer="186" from-port="2" to-layer="188" to-port="0"/>
-		<edge from-layer="187" from-port="0" to-layer="188" to-port="1"/>
-		<edge from-layer="188" from-port="2" to-layer="189" to-port="0"/>
-		<edge from-layer="189" from-port="2" to-layer="190" to-port="1"/>
-		<edge from-layer="190" from-port="2" to-layer="204" to-port="0"/>
-		<edge from-layer="190" from-port="2" to-layer="191" to-port="0"/>
-		<edge from-layer="191" from-port="1" to-layer="194" to-port="0"/>
-		<edge from-layer="191" from-port="1" to-layer="216" to-port="0"/>
-		<edge from-layer="191" from-port="1" to-layer="202" to-port="0"/>
-		<edge from-layer="191" from-port="1" to-layer="213" to-port="0"/>
-		<edge from-layer="192" from-port="0" to-layer="194" to-port="1"/>
-		<edge from-layer="193" from-port="0" to-layer="194" to-port="2"/>
-		<edge from-layer="194" from-port="3" to-layer="208" to-port="0"/>
-		<edge from-layer="194" from-port="3" to-layer="196" to-port="0"/>
-		<edge from-layer="195" from-port="0" to-layer="196" to-port="1"/>
-		<edge from-layer="196" from-port="2" to-layer="198" to-port="0"/>
-		<edge from-layer="197" from-port="0" to-layer="198" to-port="1"/>
-		<edge from-layer="198" from-port="2" to-layer="203" to-port="0"/>
-		<edge from-layer="199" from-port="0" to-layer="1618" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="4879" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="4848" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="4914" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="4629" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="4946" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="2821" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="5035" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="1038" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="4987" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="2390" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="554" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="735" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="776" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="826" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="506" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="465" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="1877" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="1918" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="1966" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="997" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="2324" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="2355" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="5206" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="2422" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="2463" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="2511" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="2682" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="2713" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="2748" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="2780" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="5393" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="5345" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="5304" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="5272" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="5237" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="1529" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="1086" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="3952" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="3904" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="3863" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="1570" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="3831" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="3796" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="3765" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="4677" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="3594" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="3546" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="2869" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="3505" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="294" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="3473" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="3438" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="3407" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="3040" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="3071" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="3106" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="3138" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="3179" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="3227" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="4221" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="4310" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="4123" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="203" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="1308" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="4262" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="4490" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="4189" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="4521" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="1358" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="4556" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="4588" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="4154" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="1267" to-port="1"/>
-		<edge from-layer="199" from-port="0" to-layer="244" to-port="1"/>
-		<edge from-layer="200" from-port="0" to-layer="202" to-port="1"/>
-		<edge from-layer="201" from-port="0" to-layer="202" to-port="2"/>
-		<edge from-layer="202" from-port="3" to-layer="203" to-port="2"/>
-		<edge from-layer="203" from-port="3" to-layer="204" to-port="1"/>
-		<edge from-layer="204" from-port="2" to-layer="206" to-port="0"/>
-		<edge from-layer="205" from-port="0" to-layer="206" to-port="1"/>
-		<edge from-layer="206" from-port="2" to-layer="222" to-port="0"/>
-		<edge from-layer="207" from-port="0" to-layer="208" to-port="1"/>
-		<edge from-layer="208" from-port="2" to-layer="210" to-port="0"/>
-		<edge from-layer="209" from-port="0" to-layer="210" to-port="1"/>
-		<edge from-layer="210" from-port="2" to-layer="221" to-port="0"/>
-		<edge from-layer="211" from-port="0" to-layer="213" to-port="1"/>
-		<edge from-layer="212" from-port="0" to-layer="213" to-port="2"/>
-		<edge from-layer="213" from-port="3" to-layer="221" to-port="1"/>
-		<edge from-layer="214" from-port="0" to-layer="216" to-port="1"/>
-		<edge from-layer="215" from-port="0" to-layer="216" to-port="2"/>
-		<edge from-layer="216" from-port="3" to-layer="218" to-port="0"/>
-		<edge from-layer="217" from-port="0" to-layer="218" to-port="1"/>
-		<edge from-layer="218" from-port="2" to-layer="220" to-port="0"/>
-		<edge from-layer="219" from-port="0" to-layer="220" to-port="1"/>
-		<edge from-layer="220" from-port="2" to-layer="221" to-port="2"/>
-		<edge from-layer="221" from-port="3" to-layer="222" to-port="1"/>
-		<edge from-layer="222" from-port="2" to-layer="224" to-port="0"/>
-		<edge from-layer="223" from-port="0" to-layer="224" to-port="1"/>
-		<edge from-layer="224" from-port="2" to-layer="225" to-port="1"/>
-		<edge from-layer="225" from-port="2" to-layer="226" to-port="0"/>
-		<edge from-layer="226" from-port="2" to-layer="228" to-port="0"/>
-		<edge from-layer="226" from-port="2" to-layer="317" to-port="1"/>
-		<edge from-layer="227" from-port="0" to-layer="228" to-port="1"/>
-		<edge from-layer="228" from-port="2" to-layer="230" to-port="0"/>
-		<edge from-layer="229" from-port="0" to-layer="230" to-port="1"/>
-		<edge from-layer="230" from-port="2" to-layer="232" to-port="0"/>
-		<edge from-layer="231" from-port="0" to-layer="232" to-port="1"/>
-		<edge from-layer="232" from-port="2" to-layer="234" to-port="0"/>
-		<edge from-layer="233" from-port="0" to-layer="234" to-port="1"/>
-		<edge from-layer="234" from-port="2" to-layer="245" to-port="0"/>
-		<edge from-layer="234" from-port="2" to-layer="236" to-port="0"/>
-		<edge from-layer="235" from-port="0" to-layer="244" to-port="0"/>
-		<edge from-layer="236" from-port="1" to-layer="239" to-port="0"/>
-		<edge from-layer="236" from-port="1" to-layer="250" to-port="0"/>
-		<edge from-layer="236" from-port="1" to-layer="257" to-port="0"/>
-		<edge from-layer="237" from-port="0" to-layer="239" to-port="1"/>
-		<edge from-layer="238" from-port="0" to-layer="239" to-port="2"/>
-		<edge from-layer="239" from-port="3" to-layer="259" to-port="0"/>
-		<edge from-layer="239" from-port="3" to-layer="241" to-port="0"/>
-		<edge from-layer="240" from-port="0" to-layer="241" to-port="1"/>
-		<edge from-layer="241" from-port="2" to-layer="243" to-port="0"/>
-		<edge from-layer="242" from-port="0" to-layer="243" to-port="1"/>
-		<edge from-layer="243" from-port="2" to-layer="244" to-port="2"/>
-		<edge from-layer="244" from-port="3" to-layer="245" to-port="1"/>
-		<edge from-layer="245" from-port="2" to-layer="247" to-port="0"/>
-		<edge from-layer="246" from-port="0" to-layer="247" to-port="1"/>
-		<edge from-layer="247" from-port="2" to-layer="263" to-port="0"/>
-		<edge from-layer="248" from-port="0" to-layer="250" to-port="1"/>
-		<edge from-layer="249" from-port="0" to-layer="250" to-port="2"/>
-		<edge from-layer="250" from-port="3" to-layer="252" to-port="0"/>
-		<edge from-layer="251" from-port="0" to-layer="252" to-port="1"/>
-		<edge from-layer="252" from-port="2" to-layer="254" to-port="0"/>
-		<edge from-layer="253" from-port="0" to-layer="254" to-port="1"/>
-		<edge from-layer="254" from-port="2" to-layer="262" to-port="0"/>
-		<edge from-layer="255" from-port="0" to-layer="257" to-port="1"/>
-		<edge from-layer="256" from-port="0" to-layer="257" to-port="2"/>
-		<edge from-layer="257" from-port="3" to-layer="262" to-port="1"/>
-		<edge from-layer="258" from-port="0" to-layer="259" to-port="1"/>
-		<edge from-layer="259" from-port="2" to-layer="261" to-port="0"/>
-		<edge from-layer="260" from-port="0" to-layer="261" to-port="1"/>
-		<edge from-layer="261" from-port="2" to-layer="262" to-port="2"/>
-		<edge from-layer="262" from-port="3" to-layer="263" to-port="1"/>
-		<edge from-layer="263" from-port="2" to-layer="272" to-port="0"/>
-		<edge from-layer="264" from-port="0" to-layer="265" to-port="1"/>
-		<edge from-layer="265" from-port="2" to-layer="267" to-port="0"/>
-		<edge from-layer="266" from-port="0" to-layer="4651" to-port="1"/>
-		<edge from-layer="266" from-port="0" to-layer="267" to-port="1"/>
-		<edge from-layer="266" from-port="0" to-layer="278" to-port="1"/>
-		<edge from-layer="266" from-port="0" to-layer="4661" to-port="1"/>
-		<edge from-layer="266" from-port="0" to-layer="5009" to-port="1"/>
-		<edge from-layer="266" from-port="0" to-layer="528" to-port="1"/>
-		<edge from-layer="266" from-port="0" to-layer="5019" to-port="1"/>
-		<edge from-layer="266" from-port="0" to-layer="538" to-port="1"/>
-		<edge from-layer="266" from-port="0" to-layer="5367" to-port="1"/>
-		<edge from-layer="266" from-port="0" to-layer="5377" to-port="1"/>
-		<edge from-layer="267" from-port="2" to-layer="269" to-port="0"/>
-		<edge from-layer="268" from-port="0" to-layer="269" to-port="1"/>
-		<edge from-layer="269" from-port="2" to-layer="271" to-port="0"/>
-		<edge from-layer="270" from-port="0" to-layer="4654" to-port="1"/>
-		<edge from-layer="270" from-port="0" to-layer="281" to-port="1"/>
-		<edge from-layer="270" from-port="0" to-layer="5380" to-port="1"/>
-		<edge from-layer="270" from-port="0" to-layer="271" to-port="1"/>
-		<edge from-layer="270" from-port="0" to-layer="5370" to-port="1"/>
-		<edge from-layer="270" from-port="0" to-layer="541" to-port="1"/>
-		<edge from-layer="270" from-port="0" to-layer="5022" to-port="1"/>
-		<edge from-layer="270" from-port="0" to-layer="531" to-port="1"/>
-		<edge from-layer="270" from-port="0" to-layer="5012" to-port="1"/>
-		<edge from-layer="270" from-port="0" to-layer="4664" to-port="1"/>
-		<edge from-layer="271" from-port="2" to-layer="272" to-port="1"/>
-		<edge from-layer="272" from-port="2" to-layer="274" to-port="0"/>
-		<edge from-layer="273" from-port="0" to-layer="274" to-port="1"/>
-		<edge from-layer="274" from-port="2" to-layer="275" to-port="0"/>
-		<edge from-layer="275" from-port="1" to-layer="282" to-port="0"/>
-		<edge from-layer="276" from-port="0" to-layer="277" to-port="1"/>
-		<edge from-layer="277" from-port="2" to-layer="278" to-port="0"/>
-		<edge from-layer="278" from-port="2" to-layer="280" to-port="0"/>
-		<edge from-layer="279" from-port="0" to-layer="280" to-port="1"/>
-		<edge from-layer="280" from-port="2" to-layer="281" to-port="0"/>
-		<edge from-layer="281" from-port="2" to-layer="282" to-port="1"/>
-		<edge from-layer="282" from-port="2" to-layer="283" to-port="0"/>
-		<edge from-layer="282" from-port="2" to-layer="295" to-port="0"/>
-		<edge from-layer="283" from-port="1" to-layer="286" to-port="0"/>
-		<edge from-layer="283" from-port="1" to-layer="307" to-port="0"/>
-		<edge from-layer="283" from-port="1" to-layer="304" to-port="0"/>
-		<edge from-layer="283" from-port="1" to-layer="293" to-port="0"/>
-		<edge from-layer="284" from-port="0" to-layer="286" to-port="1"/>
-		<edge from-layer="285" from-port="0" to-layer="286" to-port="2"/>
-		<edge from-layer="286" from-port="3" to-layer="299" to-port="0"/>
-		<edge from-layer="286" from-port="3" to-layer="288" to-port="0"/>
-		<edge from-layer="287" from-port="0" to-layer="288" to-port="1"/>
-		<edge from-layer="288" from-port="2" to-layer="290" to-port="0"/>
-		<edge from-layer="289" from-port="0" to-layer="290" to-port="1"/>
-		<edge from-layer="290" from-port="2" to-layer="294" to-port="0"/>
-		<edge from-layer="291" from-port="0" to-layer="293" to-port="1"/>
-		<edge from-layer="292" from-port="0" to-layer="293" to-port="2"/>
-		<edge from-layer="293" from-port="3" to-layer="294" to-port="2"/>
-		<edge from-layer="294" from-port="3" to-layer="295" to-port="1"/>
-		<edge from-layer="295" from-port="2" to-layer="297" to-port="0"/>
-		<edge from-layer="296" from-port="0" to-layer="297" to-port="1"/>
-		<edge from-layer="297" from-port="2" to-layer="313" to-port="0"/>
-		<edge from-layer="298" from-port="0" to-layer="299" to-port="1"/>
-		<edge from-layer="299" from-port="2" to-layer="301" to-port="0"/>
-		<edge from-layer="300" from-port="0" to-layer="301" to-port="1"/>
-		<edge from-layer="301" from-port="2" to-layer="312" to-port="0"/>
-		<edge from-layer="302" from-port="0" to-layer="304" to-port="1"/>
-		<edge from-layer="303" from-port="0" to-layer="304" to-port="2"/>
-		<edge from-layer="304" from-port="3" to-layer="312" to-port="1"/>
-		<edge from-layer="305" from-port="0" to-layer="307" to-port="1"/>
-		<edge from-layer="306" from-port="0" to-layer="307" to-port="2"/>
-		<edge from-layer="307" from-port="3" to-layer="309" to-port="0"/>
-		<edge from-layer="308" from-port="0" to-layer="309" to-port="1"/>
-		<edge from-layer="309" from-port="2" to-layer="311" to-port="0"/>
-		<edge from-layer="310" from-port="0" to-layer="311" to-port="1"/>
-		<edge from-layer="311" from-port="2" to-layer="312" to-port="2"/>
-		<edge from-layer="312" from-port="3" to-layer="313" to-port="1"/>
-		<edge from-layer="313" from-port="2" to-layer="315" to-port="0"/>
-		<edge from-layer="314" from-port="0" to-layer="315" to-port="1"/>
-		<edge from-layer="315" from-port="2" to-layer="316" to-port="1"/>
-		<edge from-layer="316" from-port="2" to-layer="317" to-port="0"/>
-		<edge from-layer="317" from-port="2" to-layer="319" to-port="0"/>
-		<edge from-layer="317" from-port="2" to-layer="357" to-port="1"/>
-		<edge from-layer="318" from-port="0" to-layer="319" to-port="1"/>
-		<edge from-layer="319" from-port="2" to-layer="321" to-port="0"/>
-		<edge from-layer="320" from-port="0" to-layer="321" to-port="1"/>
-		<edge from-layer="321" from-port="2" to-layer="323" to-port="0"/>
-		<edge from-layer="322" from-port="0" to-layer="323" to-port="1"/>
-		<edge from-layer="323" from-port="2" to-layer="325" to-port="0"/>
-		<edge from-layer="324" from-port="0" to-layer="325" to-port="1"/>
-		<edge from-layer="325" from-port="2" to-layer="326" to-port="1"/>
-		<edge from-layer="326" from-port="2" to-layer="330" to-port="0"/>
-		<edge from-layer="326" from-port="2" to-layer="341" to-port="0"/>
-		<edge from-layer="326" from-port="2" to-layer="351" to-port="0"/>
-		<edge from-layer="327" from-port="0" to-layer="341" to-port="1"/>
-		<edge from-layer="328" from-port="0" to-layer="339" to-port="0"/>
-		<edge from-layer="329" from-port="0" to-layer="339" to-port="1"/>
-		<edge from-layer="330" from-port="1" to-layer="333" to-port="0"/>
-		<edge from-layer="331" from-port="0" to-layer="333" to-port="1"/>
-		<edge from-layer="332" from-port="0" to-layer="333" to-port="2"/>
-		<edge from-layer="333" from-port="3" to-layer="335" to-port="0"/>
-		<edge from-layer="334" from-port="0" to-layer="335" to-port="1"/>
-		<edge from-layer="335" from-port="2" to-layer="337" to-port="0"/>
-		<edge from-layer="336" from-port="0" to-layer="337" to-port="1"/>
-		<edge from-layer="337" from-port="2" to-layer="345" to-port="2"/>
-		<edge from-layer="337" from-port="2" to-layer="348" to-port="0"/>
-		<edge from-layer="337" from-port="2" to-layer="339" to-port="2"/>
-		<edge from-layer="338" from-port="0" to-layer="339" to-port="3"/>
-		<edge from-layer="339" from-port="4" to-layer="341" to-port="2"/>
-		<edge from-layer="340" from-port="0" to-layer="341" to-port="3"/>
-		<edge from-layer="341" from-port="4" to-layer="353" to-port="0"/>
-		<edge from-layer="342" from-port="0" to-layer="345" to-port="0"/>
-		<edge from-layer="343" from-port="0" to-layer="345" to-port="1"/>
-		<edge from-layer="343" from-port="0" to-layer="349" to-port="1"/>
-		<edge from-layer="344" from-port="0" to-layer="345" to-port="3"/>
-		<edge from-layer="344" from-port="0" to-layer="349" to-port="3"/>
-		<edge from-layer="345" from-port="4" to-layer="351" to-port="1"/>
-		<edge from-layer="346" from-port="0" to-layer="349" to-port="0"/>
-		<edge from-layer="347" from-port="0" to-layer="348" to-port="1"/>
-		<edge from-layer="348" from-port="2" to-layer="349" to-port="2"/>
-		<edge from-layer="349" from-port="4" to-layer="351" to-port="2"/>
-		<edge from-layer="350" from-port="0" to-layer="351" to-port="3"/>
-		<edge from-layer="351" from-port="4" to-layer="352" to-port="0"/>
-		<edge from-layer="352" from-port="1" to-layer="353" to-port="1"/>
-		<edge from-layer="353" from-port="2" to-layer="355" to-port="0"/>
-		<edge from-layer="354" from-port="0" to-layer="355" to-port="1"/>
-		<edge from-layer="355" from-port="2" to-layer="356" to-port="1"/>
-		<edge from-layer="356" from-port="2" to-layer="357" to-port="0"/>
-		<edge from-layer="357" from-port="2" to-layer="359" to-port="0"/>
-		<edge from-layer="358" from-port="0" to-layer="359" to-port="1"/>
-		<edge from-layer="358" from-port="0" to-layer="618" to-port="1"/>
-		<edge from-layer="359" from-port="2" to-layer="361" to-port="0"/>
-		<edge from-layer="360" from-port="0" to-layer="361" to-port="1"/>
-		<edge from-layer="361" from-port="2" to-layer="363" to-port="0"/>
-		<edge from-layer="362" from-port="0" to-layer="363" to-port="1"/>
-		<edge from-layer="363" from-port="2" to-layer="365" to-port="0"/>
-		<edge from-layer="364" from-port="0" to-layer="365" to-port="1"/>
-		<edge from-layer="365" from-port="2" to-layer="366" to-port="0"/>
-		<edge from-layer="366" from-port="2" to-layer="407" to-port="0"/>
-		<edge from-layer="366" from-port="2" to-layer="368" to-port="0"/>
-		<edge from-layer="366" from-port="2" to-layer="371" to-port="0"/>
-		<edge from-layer="366" from-port="2" to-layer="4752" to-port="1"/>
-		<edge from-layer="367" from-port="0" to-layer="368" to-port="1"/>
-		<edge from-layer="368" from-port="2" to-layer="370" to-port="0"/>
-		<edge from-layer="369" from-port="0" to-layer="370" to-port="1"/>
-		<edge from-layer="370" from-port="2" to-layer="372" to-port="0"/>
-		<edge from-layer="371" from-port="1" to-layer="372" to-port="1"/>
-		<edge from-layer="372" from-port="2" to-layer="374" to-port="0"/>
-		<edge from-layer="373" from-port="0" to-layer="374" to-port="1"/>
-		<edge from-layer="374" from-port="2" to-layer="376" to-port="0"/>
-		<edge from-layer="375" from-port="0" to-layer="376" to-port="1"/>
-		<edge from-layer="376" from-port="2" to-layer="377" to-port="0"/>
-		<edge from-layer="377" from-port="1" to-layer="379" to-port="0"/>
-		<edge from-layer="378" from-port="0" to-layer="379" to-port="1"/>
-		<edge from-layer="379" from-port="2" to-layer="381" to-port="0"/>
-		<edge from-layer="380" from-port="0" to-layer="381" to-port="1"/>
-		<edge from-layer="381" from-port="2" to-layer="391" to-port="0"/>
-		<edge from-layer="382" from-port="1" to-layer="384" to-port="0"/>
-		<edge from-layer="383" from-port="0" to-layer="384" to-port="1"/>
-		<edge from-layer="384" from-port="2" to-layer="386" to-port="0"/>
-		<edge from-layer="385" from-port="0" to-layer="386" to-port="1"/>
-		<edge from-layer="386" from-port="2" to-layer="388" to-port="0"/>
-		<edge from-layer="387" from-port="0" to-layer="388" to-port="1"/>
-		<edge from-layer="388" from-port="2" to-layer="390" to-port="0"/>
-		<edge from-layer="389" from-port="0" to-layer="390" to-port="1"/>
-		<edge from-layer="390" from-port="2" to-layer="391" to-port="1"/>
-		<edge from-layer="391" from-port="2" to-layer="393" to-port="0"/>
-		<edge from-layer="391" from-port="2" to-layer="396" to-port="0"/>
-		<edge from-layer="392" from-port="0" to-layer="393" to-port="1"/>
-		<edge from-layer="393" from-port="2" to-layer="395" to-port="0"/>
-		<edge from-layer="394" from-port="0" to-layer="395" to-port="1"/>
-		<edge from-layer="395" from-port="2" to-layer="397" to-port="0"/>
-		<edge from-layer="396" from-port="1" to-layer="397" to-port="1"/>
-		<edge from-layer="397" from-port="2" to-layer="399" to-port="0"/>
-		<edge from-layer="398" from-port="0" to-layer="399" to-port="1"/>
-		<edge from-layer="399" from-port="2" to-layer="401" to-port="0"/>
-		<edge from-layer="400" from-port="0" to-layer="401" to-port="1"/>
-		<edge from-layer="401" from-port="2" to-layer="402" to-port="0"/>
-		<edge from-layer="402" from-port="1" to-layer="404" to-port="0"/>
-		<edge from-layer="403" from-port="0" to-layer="404" to-port="1"/>
-		<edge from-layer="404" from-port="2" to-layer="406" to-port="0"/>
-		<edge from-layer="405" from-port="0" to-layer="406" to-port="1"/>
-		<edge from-layer="406" from-port="2" to-layer="407" to-port="1"/>
-		<edge from-layer="407" from-port="2" to-layer="409" to-port="0"/>
-		<edge from-layer="407" from-port="2" to-layer="412" to-port="0"/>
-		<edge from-layer="407" from-port="2" to-layer="625" to-port="1"/>
-		<edge from-layer="408" from-port="0" to-layer="409" to-port="1"/>
-		<edge from-layer="409" from-port="2" to-layer="411" to-port="0"/>
-		<edge from-layer="410" from-port="0" to-layer="411" to-port="1"/>
-		<edge from-layer="411" from-port="2" to-layer="413" to-port="0"/>
-		<edge from-layer="412" from-port="1" to-layer="413" to-port="1"/>
-		<edge from-layer="413" from-port="2" to-layer="415" to-port="0"/>
-		<edge from-layer="414" from-port="0" to-layer="415" to-port="1"/>
-		<edge from-layer="415" from-port="2" to-layer="417" to-port="0"/>
-		<edge from-layer="416" from-port="0" to-layer="417" to-port="1"/>
-		<edge from-layer="417" from-port="2" to-layer="419" to-port="0"/>
-		<edge from-layer="418" from-port="0" to-layer="419" to-port="1"/>
-		<edge from-layer="419" from-port="2" to-layer="421" to-port="0"/>
-		<edge from-layer="420" from-port="0" to-layer="421" to-port="1"/>
-		<edge from-layer="421" from-port="2" to-layer="423" to-port="0"/>
-		<edge from-layer="422" from-port="0" to-layer="423" to-port="1"/>
-		<edge from-layer="423" from-port="2" to-layer="424" to-port="0"/>
-		<edge from-layer="424" from-port="2" to-layer="488" to-port="1"/>
-		<edge from-layer="424" from-port="2" to-layer="426" to-port="0"/>
-		<edge from-layer="425" from-port="0" to-layer="426" to-port="1"/>
-		<edge from-layer="426" from-port="2" to-layer="428" to-port="0"/>
-		<edge from-layer="427" from-port="0" to-layer="428" to-port="1"/>
-		<edge from-layer="428" from-port="2" to-layer="430" to-port="0"/>
-		<edge from-layer="429" from-port="0" to-layer="430" to-port="1"/>
-		<edge from-layer="430" from-port="2" to-layer="448" to-port="0"/>
-		<edge from-layer="430" from-port="2" to-layer="438" to-port="0"/>
-		<edge from-layer="430" from-port="2" to-layer="432" to-port="0"/>
-		<edge from-layer="431" from-port="0" to-layer="432" to-port="1"/>
-		<edge from-layer="432" from-port="2" to-layer="433" to-port="0"/>
-		<edge from-layer="433" from-port="2" to-layer="435" to-port="0"/>
-		<edge from-layer="434" from-port="0" to-layer="435" to-port="1"/>
-		<edge from-layer="435" from-port="2" to-layer="436" to-port="0"/>
-		<edge from-layer="436" from-port="2" to-layer="443" to-port="0"/>
-		<edge from-layer="437" from-port="0" to-layer="438" to-port="1"/>
-		<edge from-layer="438" from-port="2" to-layer="439" to-port="0"/>
-		<edge from-layer="439" from-port="2" to-layer="441" to-port="0"/>
-		<edge from-layer="440" from-port="0" to-layer="441" to-port="1"/>
-		<edge from-layer="441" from-port="2" to-layer="442" to-port="0"/>
-		<edge from-layer="442" from-port="2" to-layer="443" to-port="1"/>
-		<edge from-layer="443" from-port="2" to-layer="445" to-port="0"/>
-		<edge from-layer="444" from-port="0" to-layer="445" to-port="1"/>
-		<edge from-layer="445" from-port="2" to-layer="446" to-port="0"/>
-		<edge from-layer="446" from-port="1" to-layer="453" to-port="0"/>
-		<edge from-layer="447" from-port="0" to-layer="448" to-port="1"/>
-		<edge from-layer="448" from-port="2" to-layer="449" to-port="0"/>
-		<edge from-layer="449" from-port="2" to-layer="451" to-port="0"/>
-		<edge from-layer="450" from-port="0" to-layer="451" to-port="1"/>
-		<edge from-layer="451" from-port="2" to-layer="452" to-port="0"/>
-		<edge from-layer="452" from-port="2" to-layer="453" to-port="1"/>
-		<edge from-layer="453" from-port="2" to-layer="454" to-port="0"/>
-		<edge from-layer="453" from-port="2" to-layer="466" to-port="0"/>
-		<edge from-layer="454" from-port="1" to-layer="457" to-port="0"/>
-		<edge from-layer="454" from-port="1" to-layer="464" to-port="0"/>
-		<edge from-layer="454" from-port="1" to-layer="478" to-port="0"/>
-		<edge from-layer="454" from-port="1" to-layer="475" to-port="0"/>
-		<edge from-layer="455" from-port="0" to-layer="457" to-port="1"/>
-		<edge from-layer="456" from-port="0" to-layer="457" to-port="2"/>
-		<edge from-layer="457" from-port="3" to-layer="470" to-port="0"/>
-		<edge from-layer="457" from-port="3" to-layer="459" to-port="0"/>
-		<edge from-layer="458" from-port="0" to-layer="459" to-port="1"/>
-		<edge from-layer="459" from-port="2" to-layer="461" to-port="0"/>
-		<edge from-layer="460" from-port="0" to-layer="461" to-port="1"/>
-		<edge from-layer="461" from-port="2" to-layer="465" to-port="0"/>
-		<edge from-layer="462" from-port="0" to-layer="464" to-port="1"/>
-		<edge from-layer="463" from-port="0" to-layer="464" to-port="2"/>
-		<edge from-layer="464" from-port="3" to-layer="465" to-port="2"/>
-		<edge from-layer="465" from-port="3" to-layer="466" to-port="1"/>
-		<edge from-layer="466" from-port="2" to-layer="468" to-port="0"/>
-		<edge from-layer="467" from-port="0" to-layer="468" to-port="1"/>
-		<edge from-layer="468" from-port="2" to-layer="484" to-port="0"/>
-		<edge from-layer="469" from-port="0" to-layer="470" to-port="1"/>
-		<edge from-layer="470" from-port="2" to-layer="472" to-port="0"/>
-		<edge from-layer="471" from-port="0" to-layer="472" to-port="1"/>
-		<edge from-layer="472" from-port="2" to-layer="483" to-port="0"/>
-		<edge from-layer="473" from-port="0" to-layer="475" to-port="1"/>
-		<edge from-layer="474" from-port="0" to-layer="475" to-port="2"/>
-		<edge from-layer="475" from-port="3" to-layer="483" to-port="1"/>
-		<edge from-layer="476" from-port="0" to-layer="478" to-port="1"/>
-		<edge from-layer="477" from-port="0" to-layer="478" to-port="2"/>
-		<edge from-layer="478" from-port="3" to-layer="480" to-port="0"/>
-		<edge from-layer="479" from-port="0" to-layer="480" to-port="1"/>
-		<edge from-layer="480" from-port="2" to-layer="482" to-port="0"/>
-		<edge from-layer="481" from-port="0" to-layer="482" to-port="1"/>
-		<edge from-layer="482" from-port="2" to-layer="483" to-port="2"/>
-		<edge from-layer="483" from-port="3" to-layer="484" to-port="1"/>
-		<edge from-layer="484" from-port="2" to-layer="486" to-port="0"/>
-		<edge from-layer="485" from-port="0" to-layer="486" to-port="1"/>
-		<edge from-layer="486" from-port="2" to-layer="487" to-port="1"/>
-		<edge from-layer="487" from-port="2" to-layer="488" to-port="0"/>
-		<edge from-layer="488" from-port="2" to-layer="577" to-port="1"/>
-		<edge from-layer="488" from-port="2" to-layer="490" to-port="0"/>
-		<edge from-layer="489" from-port="0" to-layer="490" to-port="1"/>
-		<edge from-layer="490" from-port="2" to-layer="492" to-port="0"/>
-		<edge from-layer="491" from-port="0" to-layer="492" to-port="1"/>
-		<edge from-layer="492" from-port="2" to-layer="494" to-port="0"/>
-		<edge from-layer="493" from-port="0" to-layer="494" to-port="1"/>
-		<edge from-layer="494" from-port="2" to-layer="496" to-port="0"/>
-		<edge from-layer="495" from-port="0" to-layer="496" to-port="1"/>
-		<edge from-layer="496" from-port="2" to-layer="498" to-port="0"/>
-		<edge from-layer="496" from-port="2" to-layer="507" to-port="0"/>
-		<edge from-layer="497" from-port="0" to-layer="506" to-port="0"/>
-		<edge from-layer="498" from-port="1" to-layer="501" to-port="0"/>
-		<edge from-layer="498" from-port="1" to-layer="512" to-port="0"/>
-		<edge from-layer="498" from-port="1" to-layer="519" to-port="0"/>
-		<edge from-layer="499" from-port="0" to-layer="501" to-port="1"/>
-		<edge from-layer="500" from-port="0" to-layer="501" to-port="2"/>
-		<edge from-layer="501" from-port="3" to-layer="521" to-port="0"/>
-		<edge from-layer="501" from-port="3" to-layer="503" to-port="0"/>
-		<edge from-layer="502" from-port="0" to-layer="503" to-port="1"/>
-		<edge from-layer="503" from-port="2" to-layer="505" to-port="0"/>
-		<edge from-layer="504" from-port="0" to-layer="505" to-port="1"/>
-		<edge from-layer="505" from-port="2" to-layer="506" to-port="2"/>
-		<edge from-layer="506" from-port="3" to-layer="507" to-port="1"/>
-		<edge from-layer="507" from-port="2" to-layer="509" to-port="0"/>
-		<edge from-layer="508" from-port="0" to-layer="509" to-port="1"/>
-		<edge from-layer="509" from-port="2" to-layer="525" to-port="0"/>
-		<edge from-layer="510" from-port="0" to-layer="512" to-port="1"/>
-		<edge from-layer="511" from-port="0" to-layer="512" to-port="2"/>
-		<edge from-layer="512" from-port="3" to-layer="514" to-port="0"/>
-		<edge from-layer="513" from-port="0" to-layer="514" to-port="1"/>
-		<edge from-layer="514" from-port="2" to-layer="516" to-port="0"/>
-		<edge from-layer="515" from-port="0" to-layer="516" to-port="1"/>
-		<edge from-layer="516" from-port="2" to-layer="524" to-port="0"/>
-		<edge from-layer="517" from-port="0" to-layer="519" to-port="1"/>
-		<edge from-layer="518" from-port="0" to-layer="519" to-port="2"/>
-		<edge from-layer="519" from-port="3" to-layer="524" to-port="1"/>
-		<edge from-layer="520" from-port="0" to-layer="521" to-port="1"/>
-		<edge from-layer="521" from-port="2" to-layer="523" to-port="0"/>
-		<edge from-layer="522" from-port="0" to-layer="523" to-port="1"/>
-		<edge from-layer="523" from-port="2" to-layer="524" to-port="2"/>
-		<edge from-layer="524" from-port="3" to-layer="525" to-port="1"/>
-		<edge from-layer="525" from-port="2" to-layer="532" to-port="0"/>
-		<edge from-layer="526" from-port="0" to-layer="527" to-port="1"/>
-		<edge from-layer="527" from-port="2" to-layer="528" to-port="0"/>
-		<edge from-layer="528" from-port="2" to-layer="530" to-port="0"/>
-		<edge from-layer="529" from-port="0" to-layer="530" to-port="1"/>
-		<edge from-layer="530" from-port="2" to-layer="531" to-port="0"/>
-		<edge from-layer="531" from-port="2" to-layer="532" to-port="1"/>
-		<edge from-layer="532" from-port="2" to-layer="534" to-port="0"/>
-		<edge from-layer="533" from-port="0" to-layer="534" to-port="1"/>
-		<edge from-layer="534" from-port="2" to-layer="535" to-port="0"/>
-		<edge from-layer="535" from-port="1" to-layer="542" to-port="0"/>
-		<edge from-layer="536" from-port="0" to-layer="537" to-port="1"/>
-		<edge from-layer="537" from-port="2" to-layer="538" to-port="0"/>
-		<edge from-layer="538" from-port="2" to-layer="540" to-port="0"/>
-		<edge from-layer="539" from-port="0" to-layer="540" to-port="1"/>
-		<edge from-layer="540" from-port="2" to-layer="541" to-port="0"/>
-		<edge from-layer="541" from-port="2" to-layer="542" to-port="1"/>
-		<edge from-layer="542" from-port="2" to-layer="555" to-port="0"/>
-		<edge from-layer="542" from-port="2" to-layer="543" to-port="0"/>
-		<edge from-layer="543" from-port="1" to-layer="553" to-port="0"/>
-		<edge from-layer="543" from-port="1" to-layer="546" to-port="0"/>
-		<edge from-layer="543" from-port="1" to-layer="564" to-port="0"/>
-		<edge from-layer="543" from-port="1" to-layer="567" to-port="0"/>
-		<edge from-layer="544" from-port="0" to-layer="546" to-port="1"/>
-		<edge from-layer="545" from-port="0" to-layer="546" to-port="2"/>
-		<edge from-layer="546" from-port="3" to-layer="559" to-port="0"/>
-		<edge from-layer="546" from-port="3" to-layer="548" to-port="0"/>
-		<edge from-layer="547" from-port="0" to-layer="548" to-port="1"/>
-		<edge from-layer="548" from-port="2" to-layer="550" to-port="0"/>
-		<edge from-layer="549" from-port="0" to-layer="550" to-port="1"/>
-		<edge from-layer="550" from-port="2" to-layer="554" to-port="0"/>
-		<edge from-layer="551" from-port="0" to-layer="553" to-port="1"/>
-		<edge from-layer="552" from-port="0" to-layer="553" to-port="2"/>
-		<edge from-layer="553" from-port="3" to-layer="554" to-port="2"/>
-		<edge from-layer="554" from-port="3" to-layer="555" to-port="1"/>
-		<edge from-layer="555" from-port="2" to-layer="557" to-port="0"/>
-		<edge from-layer="556" from-port="0" to-layer="557" to-port="1"/>
-		<edge from-layer="557" from-port="2" to-layer="573" to-port="0"/>
-		<edge from-layer="558" from-port="0" to-layer="559" to-port="1"/>
-		<edge from-layer="559" from-port="2" to-layer="561" to-port="0"/>
-		<edge from-layer="560" from-port="0" to-layer="561" to-port="1"/>
-		<edge from-layer="561" from-port="2" to-layer="572" to-port="0"/>
-		<edge from-layer="562" from-port="0" to-layer="564" to-port="1"/>
-		<edge from-layer="563" from-port="0" to-layer="564" to-port="2"/>
-		<edge from-layer="564" from-port="3" to-layer="572" to-port="1"/>
-		<edge from-layer="565" from-port="0" to-layer="567" to-port="1"/>
-		<edge from-layer="566" from-port="0" to-layer="567" to-port="2"/>
-		<edge from-layer="567" from-port="3" to-layer="569" to-port="0"/>
-		<edge from-layer="568" from-port="0" to-layer="569" to-port="1"/>
-		<edge from-layer="569" from-port="2" to-layer="571" to-port="0"/>
-		<edge from-layer="570" from-port="0" to-layer="571" to-port="1"/>
-		<edge from-layer="571" from-port="2" to-layer="572" to-port="2"/>
-		<edge from-layer="572" from-port="3" to-layer="573" to-port="1"/>
-		<edge from-layer="573" from-port="2" to-layer="575" to-port="0"/>
-		<edge from-layer="574" from-port="0" to-layer="575" to-port="1"/>
-		<edge from-layer="575" from-port="2" to-layer="576" to-port="1"/>
-		<edge from-layer="576" from-port="2" to-layer="577" to-port="0"/>
-		<edge from-layer="577" from-port="2" to-layer="617" to-port="1"/>
-		<edge from-layer="577" from-port="2" to-layer="579" to-port="0"/>
-		<edge from-layer="578" from-port="0" to-layer="579" to-port="1"/>
-		<edge from-layer="579" from-port="2" to-layer="581" to-port="0"/>
-		<edge from-layer="580" from-port="0" to-layer="581" to-port="1"/>
-		<edge from-layer="581" from-port="2" to-layer="583" to-port="0"/>
-		<edge from-layer="582" from-port="0" to-layer="583" to-port="1"/>
-		<edge from-layer="583" from-port="2" to-layer="585" to-port="0"/>
-		<edge from-layer="584" from-port="0" to-layer="585" to-port="1"/>
-		<edge from-layer="585" from-port="2" to-layer="586" to-port="1"/>
-		<edge from-layer="586" from-port="2" to-layer="601" to-port="0"/>
-		<edge from-layer="586" from-port="2" to-layer="611" to-port="0"/>
-		<edge from-layer="586" from-port="2" to-layer="590" to-port="0"/>
-		<edge from-layer="587" from-port="0" to-layer="601" to-port="1"/>
-		<edge from-layer="588" from-port="0" to-layer="599" to-port="0"/>
-		<edge from-layer="589" from-port="0" to-layer="599" to-port="1"/>
-		<edge from-layer="590" from-port="1" to-layer="593" to-port="0"/>
-		<edge from-layer="591" from-port="0" to-layer="593" to-port="1"/>
-		<edge from-layer="592" from-port="0" to-layer="593" to-port="2"/>
-		<edge from-layer="593" from-port="3" to-layer="595" to-port="0"/>
-		<edge from-layer="594" from-port="0" to-layer="595" to-port="1"/>
-		<edge from-layer="595" from-port="2" to-layer="597" to-port="0"/>
-		<edge from-layer="596" from-port="0" to-layer="597" to-port="1"/>
-		<edge from-layer="597" from-port="2" to-layer="599" to-port="2"/>
-		<edge from-layer="597" from-port="2" to-layer="605" to-port="2"/>
-		<edge from-layer="597" from-port="2" to-layer="608" to-port="0"/>
-		<edge from-layer="598" from-port="0" to-layer="599" to-port="3"/>
-		<edge from-layer="599" from-port="4" to-layer="601" to-port="2"/>
-		<edge from-layer="600" from-port="0" to-layer="601" to-port="3"/>
-		<edge from-layer="601" from-port="4" to-layer="613" to-port="0"/>
-		<edge from-layer="602" from-port="0" to-layer="605" to-port="0"/>
-		<edge from-layer="603" from-port="0" to-layer="605" to-port="1"/>
-		<edge from-layer="603" from-port="0" to-layer="609" to-port="1"/>
-		<edge from-layer="604" from-port="0" to-layer="605" to-port="3"/>
-		<edge from-layer="604" from-port="0" to-layer="609" to-port="3"/>
-		<edge from-layer="605" from-port="4" to-layer="611" to-port="1"/>
-		<edge from-layer="606" from-port="0" to-layer="609" to-port="0"/>
-		<edge from-layer="607" from-port="0" to-layer="608" to-port="1"/>
-		<edge from-layer="608" from-port="2" to-layer="609" to-port="2"/>
-		<edge from-layer="609" from-port="4" to-layer="611" to-port="2"/>
-		<edge from-layer="610" from-port="0" to-layer="611" to-port="3"/>
-		<edge from-layer="611" from-port="4" to-layer="612" to-port="0"/>
-		<edge from-layer="612" from-port="1" to-layer="613" to-port="1"/>
-		<edge from-layer="613" from-port="2" to-layer="615" to-port="0"/>
-		<edge from-layer="614" from-port="0" to-layer="615" to-port="1"/>
-		<edge from-layer="615" from-port="2" to-layer="616" to-port="1"/>
-		<edge from-layer="616" from-port="2" to-layer="617" to-port="0"/>
-		<edge from-layer="617" from-port="2" to-layer="618" to-port="0"/>
-		<edge from-layer="618" from-port="2" to-layer="620" to-port="0"/>
-		<edge from-layer="619" from-port="0" to-layer="620" to-port="1"/>
-		<edge from-layer="620" from-port="2" to-layer="622" to-port="0"/>
-		<edge from-layer="621" from-port="0" to-layer="622" to-port="1"/>
-		<edge from-layer="622" from-port="2" to-layer="624" to-port="0"/>
-		<edge from-layer="623" from-port="0" to-layer="624" to-port="1"/>
-		<edge from-layer="624" from-port="2" to-layer="625" to-port="0"/>
-		<edge from-layer="625" from-port="2" to-layer="4394" to-port="1"/>
-		<edge from-layer="625" from-port="2" to-layer="627" to-port="0"/>
-		<edge from-layer="626" from-port="0" to-layer="627" to-port="1"/>
-		<edge from-layer="627" from-port="2" to-layer="629" to-port="0"/>
-		<edge from-layer="628" from-port="0" to-layer="629" to-port="1"/>
-		<edge from-layer="629" from-port="2" to-layer="638" to-port="0"/>
-		<edge from-layer="629" from-port="2" to-layer="4027" to-port="1"/>
-		<edge from-layer="629" from-port="2" to-layer="635" to-port="0"/>
-		<edge from-layer="629" from-port="2" to-layer="631" to-port="0"/>
-		<edge from-layer="630" from-port="0" to-layer="631" to-port="1"/>
-		<edge from-layer="631" from-port="2" to-layer="633" to-port="0"/>
-		<edge from-layer="632" from-port="0" to-layer="633" to-port="1"/>
-		<edge from-layer="633" from-port="2" to-layer="674" to-port="0"/>
-		<edge from-layer="634" from-port="0" to-layer="635" to-port="1"/>
-		<edge from-layer="635" from-port="2" to-layer="637" to-port="0"/>
-		<edge from-layer="636" from-port="0" to-layer="637" to-port="1"/>
-		<edge from-layer="637" from-port="2" to-layer="639" to-port="0"/>
-		<edge from-layer="638" from-port="1" to-layer="639" to-port="1"/>
-		<edge from-layer="639" from-port="2" to-layer="641" to-port="0"/>
-		<edge from-layer="640" from-port="0" to-layer="641" to-port="1"/>
-		<edge from-layer="641" from-port="2" to-layer="643" to-port="0"/>
-		<edge from-layer="642" from-port="0" to-layer="643" to-port="1"/>
-		<edge from-layer="643" from-port="2" to-layer="644" to-port="0"/>
-		<edge from-layer="644" from-port="1" to-layer="646" to-port="0"/>
-		<edge from-layer="645" from-port="0" to-layer="646" to-port="1"/>
-		<edge from-layer="646" from-port="2" to-layer="648" to-port="0"/>
-		<edge from-layer="647" from-port="0" to-layer="648" to-port="1"/>
-		<edge from-layer="648" from-port="2" to-layer="658" to-port="0"/>
-		<edge from-layer="649" from-port="1" to-layer="651" to-port="0"/>
-		<edge from-layer="650" from-port="0" to-layer="651" to-port="1"/>
-		<edge from-layer="651" from-port="2" to-layer="653" to-port="0"/>
-		<edge from-layer="652" from-port="0" to-layer="653" to-port="1"/>
-		<edge from-layer="653" from-port="2" to-layer="655" to-port="0"/>
-		<edge from-layer="654" from-port="0" to-layer="655" to-port="1"/>
-		<edge from-layer="655" from-port="2" to-layer="657" to-port="0"/>
-		<edge from-layer="656" from-port="0" to-layer="657" to-port="1"/>
-		<edge from-layer="657" from-port="2" to-layer="658" to-port="1"/>
-		<edge from-layer="658" from-port="2" to-layer="663" to-port="0"/>
-		<edge from-layer="658" from-port="2" to-layer="660" to-port="0"/>
-		<edge from-layer="659" from-port="0" to-layer="660" to-port="1"/>
-		<edge from-layer="660" from-port="2" to-layer="662" to-port="0"/>
-		<edge from-layer="661" from-port="0" to-layer="662" to-port="1"/>
-		<edge from-layer="662" from-port="2" to-layer="664" to-port="0"/>
-		<edge from-layer="663" from-port="1" to-layer="664" to-port="1"/>
-		<edge from-layer="664" from-port="2" to-layer="666" to-port="0"/>
-		<edge from-layer="665" from-port="0" to-layer="666" to-port="1"/>
-		<edge from-layer="666" from-port="2" to-layer="668" to-port="0"/>
-		<edge from-layer="667" from-port="0" to-layer="668" to-port="1"/>
-		<edge from-layer="668" from-port="2" to-layer="669" to-port="0"/>
-		<edge from-layer="669" from-port="1" to-layer="671" to-port="0"/>
-		<edge from-layer="670" from-port="0" to-layer="671" to-port="1"/>
-		<edge from-layer="671" from-port="2" to-layer="673" to-port="0"/>
-		<edge from-layer="672" from-port="0" to-layer="673" to-port="1"/>
-		<edge from-layer="673" from-port="2" to-layer="674" to-port="1"/>
-		<edge from-layer="674" from-port="2" to-layer="898" to-port="1"/>
-		<edge from-layer="674" from-port="2" to-layer="676" to-port="0"/>
-		<edge from-layer="674" from-port="2" to-layer="679" to-port="0"/>
-		<edge from-layer="675" from-port="0" to-layer="676" to-port="1"/>
-		<edge from-layer="676" from-port="2" to-layer="678" to-port="0"/>
-		<edge from-layer="677" from-port="0" to-layer="678" to-port="1"/>
-		<edge from-layer="678" from-port="2" to-layer="680" to-port="0"/>
-		<edge from-layer="679" from-port="1" to-layer="680" to-port="1"/>
-		<edge from-layer="680" from-port="2" to-layer="682" to-port="0"/>
-		<edge from-layer="681" from-port="0" to-layer="682" to-port="1"/>
-		<edge from-layer="682" from-port="2" to-layer="684" to-port="0"/>
-		<edge from-layer="683" from-port="0" to-layer="684" to-port="1"/>
-		<edge from-layer="684" from-port="2" to-layer="686" to-port="0"/>
-		<edge from-layer="685" from-port="0" to-layer="686" to-port="1"/>
-		<edge from-layer="686" from-port="2" to-layer="688" to-port="0"/>
-		<edge from-layer="687" from-port="0" to-layer="688" to-port="1"/>
-		<edge from-layer="688" from-port="2" to-layer="690" to-port="0"/>
-		<edge from-layer="689" from-port="0" to-layer="690" to-port="1"/>
-		<edge from-layer="690" from-port="2" to-layer="692" to-port="0"/>
-		<edge from-layer="691" from-port="0" to-layer="956" to-port="1"/>
-		<edge from-layer="691" from-port="0" to-layer="692" to-port="1"/>
-		<edge from-layer="692" from-port="2" to-layer="758" to-port="1"/>
-		<edge from-layer="692" from-port="2" to-layer="694" to-port="0"/>
-		<edge from-layer="693" from-port="0" to-layer="694" to-port="1"/>
-		<edge from-layer="694" from-port="2" to-layer="696" to-port="0"/>
-		<edge from-layer="695" from-port="0" to-layer="696" to-port="1"/>
-		<edge from-layer="696" from-port="2" to-layer="698" to-port="0"/>
-		<edge from-layer="697" from-port="0" to-layer="698" to-port="1"/>
-		<edge from-layer="698" from-port="2" to-layer="708" to-port="0"/>
-		<edge from-layer="698" from-port="2" to-layer="718" to-port="0"/>
-		<edge from-layer="698" from-port="2" to-layer="700" to-port="0"/>
-		<edge from-layer="699" from-port="0" to-layer="700" to-port="1"/>
-		<edge from-layer="700" from-port="2" to-layer="702" to-port="0"/>
-		<edge from-layer="701" from-port="0" to-layer="702" to-port="1"/>
-		<edge from-layer="701" from-port="0" to-layer="719" to-port="1"/>
-		<edge from-layer="701" from-port="0" to-layer="709" to-port="1"/>
-		<edge from-layer="701" from-port="0" to-layer="981" to-port="1"/>
-		<edge from-layer="701" from-port="0" to-layer="965" to-port="1"/>
-		<edge from-layer="701" from-port="0" to-layer="971" to-port="1"/>
-		<edge from-layer="702" from-port="2" to-layer="704" to-port="0"/>
-		<edge from-layer="703" from-port="0" to-layer="704" to-port="1"/>
-		<edge from-layer="704" from-port="2" to-layer="706" to-port="0"/>
-		<edge from-layer="705" from-port="0" to-layer="722" to-port="1"/>
-		<edge from-layer="705" from-port="0" to-layer="984" to-port="1"/>
-		<edge from-layer="705" from-port="0" to-layer="968" to-port="1"/>
-		<edge from-layer="705" from-port="0" to-layer="712" to-port="1"/>
-		<edge from-layer="705" from-port="0" to-layer="974" to-port="1"/>
-		<edge from-layer="705" from-port="0" to-layer="706" to-port="1"/>
-		<edge from-layer="706" from-port="2" to-layer="713" to-port="0"/>
-		<edge from-layer="707" from-port="0" to-layer="708" to-port="1"/>
-		<edge from-layer="708" from-port="2" to-layer="709" to-port="0"/>
-		<edge from-layer="709" from-port="2" to-layer="711" to-port="0"/>
-		<edge from-layer="710" from-port="0" to-layer="711" to-port="1"/>
-		<edge from-layer="711" from-port="2" to-layer="712" to-port="0"/>
-		<edge from-layer="712" from-port="2" to-layer="713" to-port="1"/>
-		<edge from-layer="713" from-port="2" to-layer="715" to-port="0"/>
-		<edge from-layer="714" from-port="0" to-layer="715" to-port="1"/>
-		<edge from-layer="715" from-port="2" to-layer="716" to-port="0"/>
-		<edge from-layer="716" from-port="1" to-layer="723" to-port="0"/>
-		<edge from-layer="717" from-port="0" to-layer="718" to-port="1"/>
-		<edge from-layer="718" from-port="2" to-layer="719" to-port="0"/>
-		<edge from-layer="719" from-port="2" to-layer="721" to-port="0"/>
-		<edge from-layer="720" from-port="0" to-layer="721" to-port="1"/>
-		<edge from-layer="721" from-port="2" to-layer="722" to-port="0"/>
-		<edge from-layer="722" from-port="2" to-layer="723" to-port="1"/>
-		<edge from-layer="723" from-port="2" to-layer="736" to-port="0"/>
-		<edge from-layer="723" from-port="2" to-layer="724" to-port="0"/>
-		<edge from-layer="724" from-port="1" to-layer="727" to-port="0"/>
-		<edge from-layer="724" from-port="1" to-layer="734" to-port="0"/>
-		<edge from-layer="724" from-port="1" to-layer="748" to-port="0"/>
-		<edge from-layer="724" from-port="1" to-layer="745" to-port="0"/>
-		<edge from-layer="725" from-port="0" to-layer="727" to-port="1"/>
-		<edge from-layer="726" from-port="0" to-layer="727" to-port="2"/>
-		<edge from-layer="727" from-port="3" to-layer="740" to-port="0"/>
-		<edge from-layer="727" from-port="3" to-layer="729" to-port="0"/>
-		<edge from-layer="728" from-port="0" to-layer="729" to-port="1"/>
-		<edge from-layer="729" from-port="2" to-layer="731" to-port="0"/>
-		<edge from-layer="730" from-port="0" to-layer="731" to-port="1"/>
-		<edge from-layer="731" from-port="2" to-layer="735" to-port="0"/>
-		<edge from-layer="732" from-port="0" to-layer="734" to-port="1"/>
-		<edge from-layer="733" from-port="0" to-layer="734" to-port="2"/>
-		<edge from-layer="734" from-port="3" to-layer="735" to-port="2"/>
-		<edge from-layer="735" from-port="3" to-layer="736" to-port="1"/>
-		<edge from-layer="736" from-port="2" to-layer="738" to-port="0"/>
-		<edge from-layer="737" from-port="0" to-layer="738" to-port="1"/>
-		<edge from-layer="738" from-port="2" to-layer="754" to-port="0"/>
-		<edge from-layer="739" from-port="0" to-layer="740" to-port="1"/>
-		<edge from-layer="740" from-port="2" to-layer="742" to-port="0"/>
-		<edge from-layer="741" from-port="0" to-layer="742" to-port="1"/>
-		<edge from-layer="742" from-port="2" to-layer="753" to-port="0"/>
-		<edge from-layer="743" from-port="0" to-layer="745" to-port="1"/>
-		<edge from-layer="744" from-port="0" to-layer="745" to-port="2"/>
-		<edge from-layer="745" from-port="3" to-layer="753" to-port="1"/>
-		<edge from-layer="746" from-port="0" to-layer="748" to-port="1"/>
-		<edge from-layer="747" from-port="0" to-layer="748" to-port="2"/>
-		<edge from-layer="748" from-port="3" to-layer="750" to-port="0"/>
-		<edge from-layer="749" from-port="0" to-layer="750" to-port="1"/>
-		<edge from-layer="750" from-port="2" to-layer="752" to-port="0"/>
-		<edge from-layer="751" from-port="0" to-layer="752" to-port="1"/>
-		<edge from-layer="752" from-port="2" to-layer="753" to-port="2"/>
-		<edge from-layer="753" from-port="3" to-layer="754" to-port="1"/>
-		<edge from-layer="754" from-port="2" to-layer="756" to-port="0"/>
-		<edge from-layer="755" from-port="0" to-layer="756" to-port="1"/>
-		<edge from-layer="756" from-port="2" to-layer="757" to-port="1"/>
-		<edge from-layer="757" from-port="2" to-layer="758" to-port="0"/>
-		<edge from-layer="758" from-port="2" to-layer="760" to-port="0"/>
-		<edge from-layer="758" from-port="2" to-layer="849" to-port="1"/>
-		<edge from-layer="759" from-port="0" to-layer="760" to-port="1"/>
-		<edge from-layer="760" from-port="2" to-layer="762" to-port="0"/>
-		<edge from-layer="761" from-port="0" to-layer="762" to-port="1"/>
-		<edge from-layer="762" from-port="2" to-layer="764" to-port="0"/>
-		<edge from-layer="763" from-port="0" to-layer="764" to-port="1"/>
-		<edge from-layer="764" from-port="2" to-layer="766" to-port="0"/>
-		<edge from-layer="765" from-port="0" to-layer="766" to-port="1"/>
-		<edge from-layer="766" from-port="2" to-layer="777" to-port="0"/>
-		<edge from-layer="766" from-port="2" to-layer="768" to-port="0"/>
-		<edge from-layer="767" from-port="0" to-layer="776" to-port="0"/>
-		<edge from-layer="768" from-port="1" to-layer="771" to-port="0"/>
-		<edge from-layer="768" from-port="1" to-layer="789" to-port="0"/>
-		<edge from-layer="768" from-port="1" to-layer="782" to-port="0"/>
-		<edge from-layer="769" from-port="0" to-layer="771" to-port="1"/>
-		<edge from-layer="770" from-port="0" to-layer="771" to-port="2"/>
-		<edge from-layer="771" from-port="3" to-layer="791" to-port="0"/>
-		<edge from-layer="771" from-port="3" to-layer="773" to-port="0"/>
-		<edge from-layer="772" from-port="0" to-layer="773" to-port="1"/>
-		<edge from-layer="773" from-port="2" to-layer="775" to-port="0"/>
-		<edge from-layer="774" from-port="0" to-layer="775" to-port="1"/>
-		<edge from-layer="775" from-port="2" to-layer="776" to-port="2"/>
-		<edge from-layer="776" from-port="3" to-layer="777" to-port="1"/>
-		<edge from-layer="777" from-port="2" to-layer="779" to-port="0"/>
-		<edge from-layer="778" from-port="0" to-layer="779" to-port="1"/>
-		<edge from-layer="779" from-port="2" to-layer="795" to-port="0"/>
-		<edge from-layer="780" from-port="0" to-layer="782" to-port="1"/>
-		<edge from-layer="781" from-port="0" to-layer="782" to-port="2"/>
-		<edge from-layer="782" from-port="3" to-layer="784" to-port="0"/>
-		<edge from-layer="783" from-port="0" to-layer="784" to-port="1"/>
-		<edge from-layer="784" from-port="2" to-layer="786" to-port="0"/>
-		<edge from-layer="785" from-port="0" to-layer="786" to-port="1"/>
-		<edge from-layer="786" from-port="2" to-layer="794" to-port="0"/>
-		<edge from-layer="787" from-port="0" to-layer="789" to-port="1"/>
-		<edge from-layer="788" from-port="0" to-layer="789" to-port="2"/>
-		<edge from-layer="789" from-port="3" to-layer="794" to-port="1"/>
-		<edge from-layer="790" from-port="0" to-layer="791" to-port="1"/>
-		<edge from-layer="791" from-port="2" to-layer="793" to-port="0"/>
-		<edge from-layer="792" from-port="0" to-layer="793" to-port="1"/>
-		<edge from-layer="793" from-port="2" to-layer="794" to-port="2"/>
-		<edge from-layer="794" from-port="3" to-layer="795" to-port="1"/>
-		<edge from-layer="795" from-port="2" to-layer="804" to-port="0"/>
-		<edge from-layer="796" from-port="0" to-layer="797" to-port="1"/>
-		<edge from-layer="797" from-port="2" to-layer="799" to-port="0"/>
-		<edge from-layer="798" from-port="0" to-layer="1060" to-port="1"/>
-		<edge from-layer="798" from-port="0" to-layer="810" to-port="1"/>
-		<edge from-layer="798" from-port="0" to-layer="3936" to-port="1"/>
-		<edge from-layer="798" from-port="0" to-layer="3926" to-port="1"/>
-		<edge from-layer="798" from-port="0" to-layer="3578" to-port="1"/>
-		<edge from-layer="798" from-port="0" to-layer="3568" to-port="1"/>
-		<edge from-layer="798" from-port="0" to-layer="799" to-port="1"/>
-		<edge from-layer="798" from-port="0" to-layer="1070" to-port="1"/>
-		<edge from-layer="798" from-port="0" to-layer="4294" to-port="1"/>
-		<edge from-layer="798" from-port="0" to-layer="4284" to-port="1"/>
-		<edge from-layer="799" from-port="2" to-layer="801" to-port="0"/>
-		<edge from-layer="800" from-port="0" to-layer="801" to-port="1"/>
-		<edge from-layer="801" from-port="2" to-layer="803" to-port="0"/>
-		<edge from-layer="802" from-port="0" to-layer="1063" to-port="1"/>
-		<edge from-layer="802" from-port="0" to-layer="1073" to-port="1"/>
-		<edge from-layer="802" from-port="0" to-layer="3939" to-port="1"/>
-		<edge from-layer="802" from-port="0" to-layer="4297" to-port="1"/>
-		<edge from-layer="802" from-port="0" to-layer="3929" to-port="1"/>
-		<edge from-layer="802" from-port="0" to-layer="3581" to-port="1"/>
-		<edge from-layer="802" from-port="0" to-layer="4287" to-port="1"/>
-		<edge from-layer="802" from-port="0" to-layer="3571" to-port="1"/>
-		<edge from-layer="802" from-port="0" to-layer="813" to-port="1"/>
-		<edge from-layer="802" from-port="0" to-layer="803" to-port="1"/>
-		<edge from-layer="803" from-port="2" to-layer="804" to-port="1"/>
-		<edge from-layer="804" from-port="2" to-layer="806" to-port="0"/>
-		<edge from-layer="805" from-port="0" to-layer="806" to-port="1"/>
-		<edge from-layer="806" from-port="2" to-layer="807" to-port="0"/>
-		<edge from-layer="807" from-port="1" to-layer="814" to-port="0"/>
-		<edge from-layer="808" from-port="0" to-layer="809" to-port="1"/>
-		<edge from-layer="809" from-port="2" to-layer="810" to-port="0"/>
-		<edge from-layer="810" from-port="2" to-layer="812" to-port="0"/>
-		<edge from-layer="811" from-port="0" to-layer="812" to-port="1"/>
-		<edge from-layer="812" from-port="2" to-layer="813" to-port="0"/>
-		<edge from-layer="813" from-port="2" to-layer="814" to-port="1"/>
-		<edge from-layer="814" from-port="2" to-layer="815" to-port="0"/>
-		<edge from-layer="814" from-port="2" to-layer="827" to-port="0"/>
-		<edge from-layer="815" from-port="1" to-layer="818" to-port="0"/>
-		<edge from-layer="815" from-port="1" to-layer="839" to-port="0"/>
-		<edge from-layer="815" from-port="1" to-layer="825" to-port="0"/>
-		<edge from-layer="815" from-port="1" to-layer="836" to-port="0"/>
-		<edge from-layer="816" from-port="0" to-layer="818" to-port="1"/>
-		<edge from-layer="817" from-port="0" to-layer="818" to-port="2"/>
-		<edge from-layer="818" from-port="3" to-layer="831" to-port="0"/>
-		<edge from-layer="818" from-port="3" to-layer="820" to-port="0"/>
-		<edge from-layer="819" from-port="0" to-layer="820" to-port="1"/>
-		<edge from-layer="820" from-port="2" to-layer="822" to-port="0"/>
-		<edge from-layer="821" from-port="0" to-layer="822" to-port="1"/>
-		<edge from-layer="822" from-port="2" to-layer="826" to-port="0"/>
-		<edge from-layer="823" from-port="0" to-layer="825" to-port="1"/>
-		<edge from-layer="824" from-port="0" to-layer="825" to-port="2"/>
-		<edge from-layer="825" from-port="3" to-layer="826" to-port="2"/>
-		<edge from-layer="826" from-port="3" to-layer="827" to-port="1"/>
-		<edge from-layer="827" from-port="2" to-layer="829" to-port="0"/>
-		<edge from-layer="828" from-port="0" to-layer="829" to-port="1"/>
-		<edge from-layer="829" from-port="2" to-layer="845" to-port="0"/>
-		<edge from-layer="830" from-port="0" to-layer="831" to-port="1"/>
-		<edge from-layer="831" from-port="2" to-layer="833" to-port="0"/>
-		<edge from-layer="832" from-port="0" to-layer="833" to-port="1"/>
-		<edge from-layer="833" from-port="2" to-layer="844" to-port="0"/>
-		<edge from-layer="834" from-port="0" to-layer="836" to-port="1"/>
-		<edge from-layer="835" from-port="0" to-layer="836" to-port="2"/>
-		<edge from-layer="836" from-port="3" to-layer="844" to-port="1"/>
-		<edge from-layer="837" from-port="0" to-layer="839" to-port="1"/>
-		<edge from-layer="838" from-port="0" to-layer="839" to-port="2"/>
-		<edge from-layer="839" from-port="3" to-layer="841" to-port="0"/>
-		<edge from-layer="840" from-port="0" to-layer="841" to-port="1"/>
-		<edge from-layer="841" from-port="2" to-layer="843" to-port="0"/>
-		<edge from-layer="842" from-port="0" to-layer="843" to-port="1"/>
-		<edge from-layer="843" from-port="2" to-layer="844" to-port="2"/>
-		<edge from-layer="844" from-port="3" to-layer="845" to-port="1"/>
-		<edge from-layer="845" from-port="2" to-layer="847" to-port="0"/>
-		<edge from-layer="846" from-port="0" to-layer="847" to-port="1"/>
-		<edge from-layer="847" from-port="2" to-layer="848" to-port="1"/>
-		<edge from-layer="848" from-port="2" to-layer="849" to-port="0"/>
-		<edge from-layer="849" from-port="2" to-layer="851" to-port="0"/>
-		<edge from-layer="849" from-port="2" to-layer="889" to-port="1"/>
-		<edge from-layer="850" from-port="0" to-layer="851" to-port="1"/>
-		<edge from-layer="851" from-port="2" to-layer="853" to-port="0"/>
-		<edge from-layer="852" from-port="0" to-layer="853" to-port="1"/>
-		<edge from-layer="853" from-port="2" to-layer="855" to-port="0"/>
-		<edge from-layer="854" from-port="0" to-layer="855" to-port="1"/>
-		<edge from-layer="855" from-port="2" to-layer="857" to-port="0"/>
-		<edge from-layer="856" from-port="0" to-layer="857" to-port="1"/>
-		<edge from-layer="857" from-port="2" to-layer="858" to-port="1"/>
-		<edge from-layer="858" from-port="2" to-layer="862" to-port="0"/>
-		<edge from-layer="858" from-port="2" to-layer="873" to-port="0"/>
-		<edge from-layer="858" from-port="2" to-layer="883" to-port="0"/>
-		<edge from-layer="859" from-port="0" to-layer="873" to-port="1"/>
-		<edge from-layer="860" from-port="0" to-layer="871" to-port="0"/>
-		<edge from-layer="861" from-port="0" to-layer="871" to-port="1"/>
-		<edge from-layer="862" from-port="1" to-layer="865" to-port="0"/>
-		<edge from-layer="863" from-port="0" to-layer="865" to-port="1"/>
-		<edge from-layer="864" from-port="0" to-layer="865" to-port="2"/>
-		<edge from-layer="865" from-port="3" to-layer="867" to-port="0"/>
-		<edge from-layer="866" from-port="0" to-layer="867" to-port="1"/>
-		<edge from-layer="867" from-port="2" to-layer="869" to-port="0"/>
-		<edge from-layer="868" from-port="0" to-layer="869" to-port="1"/>
-		<edge from-layer="869" from-port="2" to-layer="871" to-port="2"/>
-		<edge from-layer="869" from-port="2" to-layer="880" to-port="0"/>
-		<edge from-layer="869" from-port="2" to-layer="877" to-port="2"/>
-		<edge from-layer="870" from-port="0" to-layer="871" to-port="3"/>
-		<edge from-layer="871" from-port="4" to-layer="873" to-port="2"/>
-		<edge from-layer="872" from-port="0" to-layer="873" to-port="3"/>
-		<edge from-layer="873" from-port="4" to-layer="885" to-port="0"/>
-		<edge from-layer="874" from-port="0" to-layer="877" to-port="0"/>
-		<edge from-layer="875" from-port="0" to-layer="877" to-port="1"/>
-		<edge from-layer="875" from-port="0" to-layer="881" to-port="1"/>
-		<edge from-layer="876" from-port="0" to-layer="877" to-port="3"/>
-		<edge from-layer="876" from-port="0" to-layer="881" to-port="3"/>
-		<edge from-layer="877" from-port="4" to-layer="883" to-port="1"/>
-		<edge from-layer="878" from-port="0" to-layer="881" to-port="0"/>
-		<edge from-layer="879" from-port="0" to-layer="880" to-port="1"/>
-		<edge from-layer="880" from-port="2" to-layer="881" to-port="2"/>
-		<edge from-layer="881" from-port="4" to-layer="883" to-port="2"/>
-		<edge from-layer="882" from-port="0" to-layer="883" to-port="3"/>
-		<edge from-layer="883" from-port="4" to-layer="884" to-port="0"/>
-		<edge from-layer="884" from-port="1" to-layer="885" to-port="1"/>
-		<edge from-layer="885" from-port="2" to-layer="887" to-port="0"/>
-		<edge from-layer="886" from-port="0" to-layer="887" to-port="1"/>
-		<edge from-layer="887" from-port="2" to-layer="888" to-port="1"/>
-		<edge from-layer="888" from-port="2" to-layer="889" to-port="0"/>
-		<edge from-layer="889" from-port="2" to-layer="891" to-port="0"/>
-		<edge from-layer="890" from-port="0" to-layer="891" to-port="1"/>
-		<edge from-layer="890" from-port="0" to-layer="1150" to-port="1"/>
-		<edge from-layer="891" from-port="2" to-layer="893" to-port="0"/>
-		<edge from-layer="892" from-port="0" to-layer="893" to-port="1"/>
-		<edge from-layer="893" from-port="2" to-layer="895" to-port="0"/>
-		<edge from-layer="894" from-port="0" to-layer="895" to-port="1"/>
-		<edge from-layer="895" from-port="2" to-layer="897" to-port="0"/>
-		<edge from-layer="896" from-port="0" to-layer="897" to-port="1"/>
-		<edge from-layer="897" from-port="2" to-layer="898" to-port="0"/>
-		<edge from-layer="898" from-port="2" to-layer="939" to-port="0"/>
-		<edge from-layer="898" from-port="2" to-layer="3669" to-port="1"/>
-		<edge from-layer="898" from-port="2" to-layer="903" to-port="0"/>
-		<edge from-layer="898" from-port="2" to-layer="900" to-port="0"/>
-		<edge from-layer="899" from-port="0" to-layer="900" to-port="1"/>
-		<edge from-layer="900" from-port="2" to-layer="902" to-port="0"/>
-		<edge from-layer="901" from-port="0" to-layer="902" to-port="1"/>
-		<edge from-layer="902" from-port="2" to-layer="904" to-port="0"/>
-		<edge from-layer="903" from-port="1" to-layer="904" to-port="1"/>
-		<edge from-layer="904" from-port="2" to-layer="906" to-port="0"/>
-		<edge from-layer="905" from-port="0" to-layer="906" to-port="1"/>
-		<edge from-layer="906" from-port="2" to-layer="908" to-port="0"/>
-		<edge from-layer="907" from-port="0" to-layer="908" to-port="1"/>
-		<edge from-layer="908" from-port="2" to-layer="909" to-port="0"/>
-		<edge from-layer="909" from-port="1" to-layer="911" to-port="0"/>
-		<edge from-layer="910" from-port="0" to-layer="911" to-port="1"/>
-		<edge from-layer="911" from-port="2" to-layer="913" to-port="0"/>
-		<edge from-layer="912" from-port="0" to-layer="913" to-port="1"/>
-		<edge from-layer="913" from-port="2" to-layer="923" to-port="0"/>
-		<edge from-layer="914" from-port="1" to-layer="916" to-port="0"/>
-		<edge from-layer="915" from-port="0" to-layer="916" to-port="1"/>
-		<edge from-layer="916" from-port="2" to-layer="918" to-port="0"/>
-		<edge from-layer="917" from-port="0" to-layer="918" to-port="1"/>
-		<edge from-layer="918" from-port="2" to-layer="920" to-port="0"/>
-		<edge from-layer="919" from-port="0" to-layer="920" to-port="1"/>
-		<edge from-layer="920" from-port="2" to-layer="922" to-port="0"/>
-		<edge from-layer="921" from-port="0" to-layer="922" to-port="1"/>
-		<edge from-layer="922" from-port="2" to-layer="923" to-port="1"/>
-		<edge from-layer="923" from-port="2" to-layer="925" to-port="0"/>
-		<edge from-layer="923" from-port="2" to-layer="928" to-port="0"/>
-		<edge from-layer="924" from-port="0" to-layer="925" to-port="1"/>
-		<edge from-layer="925" from-port="2" to-layer="927" to-port="0"/>
-		<edge from-layer="926" from-port="0" to-layer="927" to-port="1"/>
-		<edge from-layer="927" from-port="2" to-layer="929" to-port="0"/>
-		<edge from-layer="928" from-port="1" to-layer="929" to-port="1"/>
-		<edge from-layer="929" from-port="2" to-layer="931" to-port="0"/>
-		<edge from-layer="930" from-port="0" to-layer="931" to-port="1"/>
-		<edge from-layer="931" from-port="2" to-layer="933" to-port="0"/>
-		<edge from-layer="932" from-port="0" to-layer="933" to-port="1"/>
-		<edge from-layer="933" from-port="2" to-layer="934" to-port="0"/>
-		<edge from-layer="934" from-port="1" to-layer="936" to-port="0"/>
-		<edge from-layer="935" from-port="0" to-layer="936" to-port="1"/>
-		<edge from-layer="936" from-port="2" to-layer="938" to-port="0"/>
-		<edge from-layer="937" from-port="0" to-layer="938" to-port="1"/>
-		<edge from-layer="938" from-port="2" to-layer="939" to-port="1"/>
-		<edge from-layer="939" from-port="2" to-layer="941" to-port="0"/>
-		<edge from-layer="939" from-port="2" to-layer="944" to-port="0"/>
-		<edge from-layer="939" from-port="2" to-layer="1157" to-port="1"/>
-		<edge from-layer="940" from-port="0" to-layer="941" to-port="1"/>
-		<edge from-layer="941" from-port="2" to-layer="943" to-port="0"/>
-		<edge from-layer="942" from-port="0" to-layer="943" to-port="1"/>
-		<edge from-layer="943" from-port="2" to-layer="945" to-port="0"/>
-		<edge from-layer="944" from-port="1" to-layer="945" to-port="1"/>
-		<edge from-layer="945" from-port="2" to-layer="947" to-port="0"/>
-		<edge from-layer="946" from-port="0" to-layer="947" to-port="1"/>
-		<edge from-layer="947" from-port="2" to-layer="949" to-port="0"/>
-		<edge from-layer="948" from-port="0" to-layer="949" to-port="1"/>
-		<edge from-layer="949" from-port="2" to-layer="951" to-port="0"/>
-		<edge from-layer="950" from-port="0" to-layer="951" to-port="1"/>
-		<edge from-layer="951" from-port="2" to-layer="953" to-port="0"/>
-		<edge from-layer="952" from-port="0" to-layer="953" to-port="1"/>
-		<edge from-layer="953" from-port="2" to-layer="955" to-port="0"/>
-		<edge from-layer="954" from-port="0" to-layer="955" to-port="1"/>
-		<edge from-layer="955" from-port="2" to-layer="956" to-port="0"/>
-		<edge from-layer="956" from-port="2" to-layer="1020" to-port="1"/>
-		<edge from-layer="956" from-port="2" to-layer="958" to-port="0"/>
-		<edge from-layer="957" from-port="0" to-layer="958" to-port="1"/>
-		<edge from-layer="958" from-port="2" to-layer="960" to-port="0"/>
-		<edge from-layer="959" from-port="0" to-layer="960" to-port="1"/>
-		<edge from-layer="960" from-port="2" to-layer="962" to-port="0"/>
-		<edge from-layer="961" from-port="0" to-layer="962" to-port="1"/>
-		<edge from-layer="962" from-port="2" to-layer="970" to-port="0"/>
-		<edge from-layer="962" from-port="2" to-layer="964" to-port="0"/>
-		<edge from-layer="962" from-port="2" to-layer="980" to-port="0"/>
-		<edge from-layer="963" from-port="0" to-layer="964" to-port="1"/>
-		<edge from-layer="964" from-port="2" to-layer="965" to-port="0"/>
-		<edge from-layer="965" from-port="2" to-layer="967" to-port="0"/>
-		<edge from-layer="966" from-port="0" to-layer="967" to-port="1"/>
-		<edge from-layer="967" from-port="2" to-layer="968" to-port="0"/>
-		<edge from-layer="968" from-port="2" to-layer="975" to-port="0"/>
-		<edge from-layer="969" from-port="0" to-layer="970" to-port="1"/>
-		<edge from-layer="970" from-port="2" to-layer="971" to-port="0"/>
-		<edge from-layer="971" from-port="2" to-layer="973" to-port="0"/>
-		<edge from-layer="972" from-port="0" to-layer="973" to-port="1"/>
-		<edge from-layer="973" from-port="2" to-layer="974" to-port="0"/>
-		<edge from-layer="974" from-port="2" to-layer="975" to-port="1"/>
-		<edge from-layer="975" from-port="2" to-layer="977" to-port="0"/>
-		<edge from-layer="976" from-port="0" to-layer="977" to-port="1"/>
-		<edge from-layer="977" from-port="2" to-layer="978" to-port="0"/>
-		<edge from-layer="978" from-port="1" to-layer="985" to-port="0"/>
-		<edge from-layer="979" from-port="0" to-layer="980" to-port="1"/>
-		<edge from-layer="980" from-port="2" to-layer="981" to-port="0"/>
-		<edge from-layer="981" from-port="2" to-layer="983" to-port="0"/>
-		<edge from-layer="982" from-port="0" to-layer="983" to-port="1"/>
-		<edge from-layer="983" from-port="2" to-layer="984" to-port="0"/>
-		<edge from-layer="984" from-port="2" to-layer="985" to-port="1"/>
-		<edge from-layer="985" from-port="2" to-layer="986" to-port="0"/>
-		<edge from-layer="985" from-port="2" to-layer="998" to-port="0"/>
-		<edge from-layer="986" from-port="1" to-layer="1007" to-port="0"/>
-		<edge from-layer="986" from-port="1" to-layer="1010" to-port="0"/>
-		<edge from-layer="986" from-port="1" to-layer="996" to-port="0"/>
-		<edge from-layer="986" from-port="1" to-layer="989" to-port="0"/>
-		<edge from-layer="987" from-port="0" to-layer="989" to-port="1"/>
-		<edge from-layer="988" from-port="0" to-layer="989" to-port="2"/>
-		<edge from-layer="989" from-port="3" to-layer="991" to-port="0"/>
-		<edge from-layer="989" from-port="3" to-layer="1002" to-port="0"/>
-		<edge from-layer="990" from-port="0" to-layer="991" to-port="1"/>
-		<edge from-layer="991" from-port="2" to-layer="993" to-port="0"/>
-		<edge from-layer="992" from-port="0" to-layer="993" to-port="1"/>
-		<edge from-layer="993" from-port="2" to-layer="997" to-port="0"/>
-		<edge from-layer="994" from-port="0" to-layer="996" to-port="1"/>
-		<edge from-layer="995" from-port="0" to-layer="996" to-port="2"/>
-		<edge from-layer="996" from-port="3" to-layer="997" to-port="2"/>
-		<edge from-layer="997" from-port="3" to-layer="998" to-port="1"/>
-		<edge from-layer="998" from-port="2" to-layer="1000" to-port="0"/>
-		<edge from-layer="999" from-port="0" to-layer="1000" to-port="1"/>
-		<edge from-layer="1000" from-port="2" to-layer="1016" to-port="0"/>
-		<edge from-layer="1001" from-port="0" to-layer="1002" to-port="1"/>
-		<edge from-layer="1002" from-port="2" to-layer="1004" to-port="0"/>
-		<edge from-layer="1003" from-port="0" to-layer="1004" to-port="1"/>
-		<edge from-layer="1004" from-port="2" to-layer="1015" to-port="0"/>
-		<edge from-layer="1005" from-port="0" to-layer="1007" to-port="1"/>
-		<edge from-layer="1006" from-port="0" to-layer="1007" to-port="2"/>
-		<edge from-layer="1007" from-port="3" to-layer="1015" to-port="1"/>
-		<edge from-layer="1008" from-port="0" to-layer="1010" to-port="1"/>
-		<edge from-layer="1009" from-port="0" to-layer="1010" to-port="2"/>
-		<edge from-layer="1010" from-port="3" to-layer="1012" to-port="0"/>
-		<edge from-layer="1011" from-port="0" to-layer="1012" to-port="1"/>
-		<edge from-layer="1012" from-port="2" to-layer="1014" to-port="0"/>
-		<edge from-layer="1013" from-port="0" to-layer="1014" to-port="1"/>
-		<edge from-layer="1014" from-port="2" to-layer="1015" to-port="2"/>
-		<edge from-layer="1015" from-port="3" to-layer="1016" to-port="1"/>
-		<edge from-layer="1016" from-port="2" to-layer="1018" to-port="0"/>
-		<edge from-layer="1017" from-port="0" to-layer="1018" to-port="1"/>
-		<edge from-layer="1018" from-port="2" to-layer="1019" to-port="1"/>
-		<edge from-layer="1019" from-port="2" to-layer="1020" to-port="0"/>
-		<edge from-layer="1020" from-port="2" to-layer="1109" to-port="1"/>
-		<edge from-layer="1020" from-port="2" to-layer="1022" to-port="0"/>
-		<edge from-layer="1021" from-port="0" to-layer="1022" to-port="1"/>
-		<edge from-layer="1022" from-port="2" to-layer="1024" to-port="0"/>
-		<edge from-layer="1023" from-port="0" to-layer="1024" to-port="1"/>
-		<edge from-layer="1024" from-port="2" to-layer="1026" to-port="0"/>
-		<edge from-layer="1025" from-port="0" to-layer="1026" to-port="1"/>
-		<edge from-layer="1026" from-port="2" to-layer="1028" to-port="0"/>
-		<edge from-layer="1027" from-port="0" to-layer="1028" to-port="1"/>
-		<edge from-layer="1028" from-port="2" to-layer="1030" to-port="0"/>
-		<edge from-layer="1028" from-port="2" to-layer="1039" to-port="0"/>
-		<edge from-layer="1029" from-port="0" to-layer="1038" to-port="0"/>
-		<edge from-layer="1030" from-port="1" to-layer="1044" to-port="0"/>
-		<edge from-layer="1030" from-port="1" to-layer="1033" to-port="0"/>
-		<edge from-layer="1030" from-port="1" to-layer="1051" to-port="0"/>
-		<edge from-layer="1031" from-port="0" to-layer="1033" to-port="1"/>
-		<edge from-layer="1032" from-port="0" to-layer="1033" to-port="2"/>
-		<edge from-layer="1033" from-port="3" to-layer="1053" to-port="0"/>
-		<edge from-layer="1033" from-port="3" to-layer="1035" to-port="0"/>
-		<edge from-layer="1034" from-port="0" to-layer="1035" to-port="1"/>
-		<edge from-layer="1035" from-port="2" to-layer="1037" to-port="0"/>
-		<edge from-layer="1036" from-port="0" to-layer="1037" to-port="1"/>
-		<edge from-layer="1037" from-port="2" to-layer="1038" to-port="2"/>
-		<edge from-layer="1038" from-port="3" to-layer="1039" to-port="1"/>
-		<edge from-layer="1039" from-port="2" to-layer="1041" to-port="0"/>
-		<edge from-layer="1040" from-port="0" to-layer="1041" to-port="1"/>
-		<edge from-layer="1041" from-port="2" to-layer="1057" to-port="0"/>
-		<edge from-layer="1042" from-port="0" to-layer="1044" to-port="1"/>
-		<edge from-layer="1043" from-port="0" to-layer="1044" to-port="2"/>
-		<edge from-layer="1044" from-port="3" to-layer="1046" to-port="0"/>
-		<edge from-layer="1045" from-port="0" to-layer="1046" to-port="1"/>
-		<edge from-layer="1046" from-port="2" to-layer="1048" to-port="0"/>
-		<edge from-layer="1047" from-port="0" to-layer="1048" to-port="1"/>
-		<edge from-layer="1048" from-port="2" to-layer="1056" to-port="0"/>
-		<edge from-layer="1049" from-port="0" to-layer="1051" to-port="1"/>
-		<edge from-layer="1050" from-port="0" to-layer="1051" to-port="2"/>
-		<edge from-layer="1051" from-port="3" to-layer="1056" to-port="1"/>
-		<edge from-layer="1052" from-port="0" to-layer="1053" to-port="1"/>
-		<edge from-layer="1053" from-port="2" to-layer="1055" to-port="0"/>
-		<edge from-layer="1054" from-port="0" to-layer="1055" to-port="1"/>
-		<edge from-layer="1055" from-port="2" to-layer="1056" to-port="2"/>
-		<edge from-layer="1056" from-port="3" to-layer="1057" to-port="1"/>
-		<edge from-layer="1057" from-port="2" to-layer="1064" to-port="0"/>
-		<edge from-layer="1058" from-port="0" to-layer="1059" to-port="1"/>
-		<edge from-layer="1059" from-port="2" to-layer="1060" to-port="0"/>
-		<edge from-layer="1060" from-port="2" to-layer="1062" to-port="0"/>
-		<edge from-layer="1061" from-port="0" to-layer="1062" to-port="1"/>
-		<edge from-layer="1062" from-port="2" to-layer="1063" to-port="0"/>
-		<edge from-layer="1063" from-port="2" to-layer="1064" to-port="1"/>
-		<edge from-layer="1064" from-port="2" to-layer="1066" to-port="0"/>
-		<edge from-layer="1065" from-port="0" to-layer="1066" to-port="1"/>
-		<edge from-layer="1066" from-port="2" to-layer="1067" to-port="0"/>
-		<edge from-layer="1067" from-port="1" to-layer="1074" to-port="0"/>
-		<edge from-layer="1068" from-port="0" to-layer="1069" to-port="1"/>
-		<edge from-layer="1069" from-port="2" to-layer="1070" to-port="0"/>
-		<edge from-layer="1070" from-port="2" to-layer="1072" to-port="0"/>
-		<edge from-layer="1071" from-port="0" to-layer="1072" to-port="1"/>
-		<edge from-layer="1072" from-port="2" to-layer="1073" to-port="0"/>
-		<edge from-layer="1073" from-port="2" to-layer="1074" to-port="1"/>
-		<edge from-layer="1074" from-port="2" to-layer="1075" to-port="0"/>
-		<edge from-layer="1074" from-port="2" to-layer="1087" to-port="0"/>
-		<edge from-layer="1075" from-port="1" to-layer="1078" to-port="0"/>
-		<edge from-layer="1075" from-port="1" to-layer="1099" to-port="0"/>
-		<edge from-layer="1075" from-port="1" to-layer="1096" to-port="0"/>
-		<edge from-layer="1075" from-port="1" to-layer="1085" to-port="0"/>
-		<edge from-layer="1076" from-port="0" to-layer="1078" to-port="1"/>
-		<edge from-layer="1077" from-port="0" to-layer="1078" to-port="2"/>
-		<edge from-layer="1078" from-port="3" to-layer="1080" to-port="0"/>
-		<edge from-layer="1078" from-port="3" to-layer="1091" to-port="0"/>
-		<edge from-layer="1079" from-port="0" to-layer="1080" to-port="1"/>
-		<edge from-layer="1080" from-port="2" to-layer="1082" to-port="0"/>
-		<edge from-layer="1081" from-port="0" to-layer="1082" to-port="1"/>
-		<edge from-layer="1082" from-port="2" to-layer="1086" to-port="0"/>
-		<edge from-layer="1083" from-port="0" to-layer="1085" to-port="1"/>
-		<edge from-layer="1084" from-port="0" to-layer="1085" to-port="2"/>
-		<edge from-layer="1085" from-port="3" to-layer="1086" to-port="2"/>
-		<edge from-layer="1086" from-port="3" to-layer="1087" to-port="1"/>
-		<edge from-layer="1087" from-port="2" to-layer="1089" to-port="0"/>
-		<edge from-layer="1088" from-port="0" to-layer="1089" to-port="1"/>
-		<edge from-layer="1089" from-port="2" to-layer="1105" to-port="0"/>
-		<edge from-layer="1090" from-port="0" to-layer="1091" to-port="1"/>
-		<edge from-layer="1091" from-port="2" to-layer="1093" to-port="0"/>
-		<edge from-layer="1092" from-port="0" to-layer="1093" to-port="1"/>
-		<edge from-layer="1093" from-port="2" to-layer="1104" to-port="0"/>
-		<edge from-layer="1094" from-port="0" to-layer="1096" to-port="1"/>
-		<edge from-layer="1095" from-port="0" to-layer="1096" to-port="2"/>
-		<edge from-layer="1096" from-port="3" to-layer="1104" to-port="1"/>
-		<edge from-layer="1097" from-port="0" to-layer="1099" to-port="1"/>
-		<edge from-layer="1098" from-port="0" to-layer="1099" to-port="2"/>
-		<edge from-layer="1099" from-port="3" to-layer="1101" to-port="0"/>
-		<edge from-layer="1100" from-port="0" to-layer="1101" to-port="1"/>
-		<edge from-layer="1101" from-port="2" to-layer="1103" to-port="0"/>
-		<edge from-layer="1102" from-port="0" to-layer="1103" to-port="1"/>
-		<edge from-layer="1103" from-port="2" to-layer="1104" to-port="2"/>
-		<edge from-layer="1104" from-port="3" to-layer="1105" to-port="1"/>
-		<edge from-layer="1105" from-port="2" to-layer="1107" to-port="0"/>
-		<edge from-layer="1106" from-port="0" to-layer="1107" to-port="1"/>
-		<edge from-layer="1107" from-port="2" to-layer="1108" to-port="1"/>
-		<edge from-layer="1108" from-port="2" to-layer="1109" to-port="0"/>
-		<edge from-layer="1109" from-port="2" to-layer="1149" to-port="1"/>
-		<edge from-layer="1109" from-port="2" to-layer="1111" to-port="0"/>
-		<edge from-layer="1110" from-port="0" to-layer="1111" to-port="1"/>
-		<edge from-layer="1111" from-port="2" to-layer="1113" to-port="0"/>
-		<edge from-layer="1112" from-port="0" to-layer="1113" to-port="1"/>
-		<edge from-layer="1113" from-port="2" to-layer="1115" to-port="0"/>
-		<edge from-layer="1114" from-port="0" to-layer="1115" to-port="1"/>
-		<edge from-layer="1115" from-port="2" to-layer="1117" to-port="0"/>
-		<edge from-layer="1116" from-port="0" to-layer="1117" to-port="1"/>
-		<edge from-layer="1117" from-port="2" to-layer="1118" to-port="1"/>
-		<edge from-layer="1118" from-port="2" to-layer="1143" to-port="0"/>
-		<edge from-layer="1118" from-port="2" to-layer="1133" to-port="0"/>
-		<edge from-layer="1118" from-port="2" to-layer="1122" to-port="0"/>
-		<edge from-layer="1119" from-port="0" to-layer="1133" to-port="1"/>
-		<edge from-layer="1120" from-port="0" to-layer="1131" to-port="0"/>
-		<edge from-layer="1121" from-port="0" to-layer="1131" to-port="1"/>
-		<edge from-layer="1122" from-port="1" to-layer="1125" to-port="0"/>
-		<edge from-layer="1123" from-port="0" to-layer="1125" to-port="1"/>
-		<edge from-layer="1124" from-port="0" to-layer="1125" to-port="2"/>
-		<edge from-layer="1125" from-port="3" to-layer="1127" to-port="0"/>
-		<edge from-layer="1126" from-port="0" to-layer="1127" to-port="1"/>
-		<edge from-layer="1127" from-port="2" to-layer="1129" to-port="0"/>
-		<edge from-layer="1128" from-port="0" to-layer="1129" to-port="1"/>
-		<edge from-layer="1129" from-port="2" to-layer="1131" to-port="2"/>
-		<edge from-layer="1129" from-port="2" to-layer="1140" to-port="0"/>
-		<edge from-layer="1129" from-port="2" to-layer="1137" to-port="2"/>
-		<edge from-layer="1130" from-port="0" to-layer="1131" to-port="3"/>
-		<edge from-layer="1131" from-port="4" to-layer="1133" to-port="2"/>
-		<edge from-layer="1132" from-port="0" to-layer="1133" to-port="3"/>
-		<edge from-layer="1133" from-port="4" to-layer="1145" to-port="0"/>
-		<edge from-layer="1134" from-port="0" to-layer="1137" to-port="0"/>
-		<edge from-layer="1135" from-port="0" to-layer="1141" to-port="1"/>
-		<edge from-layer="1135" from-port="0" to-layer="1137" to-port="1"/>
-		<edge from-layer="1136" from-port="0" to-layer="1137" to-port="3"/>
-		<edge from-layer="1136" from-port="0" to-layer="1141" to-port="3"/>
-		<edge from-layer="1137" from-port="4" to-layer="1143" to-port="1"/>
-		<edge from-layer="1138" from-port="0" to-layer="1141" to-port="0"/>
-		<edge from-layer="1139" from-port="0" to-layer="1140" to-port="1"/>
-		<edge from-layer="1140" from-port="2" to-layer="1141" to-port="2"/>
-		<edge from-layer="1141" from-port="4" to-layer="1143" to-port="2"/>
-		<edge from-layer="1142" from-port="0" to-layer="1143" to-port="3"/>
-		<edge from-layer="1143" from-port="4" to-layer="1144" to-port="0"/>
-		<edge from-layer="1144" from-port="1" to-layer="1145" to-port="1"/>
-		<edge from-layer="1145" from-port="2" to-layer="1147" to-port="0"/>
-		<edge from-layer="1146" from-port="0" to-layer="1147" to-port="1"/>
-		<edge from-layer="1147" from-port="2" to-layer="1148" to-port="1"/>
-		<edge from-layer="1148" from-port="2" to-layer="1149" to-port="0"/>
-		<edge from-layer="1149" from-port="2" to-layer="1150" to-port="0"/>
-		<edge from-layer="1150" from-port="2" to-layer="1152" to-port="0"/>
-		<edge from-layer="1151" from-port="0" to-layer="1152" to-port="1"/>
-		<edge from-layer="1152" from-port="2" to-layer="1154" to-port="0"/>
-		<edge from-layer="1153" from-port="0" to-layer="1154" to-port="1"/>
-		<edge from-layer="1154" from-port="2" to-layer="1156" to-port="0"/>
-		<edge from-layer="1155" from-port="0" to-layer="1156" to-port="1"/>
-		<edge from-layer="1156" from-port="2" to-layer="1157" to-port="0"/>
-		<edge from-layer="1157" from-port="2" to-layer="3311" to-port="1"/>
-		<edge from-layer="1157" from-port="2" to-layer="1159" to-port="0"/>
-		<edge from-layer="1158" from-port="0" to-layer="1159" to-port="1"/>
-		<edge from-layer="1159" from-port="2" to-layer="1161" to-port="0"/>
-		<edge from-layer="1160" from-port="0" to-layer="1161" to-port="1"/>
-		<edge from-layer="1161" from-port="2" to-layer="1163" to-port="0"/>
-		<edge from-layer="1161" from-port="2" to-layer="1167" to-port="0"/>
-		<edge from-layer="1161" from-port="2" to-layer="1170" to-port="0"/>
-		<edge from-layer="1161" from-port="2" to-layer="2944" to-port="1"/>
-		<edge from-layer="1162" from-port="0" to-layer="1163" to-port="1"/>
-		<edge from-layer="1163" from-port="2" to-layer="1165" to-port="0"/>
-		<edge from-layer="1164" from-port="0" to-layer="1165" to-port="1"/>
-		<edge from-layer="1165" from-port="2" to-layer="1206" to-port="0"/>
-		<edge from-layer="1166" from-port="0" to-layer="1167" to-port="1"/>
-		<edge from-layer="1167" from-port="2" to-layer="1169" to-port="0"/>
-		<edge from-layer="1168" from-port="0" to-layer="1169" to-port="1"/>
-		<edge from-layer="1169" from-port="2" to-layer="1171" to-port="0"/>
-		<edge from-layer="1170" from-port="1" to-layer="1171" to-port="1"/>
-		<edge from-layer="1171" from-port="2" to-layer="1173" to-port="0"/>
-		<edge from-layer="1172" from-port="0" to-layer="1173" to-port="1"/>
-		<edge from-layer="1173" from-port="2" to-layer="1175" to-port="0"/>
-		<edge from-layer="1174" from-port="0" to-layer="1175" to-port="1"/>
-		<edge from-layer="1175" from-port="2" to-layer="1176" to-port="0"/>
-		<edge from-layer="1176" from-port="1" to-layer="1178" to-port="0"/>
-		<edge from-layer="1177" from-port="0" to-layer="1178" to-port="1"/>
-		<edge from-layer="1178" from-port="2" to-layer="1180" to-port="0"/>
-		<edge from-layer="1179" from-port="0" to-layer="1180" to-port="1"/>
-		<edge from-layer="1180" from-port="2" to-layer="1190" to-port="0"/>
-		<edge from-layer="1181" from-port="1" to-layer="1183" to-port="0"/>
-		<edge from-layer="1182" from-port="0" to-layer="1183" to-port="1"/>
-		<edge from-layer="1183" from-port="2" to-layer="1185" to-port="0"/>
-		<edge from-layer="1184" from-port="0" to-layer="1185" to-port="1"/>
-		<edge from-layer="1185" from-port="2" to-layer="1187" to-port="0"/>
-		<edge from-layer="1186" from-port="0" to-layer="1187" to-port="1"/>
-		<edge from-layer="1187" from-port="2" to-layer="1189" to-port="0"/>
-		<edge from-layer="1188" from-port="0" to-layer="1189" to-port="1"/>
-		<edge from-layer="1189" from-port="2" to-layer="1190" to-port="1"/>
-		<edge from-layer="1190" from-port="2" to-layer="1192" to-port="0"/>
-		<edge from-layer="1190" from-port="2" to-layer="1195" to-port="0"/>
-		<edge from-layer="1191" from-port="0" to-layer="1192" to-port="1"/>
-		<edge from-layer="1192" from-port="2" to-layer="1194" to-port="0"/>
-		<edge from-layer="1193" from-port="0" to-layer="1194" to-port="1"/>
-		<edge from-layer="1194" from-port="2" to-layer="1196" to-port="0"/>
-		<edge from-layer="1195" from-port="1" to-layer="1196" to-port="1"/>
-		<edge from-layer="1196" from-port="2" to-layer="1198" to-port="0"/>
-		<edge from-layer="1197" from-port="0" to-layer="1198" to-port="1"/>
-		<edge from-layer="1198" from-port="2" to-layer="1200" to-port="0"/>
-		<edge from-layer="1199" from-port="0" to-layer="1200" to-port="1"/>
-		<edge from-layer="1200" from-port="2" to-layer="1201" to-port="0"/>
-		<edge from-layer="1201" from-port="1" to-layer="1203" to-port="0"/>
-		<edge from-layer="1202" from-port="0" to-layer="1203" to-port="1"/>
-		<edge from-layer="1203" from-port="2" to-layer="1205" to-port="0"/>
-		<edge from-layer="1204" from-port="0" to-layer="1205" to-port="1"/>
-		<edge from-layer="1205" from-port="2" to-layer="1206" to-port="1"/>
-		<edge from-layer="1206" from-port="2" to-layer="1211" to-port="0"/>
-		<edge from-layer="1206" from-port="2" to-layer="1430" to-port="1"/>
-		<edge from-layer="1206" from-port="2" to-layer="1208" to-port="0"/>
-		<edge from-layer="1207" from-port="0" to-layer="1208" to-port="1"/>
-		<edge from-layer="1208" from-port="2" to-layer="1210" to-port="0"/>
-		<edge from-layer="1209" from-port="0" to-layer="1210" to-port="1"/>
-		<edge from-layer="1210" from-port="2" to-layer="1212" to-port="0"/>
-		<edge from-layer="1211" from-port="1" to-layer="1212" to-port="1"/>
-		<edge from-layer="1212" from-port="2" to-layer="1214" to-port="0"/>
-		<edge from-layer="1213" from-port="0" to-layer="1214" to-port="1"/>
-		<edge from-layer="1214" from-port="2" to-layer="1216" to-port="0"/>
-		<edge from-layer="1215" from-port="0" to-layer="1216" to-port="1"/>
-		<edge from-layer="1216" from-port="2" to-layer="1218" to-port="0"/>
-		<edge from-layer="1217" from-port="0" to-layer="1218" to-port="1"/>
-		<edge from-layer="1218" from-port="2" to-layer="1220" to-port="0"/>
-		<edge from-layer="1219" from-port="0" to-layer="1220" to-port="1"/>
-		<edge from-layer="1220" from-port="2" to-layer="1222" to-port="0"/>
-		<edge from-layer="1221" from-port="0" to-layer="1222" to-port="1"/>
-		<edge from-layer="1222" from-port="2" to-layer="1224" to-port="0"/>
-		<edge from-layer="1223" from-port="0" to-layer="1224" to-port="1"/>
-		<edge from-layer="1223" from-port="0" to-layer="1488" to-port="1"/>
-		<edge from-layer="1224" from-port="2" to-layer="1226" to-port="0"/>
-		<edge from-layer="1224" from-port="2" to-layer="1290" to-port="1"/>
-		<edge from-layer="1225" from-port="0" to-layer="1226" to-port="1"/>
-		<edge from-layer="1226" from-port="2" to-layer="1228" to-port="0"/>
-		<edge from-layer="1227" from-port="0" to-layer="1228" to-port="1"/>
-		<edge from-layer="1228" from-port="2" to-layer="1230" to-port="0"/>
-		<edge from-layer="1229" from-port="0" to-layer="1230" to-port="1"/>
-		<edge from-layer="1230" from-port="2" to-layer="1232" to-port="0"/>
-		<edge from-layer="1230" from-port="2" to-layer="1240" to-port="0"/>
-		<edge from-layer="1230" from-port="2" to-layer="1250" to-port="0"/>
-		<edge from-layer="1231" from-port="0" to-layer="1232" to-port="1"/>
-		<edge from-layer="1232" from-port="2" to-layer="1234" to-port="0"/>
-		<edge from-layer="1233" from-port="0" to-layer="1234" to-port="1"/>
-		<edge from-layer="1233" from-port="0" to-layer="1503" to-port="1"/>
-		<edge from-layer="1233" from-port="0" to-layer="1241" to-port="1"/>
-		<edge from-layer="1233" from-port="0" to-layer="1513" to-port="1"/>
-		<edge from-layer="1233" from-port="0" to-layer="1251" to-port="1"/>
-		<edge from-layer="1233" from-port="0" to-layer="1497" to-port="1"/>
-		<edge from-layer="1234" from-port="2" to-layer="1236" to-port="0"/>
-		<edge from-layer="1235" from-port="0" to-layer="1236" to-port="1"/>
-		<edge from-layer="1236" from-port="2" to-layer="1238" to-port="0"/>
-		<edge from-layer="1237" from-port="0" to-layer="1238" to-port="1"/>
-		<edge from-layer="1237" from-port="0" to-layer="1500" to-port="1"/>
-		<edge from-layer="1237" from-port="0" to-layer="1254" to-port="1"/>
-		<edge from-layer="1237" from-port="0" to-layer="1244" to-port="1"/>
-		<edge from-layer="1237" from-port="0" to-layer="1516" to-port="1"/>
-		<edge from-layer="1237" from-port="0" to-layer="1506" to-port="1"/>
-		<edge from-layer="1238" from-port="2" to-layer="1245" to-port="0"/>
-		<edge from-layer="1239" from-port="0" to-layer="1240" to-port="1"/>
-		<edge from-layer="1240" from-port="2" to-layer="1241" to-port="0"/>
-		<edge from-layer="1241" from-port="2" to-layer="1243" to-port="0"/>
-		<edge from-layer="1242" from-port="0" to-layer="1243" to-port="1"/>
-		<edge from-layer="1243" from-port="2" to-layer="1244" to-port="0"/>
-		<edge from-layer="1244" from-port="2" to-layer="1245" to-port="1"/>
-		<edge from-layer="1245" from-port="2" to-layer="1247" to-port="0"/>
-		<edge from-layer="1246" from-port="0" to-layer="1247" to-port="1"/>
-		<edge from-layer="1247" from-port="2" to-layer="1248" to-port="0"/>
-		<edge from-layer="1248" from-port="1" to-layer="1255" to-port="0"/>
-		<edge from-layer="1249" from-port="0" to-layer="1250" to-port="1"/>
-		<edge from-layer="1250" from-port="2" to-layer="1251" to-port="0"/>
-		<edge from-layer="1251" from-port="2" to-layer="1253" to-port="0"/>
-		<edge from-layer="1252" from-port="0" to-layer="1253" to-port="1"/>
-		<edge from-layer="1253" from-port="2" to-layer="1254" to-port="0"/>
-		<edge from-layer="1254" from-port="2" to-layer="1255" to-port="1"/>
-		<edge from-layer="1255" from-port="2" to-layer="1256" to-port="0"/>
-		<edge from-layer="1255" from-port="2" to-layer="1268" to-port="0"/>
-		<edge from-layer="1256" from-port="1" to-layer="1266" to-port="0"/>
-		<edge from-layer="1256" from-port="1" to-layer="1277" to-port="0"/>
-		<edge from-layer="1256" from-port="1" to-layer="1259" to-port="0"/>
-		<edge from-layer="1256" from-port="1" to-layer="1280" to-port="0"/>
-		<edge from-layer="1257" from-port="0" to-layer="1259" to-port="1"/>
-		<edge from-layer="1258" from-port="0" to-layer="1259" to-port="2"/>
-		<edge from-layer="1259" from-port="3" to-layer="1261" to-port="0"/>
-		<edge from-layer="1259" from-port="3" to-layer="1272" to-port="0"/>
-		<edge from-layer="1260" from-port="0" to-layer="1261" to-port="1"/>
-		<edge from-layer="1261" from-port="2" to-layer="1263" to-port="0"/>
-		<edge from-layer="1262" from-port="0" to-layer="1263" to-port="1"/>
-		<edge from-layer="1263" from-port="2" to-layer="1267" to-port="0"/>
-		<edge from-layer="1264" from-port="0" to-layer="1266" to-port="1"/>
-		<edge from-layer="1265" from-port="0" to-layer="1266" to-port="2"/>
-		<edge from-layer="1266" from-port="3" to-layer="1267" to-port="2"/>
-		<edge from-layer="1267" from-port="3" to-layer="1268" to-port="1"/>
-		<edge from-layer="1268" from-port="2" to-layer="1270" to-port="0"/>
-		<edge from-layer="1269" from-port="0" to-layer="1270" to-port="1"/>
-		<edge from-layer="1270" from-port="2" to-layer="1286" to-port="0"/>
-		<edge from-layer="1271" from-port="0" to-layer="1272" to-port="1"/>
-		<edge from-layer="1272" from-port="2" to-layer="1274" to-port="0"/>
-		<edge from-layer="1273" from-port="0" to-layer="1274" to-port="1"/>
-		<edge from-layer="1274" from-port="2" to-layer="1285" to-port="0"/>
-		<edge from-layer="1275" from-port="0" to-layer="1277" to-port="1"/>
-		<edge from-layer="1276" from-port="0" to-layer="1277" to-port="2"/>
-		<edge from-layer="1277" from-port="3" to-layer="1285" to-port="1"/>
-		<edge from-layer="1278" from-port="0" to-layer="1280" to-port="1"/>
-		<edge from-layer="1279" from-port="0" to-layer="1280" to-port="2"/>
-		<edge from-layer="1280" from-port="3" to-layer="1282" to-port="0"/>
-		<edge from-layer="1281" from-port="0" to-layer="1282" to-port="1"/>
-		<edge from-layer="1282" from-port="2" to-layer="1284" to-port="0"/>
-		<edge from-layer="1283" from-port="0" to-layer="1284" to-port="1"/>
-		<edge from-layer="1284" from-port="2" to-layer="1285" to-port="2"/>
-		<edge from-layer="1285" from-port="3" to-layer="1286" to-port="1"/>
-		<edge from-layer="1286" from-port="2" to-layer="1288" to-port="0"/>
-		<edge from-layer="1287" from-port="0" to-layer="1288" to-port="1"/>
-		<edge from-layer="1288" from-port="2" to-layer="1289" to-port="1"/>
-		<edge from-layer="1289" from-port="2" to-layer="1290" to-port="0"/>
-		<edge from-layer="1290" from-port="2" to-layer="1381" to-port="1"/>
-		<edge from-layer="1290" from-port="2" to-layer="1292" to-port="0"/>
-		<edge from-layer="1291" from-port="0" to-layer="1292" to-port="1"/>
-		<edge from-layer="1292" from-port="2" to-layer="1294" to-port="0"/>
-		<edge from-layer="1293" from-port="0" to-layer="1294" to-port="1"/>
-		<edge from-layer="1294" from-port="2" to-layer="1296" to-port="0"/>
-		<edge from-layer="1295" from-port="0" to-layer="1296" to-port="1"/>
-		<edge from-layer="1296" from-port="2" to-layer="1298" to-port="0"/>
-		<edge from-layer="1297" from-port="0" to-layer="1298" to-port="1"/>
-		<edge from-layer="1298" from-port="2" to-layer="1309" to-port="0"/>
-		<edge from-layer="1298" from-port="2" to-layer="1300" to-port="0"/>
-		<edge from-layer="1299" from-port="0" to-layer="1308" to-port="0"/>
-		<edge from-layer="1300" from-port="1" to-layer="1321" to-port="0"/>
-		<edge from-layer="1300" from-port="1" to-layer="1314" to-port="0"/>
-		<edge from-layer="1300" from-port="1" to-layer="1303" to-port="0"/>
-		<edge from-layer="1301" from-port="0" to-layer="1303" to-port="1"/>
-		<edge from-layer="1302" from-port="0" to-layer="1303" to-port="2"/>
-		<edge from-layer="1303" from-port="3" to-layer="1323" to-port="0"/>
-		<edge from-layer="1303" from-port="3" to-layer="1305" to-port="0"/>
-		<edge from-layer="1304" from-port="0" to-layer="1305" to-port="1"/>
-		<edge from-layer="1305" from-port="2" to-layer="1307" to-port="0"/>
-		<edge from-layer="1306" from-port="0" to-layer="1307" to-port="1"/>
-		<edge from-layer="1307" from-port="2" to-layer="1308" to-port="2"/>
-		<edge from-layer="1308" from-port="3" to-layer="1309" to-port="1"/>
-		<edge from-layer="1309" from-port="2" to-layer="1311" to-port="0"/>
-		<edge from-layer="1310" from-port="0" to-layer="1311" to-port="1"/>
-		<edge from-layer="1311" from-port="2" to-layer="1327" to-port="0"/>
-		<edge from-layer="1312" from-port="0" to-layer="1314" to-port="1"/>
-		<edge from-layer="1313" from-port="0" to-layer="1314" to-port="2"/>
-		<edge from-layer="1314" from-port="3" to-layer="1316" to-port="0"/>
-		<edge from-layer="1315" from-port="0" to-layer="1316" to-port="1"/>
-		<edge from-layer="1316" from-port="2" to-layer="1318" to-port="0"/>
-		<edge from-layer="1317" from-port="0" to-layer="1318" to-port="1"/>
-		<edge from-layer="1318" from-port="2" to-layer="1326" to-port="0"/>
-		<edge from-layer="1319" from-port="0" to-layer="1321" to-port="1"/>
-		<edge from-layer="1320" from-port="0" to-layer="1321" to-port="2"/>
-		<edge from-layer="1321" from-port="3" to-layer="1326" to-port="1"/>
-		<edge from-layer="1322" from-port="0" to-layer="1323" to-port="1"/>
-		<edge from-layer="1323" from-port="2" to-layer="1325" to-port="0"/>
-		<edge from-layer="1324" from-port="0" to-layer="1325" to-port="1"/>
-		<edge from-layer="1325" from-port="2" to-layer="1326" to-port="2"/>
-		<edge from-layer="1326" from-port="3" to-layer="1327" to-port="1"/>
-		<edge from-layer="1327" from-port="2" to-layer="1336" to-port="0"/>
-		<edge from-layer="1328" from-port="0" to-layer="1329" to-port="1"/>
-		<edge from-layer="1329" from-port="2" to-layer="1331" to-port="0"/>
-		<edge from-layer="1330" from-port="0" to-layer="2843" to-port="1"/>
-		<edge from-layer="1330" from-port="0" to-layer="1331" to-port="1"/>
-		<edge from-layer="1330" from-port="0" to-layer="1342" to-port="1"/>
-		<edge from-layer="1330" from-port="0" to-layer="3211" to-port="1"/>
-		<edge from-layer="1330" from-port="0" to-layer="3201" to-port="1"/>
-		<edge from-layer="1330" from-port="0" to-layer="1602" to-port="1"/>
-		<edge from-layer="1330" from-port="0" to-layer="2853" to-port="1"/>
-		<edge from-layer="1330" from-port="0" to-layer="2495" to-port="1"/>
-		<edge from-layer="1330" from-port="0" to-layer="2485" to-port="1"/>
-		<edge from-layer="1330" from-port="0" to-layer="1950" to-port="1"/>
-		<edge from-layer="1330" from-port="0" to-layer="1940" to-port="1"/>
-		<edge from-layer="1330" from-port="0" to-layer="1592" to-port="1"/>
-		<edge from-layer="1331" from-port="2" to-layer="1333" to-port="0"/>
-		<edge from-layer="1332" from-port="0" to-layer="1333" to-port="1"/>
-		<edge from-layer="1333" from-port="2" to-layer="1335" to-port="0"/>
-		<edge from-layer="1334" from-port="0" to-layer="1605" to-port="1"/>
-		<edge from-layer="1334" from-port="0" to-layer="2846" to-port="1"/>
-		<edge from-layer="1334" from-port="0" to-layer="2498" to-port="1"/>
-		<edge from-layer="1334" from-port="0" to-layer="2488" to-port="1"/>
-		<edge from-layer="1334" from-port="0" to-layer="1595" to-port="1"/>
-		<edge from-layer="1334" from-port="0" to-layer="1345" to-port="1"/>
-		<edge from-layer="1334" from-port="0" to-layer="3204" to-port="1"/>
-		<edge from-layer="1334" from-port="0" to-layer="3214" to-port="1"/>
-		<edge from-layer="1334" from-port="0" to-layer="1953" to-port="1"/>
-		<edge from-layer="1334" from-port="0" to-layer="1943" to-port="1"/>
-		<edge from-layer="1334" from-port="0" to-layer="2856" to-port="1"/>
-		<edge from-layer="1334" from-port="0" to-layer="1335" to-port="1"/>
-		<edge from-layer="1335" from-port="2" to-layer="1336" to-port="1"/>
-		<edge from-layer="1336" from-port="2" to-layer="1338" to-port="0"/>
-		<edge from-layer="1337" from-port="0" to-layer="1338" to-port="1"/>
-		<edge from-layer="1338" from-port="2" to-layer="1339" to-port="0"/>
-		<edge from-layer="1339" from-port="1" to-layer="1346" to-port="0"/>
-		<edge from-layer="1340" from-port="0" to-layer="1341" to-port="1"/>
-		<edge from-layer="1341" from-port="2" to-layer="1342" to-port="0"/>
-		<edge from-layer="1342" from-port="2" to-layer="1344" to-port="0"/>
-		<edge from-layer="1343" from-port="0" to-layer="1344" to-port="1"/>
-		<edge from-layer="1344" from-port="2" to-layer="1345" to-port="0"/>
-		<edge from-layer="1345" from-port="2" to-layer="1346" to-port="1"/>
-		<edge from-layer="1346" from-port="2" to-layer="1347" to-port="0"/>
-		<edge from-layer="1346" from-port="2" to-layer="1359" to-port="0"/>
-		<edge from-layer="1347" from-port="1" to-layer="1368" to-port="0"/>
-		<edge from-layer="1347" from-port="1" to-layer="1371" to-port="0"/>
-		<edge from-layer="1347" from-port="1" to-layer="1350" to-port="0"/>
-		<edge from-layer="1347" from-port="1" to-layer="1357" to-port="0"/>
-		<edge from-layer="1348" from-port="0" to-layer="1350" to-port="1"/>
-		<edge from-layer="1349" from-port="0" to-layer="1350" to-port="2"/>
-		<edge from-layer="1350" from-port="3" to-layer="1363" to-port="0"/>
-		<edge from-layer="1350" from-port="3" to-layer="1352" to-port="0"/>
-		<edge from-layer="1351" from-port="0" to-layer="1352" to-port="1"/>
-		<edge from-layer="1352" from-port="2" to-layer="1354" to-port="0"/>
-		<edge from-layer="1353" from-port="0" to-layer="1354" to-port="1"/>
-		<edge from-layer="1354" from-port="2" to-layer="1358" to-port="0"/>
-		<edge from-layer="1355" from-port="0" to-layer="1357" to-port="1"/>
-		<edge from-layer="1356" from-port="0" to-layer="1357" to-port="2"/>
-		<edge from-layer="1357" from-port="3" to-layer="1358" to-port="2"/>
-		<edge from-layer="1358" from-port="3" to-layer="1359" to-port="1"/>
-		<edge from-layer="1359" from-port="2" to-layer="1361" to-port="0"/>
-		<edge from-layer="1360" from-port="0" to-layer="1361" to-port="1"/>
-		<edge from-layer="1361" from-port="2" to-layer="1377" to-port="0"/>
-		<edge from-layer="1362" from-port="0" to-layer="1363" to-port="1"/>
-		<edge from-layer="1363" from-port="2" to-layer="1365" to-port="0"/>
-		<edge from-layer="1364" from-port="0" to-layer="1365" to-port="1"/>
-		<edge from-layer="1365" from-port="2" to-layer="1376" to-port="0"/>
-		<edge from-layer="1366" from-port="0" to-layer="1368" to-port="1"/>
-		<edge from-layer="1367" from-port="0" to-layer="1368" to-port="2"/>
-		<edge from-layer="1368" from-port="3" to-layer="1376" to-port="1"/>
-		<edge from-layer="1369" from-port="0" to-layer="1371" to-port="1"/>
-		<edge from-layer="1370" from-port="0" to-layer="1371" to-port="2"/>
-		<edge from-layer="1371" from-port="3" to-layer="1373" to-port="0"/>
-		<edge from-layer="1372" from-port="0" to-layer="1373" to-port="1"/>
-		<edge from-layer="1373" from-port="2" to-layer="1375" to-port="0"/>
-		<edge from-layer="1374" from-port="0" to-layer="1375" to-port="1"/>
-		<edge from-layer="1375" from-port="2" to-layer="1376" to-port="2"/>
-		<edge from-layer="1376" from-port="3" to-layer="1377" to-port="1"/>
-		<edge from-layer="1377" from-port="2" to-layer="1379" to-port="0"/>
-		<edge from-layer="1378" from-port="0" to-layer="1379" to-port="1"/>
-		<edge from-layer="1379" from-port="2" to-layer="1380" to-port="1"/>
-		<edge from-layer="1380" from-port="2" to-layer="1381" to-port="0"/>
-		<edge from-layer="1381" from-port="2" to-layer="1383" to-port="0"/>
-		<edge from-layer="1381" from-port="2" to-layer="1421" to-port="1"/>
-		<edge from-layer="1382" from-port="0" to-layer="1383" to-port="1"/>
-		<edge from-layer="1383" from-port="2" to-layer="1385" to-port="0"/>
-		<edge from-layer="1384" from-port="0" to-layer="1385" to-port="1"/>
-		<edge from-layer="1385" from-port="2" to-layer="1387" to-port="0"/>
-		<edge from-layer="1386" from-port="0" to-layer="1387" to-port="1"/>
-		<edge from-layer="1387" from-port="2" to-layer="1389" to-port="0"/>
-		<edge from-layer="1388" from-port="0" to-layer="1389" to-port="1"/>
-		<edge from-layer="1389" from-port="2" to-layer="1390" to-port="1"/>
-		<edge from-layer="1390" from-port="2" to-layer="1394" to-port="0"/>
-		<edge from-layer="1390" from-port="2" to-layer="1415" to-port="0"/>
-		<edge from-layer="1390" from-port="2" to-layer="1405" to-port="0"/>
-		<edge from-layer="1391" from-port="0" to-layer="1405" to-port="1"/>
-		<edge from-layer="1392" from-port="0" to-layer="1403" to-port="0"/>
-		<edge from-layer="1393" from-port="0" to-layer="1403" to-port="1"/>
-		<edge from-layer="1394" from-port="1" to-layer="1397" to-port="0"/>
-		<edge from-layer="1395" from-port="0" to-layer="1397" to-port="1"/>
-		<edge from-layer="1396" from-port="0" to-layer="1397" to-port="2"/>
-		<edge from-layer="1397" from-port="3" to-layer="1399" to-port="0"/>
-		<edge from-layer="1398" from-port="0" to-layer="1399" to-port="1"/>
-		<edge from-layer="1399" from-port="2" to-layer="1401" to-port="0"/>
-		<edge from-layer="1400" from-port="0" to-layer="1401" to-port="1"/>
-		<edge from-layer="1401" from-port="2" to-layer="1403" to-port="2"/>
-		<edge from-layer="1401" from-port="2" to-layer="1409" to-port="2"/>
-		<edge from-layer="1401" from-port="2" to-layer="1412" to-port="0"/>
-		<edge from-layer="1402" from-port="0" to-layer="1403" to-port="3"/>
-		<edge from-layer="1403" from-port="4" to-layer="1405" to-port="2"/>
-		<edge from-layer="1404" from-port="0" to-layer="1405" to-port="3"/>
-		<edge from-layer="1405" from-port="4" to-layer="1417" to-port="0"/>
-		<edge from-layer="1406" from-port="0" to-layer="1409" to-port="0"/>
-		<edge from-layer="1407" from-port="0" to-layer="1409" to-port="1"/>
-		<edge from-layer="1407" from-port="0" to-layer="1413" to-port="1"/>
-		<edge from-layer="1408" from-port="0" to-layer="1409" to-port="3"/>
-		<edge from-layer="1408" from-port="0" to-layer="1413" to-port="3"/>
-		<edge from-layer="1409" from-port="4" to-layer="1415" to-port="1"/>
-		<edge from-layer="1410" from-port="0" to-layer="1413" to-port="0"/>
-		<edge from-layer="1411" from-port="0" to-layer="1412" to-port="1"/>
-		<edge from-layer="1412" from-port="2" to-layer="1413" to-port="2"/>
-		<edge from-layer="1413" from-port="4" to-layer="1415" to-port="2"/>
-		<edge from-layer="1414" from-port="0" to-layer="1415" to-port="3"/>
-		<edge from-layer="1415" from-port="4" to-layer="1416" to-port="0"/>
-		<edge from-layer="1416" from-port="1" to-layer="1417" to-port="1"/>
-		<edge from-layer="1417" from-port="2" to-layer="1419" to-port="0"/>
-		<edge from-layer="1418" from-port="0" to-layer="1419" to-port="1"/>
-		<edge from-layer="1419" from-port="2" to-layer="1420" to-port="1"/>
-		<edge from-layer="1420" from-port="2" to-layer="1421" to-port="0"/>
-		<edge from-layer="1421" from-port="2" to-layer="1423" to-port="0"/>
-		<edge from-layer="1422" from-port="0" to-layer="1423" to-port="1"/>
-		<edge from-layer="1422" from-port="0" to-layer="1682" to-port="1"/>
-		<edge from-layer="1423" from-port="2" to-layer="1425" to-port="0"/>
-		<edge from-layer="1424" from-port="0" to-layer="1425" to-port="1"/>
-		<edge from-layer="1425" from-port="2" to-layer="1427" to-port="0"/>
-		<edge from-layer="1426" from-port="0" to-layer="1427" to-port="1"/>
-		<edge from-layer="1427" from-port="2" to-layer="1429" to-port="0"/>
-		<edge from-layer="1428" from-port="0" to-layer="1429" to-port="1"/>
-		<edge from-layer="1429" from-port="2" to-layer="1430" to-port="0"/>
-		<edge from-layer="1430" from-port="2" to-layer="1471" to-port="0"/>
-		<edge from-layer="1430" from-port="2" to-layer="2586" to-port="1"/>
-		<edge from-layer="1430" from-port="2" to-layer="1435" to-port="0"/>
-		<edge from-layer="1430" from-port="2" to-layer="1432" to-port="0"/>
-		<edge from-layer="1431" from-port="0" to-layer="1432" to-port="1"/>
-		<edge from-layer="1432" from-port="2" to-layer="1434" to-port="0"/>
-		<edge from-layer="1433" from-port="0" to-layer="1434" to-port="1"/>
-		<edge from-layer="1434" from-port="2" to-layer="1436" to-port="0"/>
-		<edge from-layer="1435" from-port="1" to-layer="1436" to-port="1"/>
-		<edge from-layer="1436" from-port="2" to-layer="1438" to-port="0"/>
-		<edge from-layer="1437" from-port="0" to-layer="1438" to-port="1"/>
-		<edge from-layer="1438" from-port="2" to-layer="1440" to-port="0"/>
-		<edge from-layer="1439" from-port="0" to-layer="1440" to-port="1"/>
-		<edge from-layer="1440" from-port="2" to-layer="1441" to-port="0"/>
-		<edge from-layer="1441" from-port="1" to-layer="1443" to-port="0"/>
-		<edge from-layer="1442" from-port="0" to-layer="1443" to-port="1"/>
-		<edge from-layer="1443" from-port="2" to-layer="1445" to-port="0"/>
-		<edge from-layer="1444" from-port="0" to-layer="1445" to-port="1"/>
-		<edge from-layer="1445" from-port="2" to-layer="1455" to-port="0"/>
-		<edge from-layer="1446" from-port="1" to-layer="1448" to-port="0"/>
-		<edge from-layer="1447" from-port="0" to-layer="1448" to-port="1"/>
-		<edge from-layer="1448" from-port="2" to-layer="1450" to-port="0"/>
-		<edge from-layer="1449" from-port="0" to-layer="1450" to-port="1"/>
-		<edge from-layer="1450" from-port="2" to-layer="1452" to-port="0"/>
-		<edge from-layer="1451" from-port="0" to-layer="1452" to-port="1"/>
-		<edge from-layer="1452" from-port="2" to-layer="1454" to-port="0"/>
-		<edge from-layer="1453" from-port="0" to-layer="1454" to-port="1"/>
-		<edge from-layer="1454" from-port="2" to-layer="1455" to-port="1"/>
-		<edge from-layer="1455" from-port="2" to-layer="1457" to-port="0"/>
-		<edge from-layer="1455" from-port="2" to-layer="1460" to-port="0"/>
-		<edge from-layer="1456" from-port="0" to-layer="1457" to-port="1"/>
-		<edge from-layer="1457" from-port="2" to-layer="1459" to-port="0"/>
-		<edge from-layer="1458" from-port="0" to-layer="1459" to-port="1"/>
-		<edge from-layer="1459" from-port="2" to-layer="1461" to-port="0"/>
-		<edge from-layer="1460" from-port="1" to-layer="1461" to-port="1"/>
-		<edge from-layer="1461" from-port="2" to-layer="1463" to-port="0"/>
-		<edge from-layer="1462" from-port="0" to-layer="1463" to-port="1"/>
-		<edge from-layer="1463" from-port="2" to-layer="1465" to-port="0"/>
-		<edge from-layer="1464" from-port="0" to-layer="1465" to-port="1"/>
-		<edge from-layer="1465" from-port="2" to-layer="1466" to-port="0"/>
-		<edge from-layer="1466" from-port="1" to-layer="1468" to-port="0"/>
-		<edge from-layer="1467" from-port="0" to-layer="1468" to-port="1"/>
-		<edge from-layer="1468" from-port="2" to-layer="1470" to-port="0"/>
-		<edge from-layer="1469" from-port="0" to-layer="1470" to-port="1"/>
-		<edge from-layer="1470" from-port="2" to-layer="1471" to-port="1"/>
-		<edge from-layer="1471" from-port="2" to-layer="1689" to-port="1"/>
-		<edge from-layer="1471" from-port="2" to-layer="1476" to-port="0"/>
-		<edge from-layer="1471" from-port="2" to-layer="1473" to-port="0"/>
-		<edge from-layer="1472" from-port="0" to-layer="1473" to-port="1"/>
-		<edge from-layer="1473" from-port="2" to-layer="1475" to-port="0"/>
-		<edge from-layer="1474" from-port="0" to-layer="1475" to-port="1"/>
-		<edge from-layer="1475" from-port="2" to-layer="1477" to-port="0"/>
-		<edge from-layer="1476" from-port="1" to-layer="1477" to-port="1"/>
-		<edge from-layer="1477" from-port="2" to-layer="1479" to-port="0"/>
-		<edge from-layer="1478" from-port="0" to-layer="1479" to-port="1"/>
-		<edge from-layer="1479" from-port="2" to-layer="1481" to-port="0"/>
-		<edge from-layer="1480" from-port="0" to-layer="1481" to-port="1"/>
-		<edge from-layer="1481" from-port="2" to-layer="1483" to-port="0"/>
-		<edge from-layer="1482" from-port="0" to-layer="1483" to-port="1"/>
-		<edge from-layer="1483" from-port="2" to-layer="1485" to-port="0"/>
-		<edge from-layer="1484" from-port="0" to-layer="1485" to-port="1"/>
-		<edge from-layer="1485" from-port="2" to-layer="1487" to-port="0"/>
-		<edge from-layer="1486" from-port="0" to-layer="1487" to-port="1"/>
-		<edge from-layer="1487" from-port="2" to-layer="1488" to-port="0"/>
-		<edge from-layer="1488" from-port="2" to-layer="1552" to-port="1"/>
-		<edge from-layer="1488" from-port="2" to-layer="1490" to-port="0"/>
-		<edge from-layer="1489" from-port="0" to-layer="1490" to-port="1"/>
-		<edge from-layer="1490" from-port="2" to-layer="1492" to-port="0"/>
-		<edge from-layer="1491" from-port="0" to-layer="1492" to-port="1"/>
-		<edge from-layer="1492" from-port="2" to-layer="1494" to-port="0"/>
-		<edge from-layer="1493" from-port="0" to-layer="1494" to-port="1"/>
-		<edge from-layer="1494" from-port="2" to-layer="1512" to-port="0"/>
-		<edge from-layer="1494" from-port="2" to-layer="1502" to-port="0"/>
-		<edge from-layer="1494" from-port="2" to-layer="1496" to-port="0"/>
-		<edge from-layer="1495" from-port="0" to-layer="1496" to-port="1"/>
-		<edge from-layer="1496" from-port="2" to-layer="1497" to-port="0"/>
-		<edge from-layer="1497" from-port="2" to-layer="1499" to-port="0"/>
-		<edge from-layer="1498" from-port="0" to-layer="1499" to-port="1"/>
-		<edge from-layer="1499" from-port="2" to-layer="1500" to-port="0"/>
-		<edge from-layer="1500" from-port="2" to-layer="1507" to-port="0"/>
-		<edge from-layer="1501" from-port="0" to-layer="1502" to-port="1"/>
-		<edge from-layer="1502" from-port="2" to-layer="1503" to-port="0"/>
-		<edge from-layer="1503" from-port="2" to-layer="1505" to-port="0"/>
-		<edge from-layer="1504" from-port="0" to-layer="1505" to-port="1"/>
-		<edge from-layer="1505" from-port="2" to-layer="1506" to-port="0"/>
-		<edge from-layer="1506" from-port="2" to-layer="1507" to-port="1"/>
-		<edge from-layer="1507" from-port="2" to-layer="1509" to-port="0"/>
-		<edge from-layer="1508" from-port="0" to-layer="1509" to-port="1"/>
-		<edge from-layer="1509" from-port="2" to-layer="1510" to-port="0"/>
-		<edge from-layer="1510" from-port="1" to-layer="1517" to-port="0"/>
-		<edge from-layer="1511" from-port="0" to-layer="1512" to-port="1"/>
-		<edge from-layer="1512" from-port="2" to-layer="1513" to-port="0"/>
-		<edge from-layer="1513" from-port="2" to-layer="1515" to-port="0"/>
-		<edge from-layer="1514" from-port="0" to-layer="1515" to-port="1"/>
-		<edge from-layer="1515" from-port="2" to-layer="1516" to-port="0"/>
-		<edge from-layer="1516" from-port="2" to-layer="1517" to-port="1"/>
-		<edge from-layer="1517" from-port="2" to-layer="1518" to-port="0"/>
-		<edge from-layer="1517" from-port="2" to-layer="1530" to-port="0"/>
-		<edge from-layer="1518" from-port="1" to-layer="1542" to-port="0"/>
-		<edge from-layer="1518" from-port="1" to-layer="1521" to-port="0"/>
-		<edge from-layer="1518" from-port="1" to-layer="1528" to-port="0"/>
-		<edge from-layer="1518" from-port="1" to-layer="1539" to-port="0"/>
-		<edge from-layer="1519" from-port="0" to-layer="1521" to-port="1"/>
-		<edge from-layer="1520" from-port="0" to-layer="1521" to-port="2"/>
-		<edge from-layer="1521" from-port="3" to-layer="1523" to-port="0"/>
-		<edge from-layer="1521" from-port="3" to-layer="1534" to-port="0"/>
-		<edge from-layer="1522" from-port="0" to-layer="1523" to-port="1"/>
-		<edge from-layer="1523" from-port="2" to-layer="1525" to-port="0"/>
-		<edge from-layer="1524" from-port="0" to-layer="1525" to-port="1"/>
-		<edge from-layer="1525" from-port="2" to-layer="1529" to-port="0"/>
-		<edge from-layer="1526" from-port="0" to-layer="1528" to-port="1"/>
-		<edge from-layer="1527" from-port="0" to-layer="1528" to-port="2"/>
-		<edge from-layer="1528" from-port="3" to-layer="1529" to-port="2"/>
-		<edge from-layer="1529" from-port="3" to-layer="1530" to-port="1"/>
-		<edge from-layer="1530" from-port="2" to-layer="1532" to-port="0"/>
-		<edge from-layer="1531" from-port="0" to-layer="1532" to-port="1"/>
-		<edge from-layer="1532" from-port="2" to-layer="1548" to-port="0"/>
-		<edge from-layer="1533" from-port="0" to-layer="1534" to-port="1"/>
-		<edge from-layer="1534" from-port="2" to-layer="1536" to-port="0"/>
-		<edge from-layer="1535" from-port="0" to-layer="1536" to-port="1"/>
-		<edge from-layer="1536" from-port="2" to-layer="1547" to-port="0"/>
-		<edge from-layer="1537" from-port="0" to-layer="1539" to-port="1"/>
-		<edge from-layer="1538" from-port="0" to-layer="1539" to-port="2"/>
-		<edge from-layer="1539" from-port="3" to-layer="1547" to-port="1"/>
-		<edge from-layer="1540" from-port="0" to-layer="1542" to-port="1"/>
-		<edge from-layer="1541" from-port="0" to-layer="1542" to-port="2"/>
-		<edge from-layer="1542" from-port="3" to-layer="1544" to-port="0"/>
-		<edge from-layer="1543" from-port="0" to-layer="1544" to-port="1"/>
-		<edge from-layer="1544" from-port="2" to-layer="1546" to-port="0"/>
-		<edge from-layer="1545" from-port="0" to-layer="1546" to-port="1"/>
-		<edge from-layer="1546" from-port="2" to-layer="1547" to-port="2"/>
-		<edge from-layer="1547" from-port="3" to-layer="1548" to-port="1"/>
-		<edge from-layer="1548" from-port="2" to-layer="1550" to-port="0"/>
-		<edge from-layer="1549" from-port="0" to-layer="1550" to-port="1"/>
-		<edge from-layer="1550" from-port="2" to-layer="1551" to-port="1"/>
-		<edge from-layer="1551" from-port="2" to-layer="1552" to-port="0"/>
-		<edge from-layer="1552" from-port="2" to-layer="1554" to-port="0"/>
-		<edge from-layer="1552" from-port="2" to-layer="1641" to-port="1"/>
-		<edge from-layer="1553" from-port="0" to-layer="1554" to-port="1"/>
-		<edge from-layer="1554" from-port="2" to-layer="1556" to-port="0"/>
-		<edge from-layer="1555" from-port="0" to-layer="1556" to-port="1"/>
-		<edge from-layer="1556" from-port="2" to-layer="1558" to-port="0"/>
-		<edge from-layer="1557" from-port="0" to-layer="1558" to-port="1"/>
-		<edge from-layer="1558" from-port="2" to-layer="1560" to-port="0"/>
-		<edge from-layer="1559" from-port="0" to-layer="1560" to-port="1"/>
-		<edge from-layer="1560" from-port="2" to-layer="1562" to-port="0"/>
-		<edge from-layer="1560" from-port="2" to-layer="1571" to-port="0"/>
-		<edge from-layer="1561" from-port="0" to-layer="1570" to-port="0"/>
-		<edge from-layer="1562" from-port="1" to-layer="1565" to-port="0"/>
-		<edge from-layer="1562" from-port="1" to-layer="1583" to-port="0"/>
-		<edge from-layer="1562" from-port="1" to-layer="1576" to-port="0"/>
-		<edge from-layer="1563" from-port="0" to-layer="1565" to-port="1"/>
-		<edge from-layer="1564" from-port="0" to-layer="1565" to-port="2"/>
-		<edge from-layer="1565" from-port="3" to-layer="1585" to-port="0"/>
-		<edge from-layer="1565" from-port="3" to-layer="1567" to-port="0"/>
-		<edge from-layer="1566" from-port="0" to-layer="1567" to-port="1"/>
-		<edge from-layer="1567" from-port="2" to-layer="1569" to-port="0"/>
-		<edge from-layer="1568" from-port="0" to-layer="1569" to-port="1"/>
-		<edge from-layer="1569" from-port="2" to-layer="1570" to-port="2"/>
-		<edge from-layer="1570" from-port="3" to-layer="1571" to-port="1"/>
-		<edge from-layer="1571" from-port="2" to-layer="1573" to-port="0"/>
-		<edge from-layer="1572" from-port="0" to-layer="1573" to-port="1"/>
-		<edge from-layer="1573" from-port="2" to-layer="1589" to-port="0"/>
-		<edge from-layer="1574" from-port="0" to-layer="1576" to-port="1"/>
-		<edge from-layer="1575" from-port="0" to-layer="1576" to-port="2"/>
-		<edge from-layer="1576" from-port="3" to-layer="1578" to-port="0"/>
-		<edge from-layer="1577" from-port="0" to-layer="1578" to-port="1"/>
-		<edge from-layer="1578" from-port="2" to-layer="1580" to-port="0"/>
-		<edge from-layer="1579" from-port="0" to-layer="1580" to-port="1"/>
-		<edge from-layer="1580" from-port="2" to-layer="1588" to-port="0"/>
-		<edge from-layer="1581" from-port="0" to-layer="1583" to-port="1"/>
-		<edge from-layer="1582" from-port="0" to-layer="1583" to-port="2"/>
-		<edge from-layer="1583" from-port="3" to-layer="1588" to-port="1"/>
-		<edge from-layer="1584" from-port="0" to-layer="1585" to-port="1"/>
-		<edge from-layer="1585" from-port="2" to-layer="1587" to-port="0"/>
-		<edge from-layer="1586" from-port="0" to-layer="1587" to-port="1"/>
-		<edge from-layer="1587" from-port="2" to-layer="1588" to-port="2"/>
-		<edge from-layer="1588" from-port="3" to-layer="1589" to-port="1"/>
-		<edge from-layer="1589" from-port="2" to-layer="1596" to-port="0"/>
-		<edge from-layer="1590" from-port="0" to-layer="1591" to-port="1"/>
-		<edge from-layer="1591" from-port="2" to-layer="1592" to-port="0"/>
-		<edge from-layer="1592" from-port="2" to-layer="1594" to-port="0"/>
-		<edge from-layer="1593" from-port="0" to-layer="1594" to-port="1"/>
-		<edge from-layer="1594" from-port="2" to-layer="1595" to-port="0"/>
-		<edge from-layer="1595" from-port="2" to-layer="1596" to-port="1"/>
-		<edge from-layer="1596" from-port="2" to-layer="1598" to-port="0"/>
-		<edge from-layer="1597" from-port="0" to-layer="1598" to-port="1"/>
-		<edge from-layer="1598" from-port="2" to-layer="1599" to-port="0"/>
-		<edge from-layer="1599" from-port="1" to-layer="1606" to-port="0"/>
-		<edge from-layer="1600" from-port="0" to-layer="1601" to-port="1"/>
-		<edge from-layer="1601" from-port="2" to-layer="1602" to-port="0"/>
-		<edge from-layer="1602" from-port="2" to-layer="1604" to-port="0"/>
-		<edge from-layer="1603" from-port="0" to-layer="1604" to-port="1"/>
-		<edge from-layer="1604" from-port="2" to-layer="1605" to-port="0"/>
-		<edge from-layer="1605" from-port="2" to-layer="1606" to-port="1"/>
-		<edge from-layer="1606" from-port="2" to-layer="1607" to-port="0"/>
-		<edge from-layer="1606" from-port="2" to-layer="1619" to-port="0"/>
-		<edge from-layer="1607" from-port="1" to-layer="1610" to-port="0"/>
-		<edge from-layer="1607" from-port="1" to-layer="1617" to-port="0"/>
-		<edge from-layer="1607" from-port="1" to-layer="1631" to-port="0"/>
-		<edge from-layer="1607" from-port="1" to-layer="1628" to-port="0"/>
-		<edge from-layer="1608" from-port="0" to-layer="1610" to-port="1"/>
-		<edge from-layer="1609" from-port="0" to-layer="1610" to-port="2"/>
-		<edge from-layer="1610" from-port="3" to-layer="1612" to-port="0"/>
-		<edge from-layer="1610" from-port="3" to-layer="1623" to-port="0"/>
-		<edge from-layer="1611" from-port="0" to-layer="1612" to-port="1"/>
-		<edge from-layer="1612" from-port="2" to-layer="1614" to-port="0"/>
-		<edge from-layer="1613" from-port="0" to-layer="1614" to-port="1"/>
-		<edge from-layer="1614" from-port="2" to-layer="1618" to-port="0"/>
-		<edge from-layer="1615" from-port="0" to-layer="1617" to-port="1"/>
-		<edge from-layer="1616" from-port="0" to-layer="1617" to-port="2"/>
-		<edge from-layer="1617" from-port="3" to-layer="1618" to-port="2"/>
-		<edge from-layer="1618" from-port="3" to-layer="1619" to-port="1"/>
-		<edge from-layer="1619" from-port="2" to-layer="1621" to-port="0"/>
-		<edge from-layer="1620" from-port="0" to-layer="1621" to-port="1"/>
-		<edge from-layer="1621" from-port="2" to-layer="1637" to-port="0"/>
-		<edge from-layer="1622" from-port="0" to-layer="1623" to-port="1"/>
-		<edge from-layer="1623" from-port="2" to-layer="1625" to-port="0"/>
-		<edge from-layer="1624" from-port="0" to-layer="1625" to-port="1"/>
-		<edge from-layer="1625" from-port="2" to-layer="1636" to-port="0"/>
-		<edge from-layer="1626" from-port="0" to-layer="1628" to-port="1"/>
-		<edge from-layer="1627" from-port="0" to-layer="1628" to-port="2"/>
-		<edge from-layer="1628" from-port="3" to-layer="1636" to-port="1"/>
-		<edge from-layer="1629" from-port="0" to-layer="1631" to-port="1"/>
-		<edge from-layer="1630" from-port="0" to-layer="1631" to-port="2"/>
-		<edge from-layer="1631" from-port="3" to-layer="1633" to-port="0"/>
-		<edge from-layer="1632" from-port="0" to-layer="1633" to-port="1"/>
-		<edge from-layer="1633" from-port="2" to-layer="1635" to-port="0"/>
-		<edge from-layer="1634" from-port="0" to-layer="1635" to-port="1"/>
-		<edge from-layer="1635" from-port="2" to-layer="1636" to-port="2"/>
-		<edge from-layer="1636" from-port="3" to-layer="1637" to-port="1"/>
-		<edge from-layer="1637" from-port="2" to-layer="1639" to-port="0"/>
-		<edge from-layer="1638" from-port="0" to-layer="1639" to-port="1"/>
-		<edge from-layer="1639" from-port="2" to-layer="1640" to-port="1"/>
-		<edge from-layer="1640" from-port="2" to-layer="1641" to-port="0"/>
-		<edge from-layer="1641" from-port="2" to-layer="1681" to-port="1"/>
-		<edge from-layer="1641" from-port="2" to-layer="1643" to-port="0"/>
-		<edge from-layer="1642" from-port="0" to-layer="1643" to-port="1"/>
-		<edge from-layer="1643" from-port="2" to-layer="1645" to-port="0"/>
-		<edge from-layer="1644" from-port="0" to-layer="1645" to-port="1"/>
-		<edge from-layer="1645" from-port="2" to-layer="1647" to-port="0"/>
-		<edge from-layer="1646" from-port="0" to-layer="1647" to-port="1"/>
-		<edge from-layer="1647" from-port="2" to-layer="1649" to-port="0"/>
-		<edge from-layer="1648" from-port="0" to-layer="1649" to-port="1"/>
-		<edge from-layer="1649" from-port="2" to-layer="1650" to-port="1"/>
-		<edge from-layer="1650" from-port="2" to-layer="1654" to-port="0"/>
-		<edge from-layer="1650" from-port="2" to-layer="1675" to-port="0"/>
-		<edge from-layer="1650" from-port="2" to-layer="1665" to-port="0"/>
-		<edge from-layer="1651" from-port="0" to-layer="1665" to-port="1"/>
-		<edge from-layer="1652" from-port="0" to-layer="1663" to-port="0"/>
-		<edge from-layer="1653" from-port="0" to-layer="1663" to-port="1"/>
-		<edge from-layer="1654" from-port="1" to-layer="1657" to-port="0"/>
-		<edge from-layer="1655" from-port="0" to-layer="1657" to-port="1"/>
-		<edge from-layer="1656" from-port="0" to-layer="1657" to-port="2"/>
-		<edge from-layer="1657" from-port="3" to-layer="1659" to-port="0"/>
-		<edge from-layer="1658" from-port="0" to-layer="1659" to-port="1"/>
-		<edge from-layer="1659" from-port="2" to-layer="1661" to-port="0"/>
-		<edge from-layer="1660" from-port="0" to-layer="1661" to-port="1"/>
-		<edge from-layer="1661" from-port="2" to-layer="1672" to-port="0"/>
-		<edge from-layer="1661" from-port="2" to-layer="1663" to-port="2"/>
-		<edge from-layer="1661" from-port="2" to-layer="1669" to-port="2"/>
-		<edge from-layer="1662" from-port="0" to-layer="1663" to-port="3"/>
-		<edge from-layer="1663" from-port="4" to-layer="1665" to-port="2"/>
-		<edge from-layer="1664" from-port="0" to-layer="1665" to-port="3"/>
-		<edge from-layer="1665" from-port="4" to-layer="1677" to-port="0"/>
-		<edge from-layer="1666" from-port="0" to-layer="1669" to-port="0"/>
-		<edge from-layer="1667" from-port="0" to-layer="1673" to-port="1"/>
-		<edge from-layer="1667" from-port="0" to-layer="1669" to-port="1"/>
-		<edge from-layer="1668" from-port="0" to-layer="1673" to-port="3"/>
-		<edge from-layer="1668" from-port="0" to-layer="1669" to-port="3"/>
-		<edge from-layer="1669" from-port="4" to-layer="1675" to-port="1"/>
-		<edge from-layer="1670" from-port="0" to-layer="1673" to-port="0"/>
-		<edge from-layer="1671" from-port="0" to-layer="1672" to-port="1"/>
-		<edge from-layer="1672" from-port="2" to-layer="1673" to-port="2"/>
-		<edge from-layer="1673" from-port="4" to-layer="1675" to-port="2"/>
-		<edge from-layer="1674" from-port="0" to-layer="1675" to-port="3"/>
-		<edge from-layer="1675" from-port="4" to-layer="1676" to-port="0"/>
-		<edge from-layer="1676" from-port="1" to-layer="1677" to-port="1"/>
-		<edge from-layer="1677" from-port="2" to-layer="1679" to-port="0"/>
-		<edge from-layer="1678" from-port="0" to-layer="1679" to-port="1"/>
-		<edge from-layer="1679" from-port="2" to-layer="1680" to-port="1"/>
-		<edge from-layer="1680" from-port="2" to-layer="1681" to-port="0"/>
-		<edge from-layer="1681" from-port="2" to-layer="1682" to-port="0"/>
-		<edge from-layer="1682" from-port="2" to-layer="1684" to-port="0"/>
-		<edge from-layer="1683" from-port="0" to-layer="1684" to-port="1"/>
-		<edge from-layer="1684" from-port="2" to-layer="1686" to-port="0"/>
-		<edge from-layer="1685" from-port="0" to-layer="1686" to-port="1"/>
-		<edge from-layer="1686" from-port="2" to-layer="1688" to-port="0"/>
-		<edge from-layer="1687" from-port="0" to-layer="1688" to-port="1"/>
-		<edge from-layer="1688" from-port="2" to-layer="1689" to-port="0"/>
-		<edge from-layer="1689" from-port="2" to-layer="2228" to-port="1"/>
-		<edge from-layer="1689" from-port="2" to-layer="1691" to-port="0"/>
-		<edge from-layer="1690" from-port="0" to-layer="1691" to-port="1"/>
-		<edge from-layer="1691" from-port="2" to-layer="1693" to-port="0"/>
-		<edge from-layer="1692" from-port="0" to-layer="1693" to-port="1"/>
-		<edge from-layer="1693" from-port="2" to-layer="1695" to-port="0"/>
-		<edge from-layer="1693" from-port="2" to-layer="1734" to-port="0"/>
-		<edge from-layer="1693" from-port="2" to-layer="1698" to-port="0"/>
-		<edge from-layer="1693" from-port="2" to-layer="2172" to-port="1"/>
-		<edge from-layer="1694" from-port="0" to-layer="1695" to-port="1"/>
-		<edge from-layer="1695" from-port="2" to-layer="1697" to-port="0"/>
-		<edge from-layer="1696" from-port="0" to-layer="1697" to-port="1"/>
-		<edge from-layer="1697" from-port="2" to-layer="1699" to-port="0"/>
-		<edge from-layer="1698" from-port="1" to-layer="1699" to-port="1"/>
-		<edge from-layer="1699" from-port="2" to-layer="1701" to-port="0"/>
-		<edge from-layer="1700" from-port="0" to-layer="1701" to-port="1"/>
-		<edge from-layer="1701" from-port="2" to-layer="1703" to-port="0"/>
-		<edge from-layer="1702" from-port="0" to-layer="1703" to-port="1"/>
-		<edge from-layer="1703" from-port="2" to-layer="1704" to-port="0"/>
-		<edge from-layer="1704" from-port="1" to-layer="1706" to-port="0"/>
-		<edge from-layer="1705" from-port="0" to-layer="1706" to-port="1"/>
-		<edge from-layer="1706" from-port="2" to-layer="1708" to-port="0"/>
-		<edge from-layer="1707" from-port="0" to-layer="1708" to-port="1"/>
-		<edge from-layer="1708" from-port="2" to-layer="1718" to-port="0"/>
-		<edge from-layer="1709" from-port="1" to-layer="1711" to-port="0"/>
-		<edge from-layer="1710" from-port="0" to-layer="1711" to-port="1"/>
-		<edge from-layer="1711" from-port="2" to-layer="1713" to-port="0"/>
-		<edge from-layer="1712" from-port="0" to-layer="1713" to-port="1"/>
-		<edge from-layer="1713" from-port="2" to-layer="1715" to-port="0"/>
-		<edge from-layer="1714" from-port="0" to-layer="1715" to-port="1"/>
-		<edge from-layer="1715" from-port="2" to-layer="1717" to-port="0"/>
-		<edge from-layer="1716" from-port="0" to-layer="1717" to-port="1"/>
-		<edge from-layer="1717" from-port="2" to-layer="1718" to-port="1"/>
-		<edge from-layer="1718" from-port="2" to-layer="1723" to-port="0"/>
-		<edge from-layer="1718" from-port="2" to-layer="1720" to-port="0"/>
-		<edge from-layer="1719" from-port="0" to-layer="1720" to-port="1"/>
-		<edge from-layer="1720" from-port="2" to-layer="1722" to-port="0"/>
-		<edge from-layer="1721" from-port="0" to-layer="1722" to-port="1"/>
-		<edge from-layer="1722" from-port="2" to-layer="1724" to-port="0"/>
-		<edge from-layer="1723" from-port="1" to-layer="1724" to-port="1"/>
-		<edge from-layer="1724" from-port="2" to-layer="1726" to-port="0"/>
-		<edge from-layer="1725" from-port="0" to-layer="1726" to-port="1"/>
-		<edge from-layer="1726" from-port="2" to-layer="1728" to-port="0"/>
-		<edge from-layer="1727" from-port="0" to-layer="1728" to-port="1"/>
-		<edge from-layer="1728" from-port="2" to-layer="1729" to-port="0"/>
-		<edge from-layer="1729" from-port="1" to-layer="1731" to-port="0"/>
-		<edge from-layer="1730" from-port="0" to-layer="1731" to-port="1"/>
-		<edge from-layer="1731" from-port="2" to-layer="1733" to-port="0"/>
-		<edge from-layer="1732" from-port="0" to-layer="1733" to-port="1"/>
-		<edge from-layer="1733" from-port="2" to-layer="1734" to-port="1"/>
-		<edge from-layer="1734" from-port="2" to-layer="2126" to-port="1"/>
-		<edge from-layer="1734" from-port="2" to-layer="1736" to-port="0"/>
-		<edge from-layer="1734" from-port="2" to-layer="1775" to-port="0"/>
-		<edge from-layer="1734" from-port="2" to-layer="1739" to-port="0"/>
-		<edge from-layer="1735" from-port="0" to-layer="1736" to-port="1"/>
-		<edge from-layer="1736" from-port="2" to-layer="1738" to-port="0"/>
-		<edge from-layer="1737" from-port="0" to-layer="1738" to-port="1"/>
-		<edge from-layer="1738" from-port="2" to-layer="1740" to-port="0"/>
-		<edge from-layer="1739" from-port="1" to-layer="1740" to-port="1"/>
-		<edge from-layer="1740" from-port="2" to-layer="1742" to-port="0"/>
-		<edge from-layer="1741" from-port="0" to-layer="1742" to-port="1"/>
-		<edge from-layer="1742" from-port="2" to-layer="1744" to-port="0"/>
-		<edge from-layer="1743" from-port="0" to-layer="1744" to-port="1"/>
-		<edge from-layer="1744" from-port="2" to-layer="1745" to-port="0"/>
-		<edge from-layer="1745" from-port="1" to-layer="1747" to-port="0"/>
-		<edge from-layer="1746" from-port="0" to-layer="1747" to-port="1"/>
-		<edge from-layer="1747" from-port="2" to-layer="1749" to-port="0"/>
-		<edge from-layer="1748" from-port="0" to-layer="1749" to-port="1"/>
-		<edge from-layer="1749" from-port="2" to-layer="1759" to-port="0"/>
-		<edge from-layer="1750" from-port="1" to-layer="1752" to-port="0"/>
-		<edge from-layer="1751" from-port="0" to-layer="1752" to-port="1"/>
-		<edge from-layer="1752" from-port="2" to-layer="1754" to-port="0"/>
-		<edge from-layer="1753" from-port="0" to-layer="1754" to-port="1"/>
-		<edge from-layer="1754" from-port="2" to-layer="1756" to-port="0"/>
-		<edge from-layer="1755" from-port="0" to-layer="1756" to-port="1"/>
-		<edge from-layer="1756" from-port="2" to-layer="1758" to-port="0"/>
-		<edge from-layer="1757" from-port="0" to-layer="1758" to-port="1"/>
-		<edge from-layer="1758" from-port="2" to-layer="1759" to-port="1"/>
-		<edge from-layer="1759" from-port="2" to-layer="1761" to-port="0"/>
-		<edge from-layer="1759" from-port="2" to-layer="1764" to-port="0"/>
-		<edge from-layer="1760" from-port="0" to-layer="1761" to-port="1"/>
-		<edge from-layer="1761" from-port="2" to-layer="1763" to-port="0"/>
-		<edge from-layer="1762" from-port="0" to-layer="1763" to-port="1"/>
-		<edge from-layer="1763" from-port="2" to-layer="1765" to-port="0"/>
-		<edge from-layer="1764" from-port="1" to-layer="1765" to-port="1"/>
-		<edge from-layer="1765" from-port="2" to-layer="1767" to-port="0"/>
-		<edge from-layer="1766" from-port="0" to-layer="1767" to-port="1"/>
-		<edge from-layer="1767" from-port="2" to-layer="1769" to-port="0"/>
-		<edge from-layer="1768" from-port="0" to-layer="1769" to-port="1"/>
-		<edge from-layer="1769" from-port="2" to-layer="1770" to-port="0"/>
-		<edge from-layer="1770" from-port="1" to-layer="1772" to-port="0"/>
-		<edge from-layer="1771" from-port="0" to-layer="1772" to-port="1"/>
-		<edge from-layer="1772" from-port="2" to-layer="1774" to-port="0"/>
-		<edge from-layer="1773" from-port="0" to-layer="1774" to-port="1"/>
-		<edge from-layer="1774" from-port="2" to-layer="1775" to-port="1"/>
-		<edge from-layer="1775" from-port="2" to-layer="1816" to-port="0"/>
-		<edge from-layer="1775" from-port="2" to-layer="2080" to-port="1"/>
-		<edge from-layer="1775" from-port="2" to-layer="1780" to-port="0"/>
-		<edge from-layer="1775" from-port="2" to-layer="1777" to-port="0"/>
-		<edge from-layer="1776" from-port="0" to-layer="1777" to-port="1"/>
-		<edge from-layer="1777" from-port="2" to-layer="1779" to-port="0"/>
-		<edge from-layer="1778" from-port="0" to-layer="1779" to-port="1"/>
-		<edge from-layer="1779" from-port="2" to-layer="1781" to-port="0"/>
-		<edge from-layer="1780" from-port="1" to-layer="1781" to-port="1"/>
-		<edge from-layer="1781" from-port="2" to-layer="1783" to-port="0"/>
-		<edge from-layer="1782" from-port="0" to-layer="1783" to-port="1"/>
-		<edge from-layer="1783" from-port="2" to-layer="1785" to-port="0"/>
-		<edge from-layer="1784" from-port="0" to-layer="1785" to-port="1"/>
-		<edge from-layer="1785" from-port="2" to-layer="1786" to-port="0"/>
-		<edge from-layer="1786" from-port="1" to-layer="1788" to-port="0"/>
-		<edge from-layer="1787" from-port="0" to-layer="1788" to-port="1"/>
-		<edge from-layer="1788" from-port="2" to-layer="1790" to-port="0"/>
-		<edge from-layer="1789" from-port="0" to-layer="1790" to-port="1"/>
-		<edge from-layer="1790" from-port="2" to-layer="1800" to-port="0"/>
-		<edge from-layer="1791" from-port="1" to-layer="1793" to-port="0"/>
-		<edge from-layer="1792" from-port="0" to-layer="1793" to-port="1"/>
-		<edge from-layer="1793" from-port="2" to-layer="1795" to-port="0"/>
-		<edge from-layer="1794" from-port="0" to-layer="1795" to-port="1"/>
-		<edge from-layer="1795" from-port="2" to-layer="1797" to-port="0"/>
-		<edge from-layer="1796" from-port="0" to-layer="1797" to-port="1"/>
-		<edge from-layer="1797" from-port="2" to-layer="1799" to-port="0"/>
-		<edge from-layer="1798" from-port="0" to-layer="1799" to-port="1"/>
-		<edge from-layer="1799" from-port="2" to-layer="1800" to-port="1"/>
-		<edge from-layer="1800" from-port="2" to-layer="1802" to-port="0"/>
-		<edge from-layer="1800" from-port="2" to-layer="1805" to-port="0"/>
-		<edge from-layer="1801" from-port="0" to-layer="1802" to-port="1"/>
-		<edge from-layer="1802" from-port="2" to-layer="1804" to-port="0"/>
-		<edge from-layer="1803" from-port="0" to-layer="1804" to-port="1"/>
-		<edge from-layer="1804" from-port="2" to-layer="1806" to-port="0"/>
-		<edge from-layer="1805" from-port="1" to-layer="1806" to-port="1"/>
-		<edge from-layer="1806" from-port="2" to-layer="1808" to-port="0"/>
-		<edge from-layer="1807" from-port="0" to-layer="1808" to-port="1"/>
-		<edge from-layer="1808" from-port="2" to-layer="1810" to-port="0"/>
-		<edge from-layer="1809" from-port="0" to-layer="1810" to-port="1"/>
-		<edge from-layer="1810" from-port="2" to-layer="1811" to-port="0"/>
-		<edge from-layer="1811" from-port="1" to-layer="1813" to-port="0"/>
-		<edge from-layer="1812" from-port="0" to-layer="1813" to-port="1"/>
-		<edge from-layer="1813" from-port="2" to-layer="1815" to-port="0"/>
-		<edge from-layer="1814" from-port="0" to-layer="1815" to-port="1"/>
-		<edge from-layer="1815" from-port="2" to-layer="1816" to-port="1"/>
-		<edge from-layer="1816" from-port="2" to-layer="2038" to-port="1"/>
-		<edge from-layer="1816" from-port="2" to-layer="1821" to-port="0"/>
-		<edge from-layer="1816" from-port="2" to-layer="1818" to-port="0"/>
-		<edge from-layer="1817" from-port="0" to-layer="1818" to-port="1"/>
-		<edge from-layer="1818" from-port="2" to-layer="1820" to-port="0"/>
-		<edge from-layer="1819" from-port="0" to-layer="1820" to-port="1"/>
-		<edge from-layer="1820" from-port="2" to-layer="1822" to-port="0"/>
-		<edge from-layer="1821" from-port="1" to-layer="1822" to-port="1"/>
-		<edge from-layer="1822" from-port="2" to-layer="1824" to-port="0"/>
-		<edge from-layer="1823" from-port="0" to-layer="1824" to-port="1"/>
-		<edge from-layer="1824" from-port="2" to-layer="1826" to-port="0"/>
-		<edge from-layer="1825" from-port="0" to-layer="1826" to-port="1"/>
-		<edge from-layer="1826" from-port="2" to-layer="1828" to-port="0"/>
-		<edge from-layer="1827" from-port="0" to-layer="1828" to-port="1"/>
-		<edge from-layer="1828" from-port="2" to-layer="1830" to-port="0"/>
-		<edge from-layer="1829" from-port="0" to-layer="1830" to-port="1"/>
-		<edge from-layer="1830" from-port="2" to-layer="1832" to-port="0"/>
-		<edge from-layer="1831" from-port="0" to-layer="1832" to-port="1"/>
-		<edge from-layer="1832" from-port="2" to-layer="1834" to-port="0"/>
-		<edge from-layer="1833" from-port="0" to-layer="1834" to-port="1"/>
-		<edge from-layer="1834" from-port="2" to-layer="1836" to-port="0"/>
-		<edge from-layer="1834" from-port="2" to-layer="1900" to-port="1"/>
-		<edge from-layer="1835" from-port="0" to-layer="1836" to-port="1"/>
-		<edge from-layer="1836" from-port="2" to-layer="1838" to-port="0"/>
-		<edge from-layer="1837" from-port="0" to-layer="1838" to-port="1"/>
-		<edge from-layer="1838" from-port="2" to-layer="1840" to-port="0"/>
-		<edge from-layer="1839" from-port="0" to-layer="1840" to-port="1"/>
-		<edge from-layer="1840" from-port="2" to-layer="1860" to-port="0"/>
-		<edge from-layer="1840" from-port="2" to-layer="1850" to-port="0"/>
-		<edge from-layer="1840" from-port="2" to-layer="1842" to-port="0"/>
-		<edge from-layer="1841" from-port="0" to-layer="1842" to-port="1"/>
-		<edge from-layer="1842" from-port="2" to-layer="1844" to-port="0"/>
-		<edge from-layer="1843" from-port="0" to-layer="1851" to-port="1"/>
-		<edge from-layer="1843" from-port="0" to-layer="1861" to-port="1"/>
-		<edge from-layer="1843" from-port="0" to-layer="1844" to-port="1"/>
-		<edge from-layer="1844" from-port="2" to-layer="1846" to-port="0"/>
-		<edge from-layer="1845" from-port="0" to-layer="1846" to-port="1"/>
-		<edge from-layer="1846" from-port="2" to-layer="1848" to-port="0"/>
-		<edge from-layer="1847" from-port="0" to-layer="1854" to-port="1"/>
-		<edge from-layer="1847" from-port="0" to-layer="1864" to-port="1"/>
-		<edge from-layer="1847" from-port="0" to-layer="1848" to-port="1"/>
-		<edge from-layer="1848" from-port="2" to-layer="1855" to-port="0"/>
-		<edge from-layer="1849" from-port="0" to-layer="1850" to-port="1"/>
-		<edge from-layer="1850" from-port="2" to-layer="1851" to-port="0"/>
-		<edge from-layer="1851" from-port="2" to-layer="1853" to-port="0"/>
-		<edge from-layer="1852" from-port="0" to-layer="1853" to-port="1"/>
-		<edge from-layer="1853" from-port="2" to-layer="1854" to-port="0"/>
-		<edge from-layer="1854" from-port="2" to-layer="1855" to-port="1"/>
-		<edge from-layer="1855" from-port="2" to-layer="1857" to-port="0"/>
-		<edge from-layer="1856" from-port="0" to-layer="1857" to-port="1"/>
-		<edge from-layer="1857" from-port="2" to-layer="1858" to-port="0"/>
-		<edge from-layer="1858" from-port="1" to-layer="1865" to-port="0"/>
-		<edge from-layer="1859" from-port="0" to-layer="1860" to-port="1"/>
-		<edge from-layer="1860" from-port="2" to-layer="1861" to-port="0"/>
-		<edge from-layer="1861" from-port="2" to-layer="1863" to-port="0"/>
-		<edge from-layer="1862" from-port="0" to-layer="1863" to-port="1"/>
-		<edge from-layer="1863" from-port="2" to-layer="1864" to-port="0"/>
-		<edge from-layer="1864" from-port="2" to-layer="1865" to-port="1"/>
-		<edge from-layer="1865" from-port="2" to-layer="1878" to-port="0"/>
-		<edge from-layer="1865" from-port="2" to-layer="1866" to-port="0"/>
-		<edge from-layer="1866" from-port="1" to-layer="1887" to-port="0"/>
-		<edge from-layer="1866" from-port="1" to-layer="1876" to-port="0"/>
-		<edge from-layer="1866" from-port="1" to-layer="1890" to-port="0"/>
-		<edge from-layer="1866" from-port="1" to-layer="1869" to-port="0"/>
-		<edge from-layer="1867" from-port="0" to-layer="1869" to-port="1"/>
-		<edge from-layer="1868" from-port="0" to-layer="1869" to-port="2"/>
-		<edge from-layer="1869" from-port="3" to-layer="1882" to-port="0"/>
-		<edge from-layer="1869" from-port="3" to-layer="1871" to-port="0"/>
-		<edge from-layer="1870" from-port="0" to-layer="1871" to-port="1"/>
-		<edge from-layer="1871" from-port="2" to-layer="1873" to-port="0"/>
-		<edge from-layer="1872" from-port="0" to-layer="1873" to-port="1"/>
-		<edge from-layer="1873" from-port="2" to-layer="1877" to-port="0"/>
-		<edge from-layer="1874" from-port="0" to-layer="1876" to-port="1"/>
-		<edge from-layer="1875" from-port="0" to-layer="1876" to-port="2"/>
-		<edge from-layer="1876" from-port="3" to-layer="1877" to-port="2"/>
-		<edge from-layer="1877" from-port="3" to-layer="1878" to-port="1"/>
-		<edge from-layer="1878" from-port="2" to-layer="1880" to-port="0"/>
-		<edge from-layer="1879" from-port="0" to-layer="1880" to-port="1"/>
-		<edge from-layer="1880" from-port="2" to-layer="1896" to-port="0"/>
-		<edge from-layer="1881" from-port="0" to-layer="1882" to-port="1"/>
-		<edge from-layer="1882" from-port="2" to-layer="1884" to-port="0"/>
-		<edge from-layer="1883" from-port="0" to-layer="1884" to-port="1"/>
-		<edge from-layer="1884" from-port="2" to-layer="1895" to-port="0"/>
-		<edge from-layer="1885" from-port="0" to-layer="1887" to-port="1"/>
-		<edge from-layer="1886" from-port="0" to-layer="1887" to-port="2"/>
-		<edge from-layer="1887" from-port="3" to-layer="1895" to-port="1"/>
-		<edge from-layer="1888" from-port="0" to-layer="1890" to-port="1"/>
-		<edge from-layer="1889" from-port="0" to-layer="1890" to-port="2"/>
-		<edge from-layer="1890" from-port="3" to-layer="1892" to-port="0"/>
-		<edge from-layer="1891" from-port="0" to-layer="1892" to-port="1"/>
-		<edge from-layer="1892" from-port="2" to-layer="1894" to-port="0"/>
-		<edge from-layer="1893" from-port="0" to-layer="1894" to-port="1"/>
-		<edge from-layer="1894" from-port="2" to-layer="1895" to-port="2"/>
-		<edge from-layer="1895" from-port="3" to-layer="1896" to-port="1"/>
-		<edge from-layer="1896" from-port="2" to-layer="1898" to-port="0"/>
-		<edge from-layer="1897" from-port="0" to-layer="1898" to-port="1"/>
-		<edge from-layer="1898" from-port="2" to-layer="1899" to-port="1"/>
-		<edge from-layer="1899" from-port="2" to-layer="1900" to-port="0"/>
-		<edge from-layer="1900" from-port="2" to-layer="1902" to-port="0"/>
-		<edge from-layer="1900" from-port="2" to-layer="1989" to-port="1"/>
-		<edge from-layer="1901" from-port="0" to-layer="1902" to-port="1"/>
-		<edge from-layer="1902" from-port="2" to-layer="1904" to-port="0"/>
-		<edge from-layer="1903" from-port="0" to-layer="1904" to-port="1"/>
-		<edge from-layer="1904" from-port="2" to-layer="1906" to-port="0"/>
-		<edge from-layer="1905" from-port="0" to-layer="1906" to-port="1"/>
-		<edge from-layer="1906" from-port="2" to-layer="1908" to-port="0"/>
-		<edge from-layer="1907" from-port="0" to-layer="1908" to-port="1"/>
-		<edge from-layer="1908" from-port="2" to-layer="1910" to-port="0"/>
-		<edge from-layer="1908" from-port="2" to-layer="1919" to-port="0"/>
-		<edge from-layer="1909" from-port="0" to-layer="1918" to-port="0"/>
-		<edge from-layer="1910" from-port="1" to-layer="1931" to-port="0"/>
-		<edge from-layer="1910" from-port="1" to-layer="1913" to-port="0"/>
-		<edge from-layer="1910" from-port="1" to-layer="1924" to-port="0"/>
-		<edge from-layer="1911" from-port="0" to-layer="1913" to-port="1"/>
-		<edge from-layer="1912" from-port="0" to-layer="1913" to-port="2"/>
-		<edge from-layer="1913" from-port="3" to-layer="1933" to-port="0"/>
-		<edge from-layer="1913" from-port="3" to-layer="1915" to-port="0"/>
-		<edge from-layer="1914" from-port="0" to-layer="1915" to-port="1"/>
-		<edge from-layer="1915" from-port="2" to-layer="1917" to-port="0"/>
-		<edge from-layer="1916" from-port="0" to-layer="1917" to-port="1"/>
-		<edge from-layer="1917" from-port="2" to-layer="1918" to-port="2"/>
-		<edge from-layer="1918" from-port="3" to-layer="1919" to-port="1"/>
-		<edge from-layer="1919" from-port="2" to-layer="1921" to-port="0"/>
-		<edge from-layer="1920" from-port="0" to-layer="1921" to-port="1"/>
-		<edge from-layer="1921" from-port="2" to-layer="1937" to-port="0"/>
-		<edge from-layer="1922" from-port="0" to-layer="1924" to-port="1"/>
-		<edge from-layer="1923" from-port="0" to-layer="1924" to-port="2"/>
-		<edge from-layer="1924" from-port="3" to-layer="1926" to-port="0"/>
-		<edge from-layer="1925" from-port="0" to-layer="1926" to-port="1"/>
-		<edge from-layer="1926" from-port="2" to-layer="1928" to-port="0"/>
-		<edge from-layer="1927" from-port="0" to-layer="1928" to-port="1"/>
-		<edge from-layer="1928" from-port="2" to-layer="1936" to-port="0"/>
-		<edge from-layer="1929" from-port="0" to-layer="1931" to-port="1"/>
-		<edge from-layer="1930" from-port="0" to-layer="1931" to-port="2"/>
-		<edge from-layer="1931" from-port="3" to-layer="1936" to-port="1"/>
-		<edge from-layer="1932" from-port="0" to-layer="1933" to-port="1"/>
-		<edge from-layer="1933" from-port="2" to-layer="1935" to-port="0"/>
-		<edge from-layer="1934" from-port="0" to-layer="1935" to-port="1"/>
-		<edge from-layer="1935" from-port="2" to-layer="1936" to-port="2"/>
-		<edge from-layer="1936" from-port="3" to-layer="1937" to-port="1"/>
-		<edge from-layer="1937" from-port="2" to-layer="1944" to-port="0"/>
-		<edge from-layer="1938" from-port="0" to-layer="1939" to-port="1"/>
-		<edge from-layer="1939" from-port="2" to-layer="1940" to-port="0"/>
-		<edge from-layer="1940" from-port="2" to-layer="1942" to-port="0"/>
-		<edge from-layer="1941" from-port="0" to-layer="1942" to-port="1"/>
-		<edge from-layer="1942" from-port="2" to-layer="1943" to-port="0"/>
-		<edge from-layer="1943" from-port="2" to-layer="1944" to-port="1"/>
-		<edge from-layer="1944" from-port="2" to-layer="1946" to-port="0"/>
-		<edge from-layer="1945" from-port="0" to-layer="1946" to-port="1"/>
-		<edge from-layer="1946" from-port="2" to-layer="1947" to-port="0"/>
-		<edge from-layer="1947" from-port="1" to-layer="1954" to-port="0"/>
-		<edge from-layer="1948" from-port="0" to-layer="1949" to-port="1"/>
-		<edge from-layer="1949" from-port="2" to-layer="1950" to-port="0"/>
-		<edge from-layer="1950" from-port="2" to-layer="1952" to-port="0"/>
-		<edge from-layer="1951" from-port="0" to-layer="1952" to-port="1"/>
-		<edge from-layer="1952" from-port="2" to-layer="1953" to-port="0"/>
-		<edge from-layer="1953" from-port="2" to-layer="1954" to-port="1"/>
-		<edge from-layer="1954" from-port="2" to-layer="1955" to-port="0"/>
-		<edge from-layer="1954" from-port="2" to-layer="1967" to-port="0"/>
-		<edge from-layer="1955" from-port="1" to-layer="1965" to-port="0"/>
-		<edge from-layer="1955" from-port="1" to-layer="1976" to-port="0"/>
-		<edge from-layer="1955" from-port="1" to-layer="1979" to-port="0"/>
-		<edge from-layer="1955" from-port="1" to-layer="1958" to-port="0"/>
-		<edge from-layer="1956" from-port="0" to-layer="1958" to-port="1"/>
-		<edge from-layer="1957" from-port="0" to-layer="1958" to-port="2"/>
-		<edge from-layer="1958" from-port="3" to-layer="1971" to-port="0"/>
-		<edge from-layer="1958" from-port="3" to-layer="1960" to-port="0"/>
-		<edge from-layer="1959" from-port="0" to-layer="1960" to-port="1"/>
-		<edge from-layer="1960" from-port="2" to-layer="1962" to-port="0"/>
-		<edge from-layer="1961" from-port="0" to-layer="1962" to-port="1"/>
-		<edge from-layer="1962" from-port="2" to-layer="1966" to-port="0"/>
-		<edge from-layer="1963" from-port="0" to-layer="1965" to-port="1"/>
-		<edge from-layer="1964" from-port="0" to-layer="1965" to-port="2"/>
-		<edge from-layer="1965" from-port="3" to-layer="1966" to-port="2"/>
-		<edge from-layer="1966" from-port="3" to-layer="1967" to-port="1"/>
-		<edge from-layer="1967" from-port="2" to-layer="1969" to-port="0"/>
-		<edge from-layer="1968" from-port="0" to-layer="1969" to-port="1"/>
-		<edge from-layer="1969" from-port="2" to-layer="1985" to-port="0"/>
-		<edge from-layer="1970" from-port="0" to-layer="1971" to-port="1"/>
-		<edge from-layer="1971" from-port="2" to-layer="1973" to-port="0"/>
-		<edge from-layer="1972" from-port="0" to-layer="1973" to-port="1"/>
-		<edge from-layer="1973" from-port="2" to-layer="1984" to-port="0"/>
-		<edge from-layer="1974" from-port="0" to-layer="1976" to-port="1"/>
-		<edge from-layer="1975" from-port="0" to-layer="1976" to-port="2"/>
-		<edge from-layer="1976" from-port="3" to-layer="1984" to-port="1"/>
-		<edge from-layer="1977" from-port="0" to-layer="1979" to-port="1"/>
-		<edge from-layer="1978" from-port="0" to-layer="1979" to-port="2"/>
-		<edge from-layer="1979" from-port="3" to-layer="1981" to-port="0"/>
-		<edge from-layer="1980" from-port="0" to-layer="1981" to-port="1"/>
-		<edge from-layer="1981" from-port="2" to-layer="1983" to-port="0"/>
-		<edge from-layer="1982" from-port="0" to-layer="1983" to-port="1"/>
-		<edge from-layer="1983" from-port="2" to-layer="1984" to-port="2"/>
-		<edge from-layer="1984" from-port="3" to-layer="1985" to-port="1"/>
-		<edge from-layer="1985" from-port="2" to-layer="1987" to-port="0"/>
-		<edge from-layer="1986" from-port="0" to-layer="1987" to-port="1"/>
-		<edge from-layer="1987" from-port="2" to-layer="1988" to-port="1"/>
-		<edge from-layer="1988" from-port="2" to-layer="1989" to-port="0"/>
-		<edge from-layer="1989" from-port="2" to-layer="1991" to-port="0"/>
-		<edge from-layer="1989" from-port="2" to-layer="2029" to-port="1"/>
-		<edge from-layer="1990" from-port="0" to-layer="1991" to-port="1"/>
-		<edge from-layer="1991" from-port="2" to-layer="1993" to-port="0"/>
-		<edge from-layer="1992" from-port="0" to-layer="1993" to-port="1"/>
-		<edge from-layer="1993" from-port="2" to-layer="1995" to-port="0"/>
-		<edge from-layer="1994" from-port="0" to-layer="1995" to-port="1"/>
-		<edge from-layer="1995" from-port="2" to-layer="1997" to-port="0"/>
-		<edge from-layer="1996" from-port="0" to-layer="1997" to-port="1"/>
-		<edge from-layer="1997" from-port="2" to-layer="1998" to-port="1"/>
-		<edge from-layer="1998" from-port="2" to-layer="2013" to-port="0"/>
-		<edge from-layer="1998" from-port="2" to-layer="2002" to-port="0"/>
-		<edge from-layer="1998" from-port="2" to-layer="2023" to-port="0"/>
-		<edge from-layer="1999" from-port="0" to-layer="2013" to-port="1"/>
-		<edge from-layer="2000" from-port="0" to-layer="2011" to-port="0"/>
-		<edge from-layer="2001" from-port="0" to-layer="2011" to-port="1"/>
-		<edge from-layer="2002" from-port="1" to-layer="2005" to-port="0"/>
-		<edge from-layer="2003" from-port="0" to-layer="2005" to-port="1"/>
-		<edge from-layer="2004" from-port="0" to-layer="2005" to-port="2"/>
-		<edge from-layer="2005" from-port="3" to-layer="2007" to-port="0"/>
-		<edge from-layer="2006" from-port="0" to-layer="2007" to-port="1"/>
-		<edge from-layer="2007" from-port="2" to-layer="2009" to-port="0"/>
-		<edge from-layer="2008" from-port="0" to-layer="2009" to-port="1"/>
-		<edge from-layer="2009" from-port="2" to-layer="2017" to-port="2"/>
-		<edge from-layer="2009" from-port="2" to-layer="2020" to-port="0"/>
-		<edge from-layer="2009" from-port="2" to-layer="2011" to-port="2"/>
-		<edge from-layer="2010" from-port="0" to-layer="2011" to-port="3"/>
-		<edge from-layer="2011" from-port="4" to-layer="2013" to-port="2"/>
-		<edge from-layer="2012" from-port="0" to-layer="2013" to-port="3"/>
-		<edge from-layer="2013" from-port="4" to-layer="2025" to-port="0"/>
-		<edge from-layer="2014" from-port="0" to-layer="2017" to-port="0"/>
-		<edge from-layer="2015" from-port="0" to-layer="2021" to-port="1"/>
-		<edge from-layer="2015" from-port="0" to-layer="2017" to-port="1"/>
-		<edge from-layer="2016" from-port="0" to-layer="2017" to-port="3"/>
-		<edge from-layer="2016" from-port="0" to-layer="2021" to-port="3"/>
-		<edge from-layer="2017" from-port="4" to-layer="2023" to-port="1"/>
-		<edge from-layer="2018" from-port="0" to-layer="2021" to-port="0"/>
-		<edge from-layer="2019" from-port="0" to-layer="2020" to-port="1"/>
-		<edge from-layer="2020" from-port="2" to-layer="2021" to-port="2"/>
-		<edge from-layer="2021" from-port="4" to-layer="2023" to-port="2"/>
-		<edge from-layer="2022" from-port="0" to-layer="2023" to-port="3"/>
-		<edge from-layer="2023" from-port="4" to-layer="2024" to-port="0"/>
-		<edge from-layer="2024" from-port="1" to-layer="2025" to-port="1"/>
-		<edge from-layer="2025" from-port="2" to-layer="2027" to-port="0"/>
-		<edge from-layer="2026" from-port="0" to-layer="2027" to-port="1"/>
-		<edge from-layer="2027" from-port="2" to-layer="2028" to-port="1"/>
-		<edge from-layer="2028" from-port="2" to-layer="2029" to-port="0"/>
-		<edge from-layer="2029" from-port="2" to-layer="2031" to-port="0"/>
-		<edge from-layer="2030" from-port="0" to-layer="2031" to-port="1"/>
-		<edge from-layer="2031" from-port="2" to-layer="2033" to-port="0"/>
-		<edge from-layer="2032" from-port="0" to-layer="2033" to-port="1"/>
-		<edge from-layer="2033" from-port="2" to-layer="2035" to-port="0"/>
-		<edge from-layer="2034" from-port="0" to-layer="2035" to-port="1"/>
-		<edge from-layer="2035" from-port="2" to-layer="2037" to-port="0"/>
-		<edge from-layer="2036" from-port="0" to-layer="2037" to-port="1"/>
-		<edge from-layer="2037" from-port="2" to-layer="2038" to-port="0"/>
-		<edge from-layer="2038" from-port="2" to-layer="2040" to-port="0"/>
-		<edge from-layer="2038" from-port="2" to-layer="2043" to-port="0"/>
-		<edge from-layer="2038" from-port="2" to-layer="2079" to-port="0"/>
-		<edge from-layer="2039" from-port="0" to-layer="2040" to-port="1"/>
-		<edge from-layer="2040" from-port="2" to-layer="2042" to-port="0"/>
-		<edge from-layer="2041" from-port="0" to-layer="2042" to-port="1"/>
-		<edge from-layer="2042" from-port="2" to-layer="2044" to-port="0"/>
-		<edge from-layer="2043" from-port="1" to-layer="2044" to-port="1"/>
-		<edge from-layer="2044" from-port="2" to-layer="2046" to-port="0"/>
-		<edge from-layer="2045" from-port="0" to-layer="2046" to-port="1"/>
-		<edge from-layer="2046" from-port="2" to-layer="2048" to-port="0"/>
-		<edge from-layer="2047" from-port="0" to-layer="2048" to-port="1"/>
-		<edge from-layer="2048" from-port="2" to-layer="2049" to-port="0"/>
-		<edge from-layer="2049" from-port="1" to-layer="2051" to-port="0"/>
-		<edge from-layer="2050" from-port="0" to-layer="2051" to-port="1"/>
-		<edge from-layer="2051" from-port="2" to-layer="2053" to-port="0"/>
-		<edge from-layer="2052" from-port="0" to-layer="2053" to-port="1"/>
-		<edge from-layer="2053" from-port="2" to-layer="2063" to-port="0"/>
-		<edge from-layer="2054" from-port="1" to-layer="2056" to-port="0"/>
-		<edge from-layer="2055" from-port="0" to-layer="2056" to-port="1"/>
-		<edge from-layer="2056" from-port="2" to-layer="2058" to-port="0"/>
-		<edge from-layer="2057" from-port="0" to-layer="2058" to-port="1"/>
-		<edge from-layer="2058" from-port="2" to-layer="2060" to-port="0"/>
-		<edge from-layer="2059" from-port="0" to-layer="2060" to-port="1"/>
-		<edge from-layer="2060" from-port="2" to-layer="2062" to-port="0"/>
-		<edge from-layer="2061" from-port="0" to-layer="2062" to-port="1"/>
-		<edge from-layer="2062" from-port="2" to-layer="2063" to-port="1"/>
-		<edge from-layer="2063" from-port="2" to-layer="2065" to-port="0"/>
-		<edge from-layer="2063" from-port="2" to-layer="2068" to-port="0"/>
-		<edge from-layer="2064" from-port="0" to-layer="2065" to-port="1"/>
-		<edge from-layer="2065" from-port="2" to-layer="2067" to-port="0"/>
-		<edge from-layer="2066" from-port="0" to-layer="2067" to-port="1"/>
-		<edge from-layer="2067" from-port="2" to-layer="2069" to-port="0"/>
-		<edge from-layer="2068" from-port="1" to-layer="2069" to-port="1"/>
-		<edge from-layer="2069" from-port="2" to-layer="2071" to-port="0"/>
-		<edge from-layer="2070" from-port="0" to-layer="2071" to-port="1"/>
-		<edge from-layer="2071" from-port="2" to-layer="2073" to-port="0"/>
-		<edge from-layer="2072" from-port="0" to-layer="2073" to-port="1"/>
-		<edge from-layer="2073" from-port="2" to-layer="2074" to-port="0"/>
-		<edge from-layer="2074" from-port="1" to-layer="2076" to-port="0"/>
-		<edge from-layer="2075" from-port="0" to-layer="2076" to-port="1"/>
-		<edge from-layer="2076" from-port="2" to-layer="2078" to-port="0"/>
-		<edge from-layer="2077" from-port="0" to-layer="2078" to-port="1"/>
-		<edge from-layer="2078" from-port="2" to-layer="2079" to-port="1"/>
-		<edge from-layer="2079" from-port="2" to-layer="2080" to-port="0"/>
-		<edge from-layer="2080" from-port="2" to-layer="2082" to-port="0"/>
-		<edge from-layer="2080" from-port="2" to-layer="2089" to-port="0"/>
-		<edge from-layer="2080" from-port="2" to-layer="2086" to-port="0"/>
-		<edge from-layer="2081" from-port="0" to-layer="2082" to-port="1"/>
-		<edge from-layer="2082" from-port="2" to-layer="2084" to-port="0"/>
-		<edge from-layer="2083" from-port="0" to-layer="2084" to-port="1"/>
-		<edge from-layer="2084" from-port="2" to-layer="2125" to-port="0"/>
-		<edge from-layer="2085" from-port="0" to-layer="2086" to-port="1"/>
-		<edge from-layer="2086" from-port="2" to-layer="2088" to-port="0"/>
-		<edge from-layer="2087" from-port="0" to-layer="2088" to-port="1"/>
-		<edge from-layer="2088" from-port="2" to-layer="2090" to-port="0"/>
-		<edge from-layer="2089" from-port="1" to-layer="2090" to-port="1"/>
-		<edge from-layer="2090" from-port="2" to-layer="2092" to-port="0"/>
-		<edge from-layer="2091" from-port="0" to-layer="2092" to-port="1"/>
-		<edge from-layer="2092" from-port="2" to-layer="2094" to-port="0"/>
-		<edge from-layer="2093" from-port="0" to-layer="2094" to-port="1"/>
-		<edge from-layer="2094" from-port="2" to-layer="2095" to-port="0"/>
-		<edge from-layer="2095" from-port="1" to-layer="2097" to-port="0"/>
-		<edge from-layer="2096" from-port="0" to-layer="2097" to-port="1"/>
-		<edge from-layer="2097" from-port="2" to-layer="2099" to-port="0"/>
-		<edge from-layer="2098" from-port="0" to-layer="2099" to-port="1"/>
-		<edge from-layer="2099" from-port="2" to-layer="2109" to-port="0"/>
-		<edge from-layer="2100" from-port="1" to-layer="2102" to-port="0"/>
-		<edge from-layer="2101" from-port="0" to-layer="2102" to-port="1"/>
-		<edge from-layer="2102" from-port="2" to-layer="2104" to-port="0"/>
-		<edge from-layer="2103" from-port="0" to-layer="2104" to-port="1"/>
-		<edge from-layer="2104" from-port="2" to-layer="2106" to-port="0"/>
-		<edge from-layer="2105" from-port="0" to-layer="2106" to-port="1"/>
-		<edge from-layer="2106" from-port="2" to-layer="2108" to-port="0"/>
-		<edge from-layer="2107" from-port="0" to-layer="2108" to-port="1"/>
-		<edge from-layer="2108" from-port="2" to-layer="2109" to-port="1"/>
-		<edge from-layer="2109" from-port="2" to-layer="2111" to-port="0"/>
-		<edge from-layer="2109" from-port="2" to-layer="2114" to-port="0"/>
-		<edge from-layer="2110" from-port="0" to-layer="2111" to-port="1"/>
-		<edge from-layer="2111" from-port="2" to-layer="2113" to-port="0"/>
-		<edge from-layer="2112" from-port="0" to-layer="2113" to-port="1"/>
-		<edge from-layer="2113" from-port="2" to-layer="2115" to-port="0"/>
-		<edge from-layer="2114" from-port="1" to-layer="2115" to-port="1"/>
-		<edge from-layer="2115" from-port="2" to-layer="2117" to-port="0"/>
-		<edge from-layer="2116" from-port="0" to-layer="2117" to-port="1"/>
-		<edge from-layer="2117" from-port="2" to-layer="2119" to-port="0"/>
-		<edge from-layer="2118" from-port="0" to-layer="2119" to-port="1"/>
-		<edge from-layer="2119" from-port="2" to-layer="2120" to-port="0"/>
-		<edge from-layer="2120" from-port="1" to-layer="2122" to-port="0"/>
-		<edge from-layer="2121" from-port="0" to-layer="2122" to-port="1"/>
-		<edge from-layer="2122" from-port="2" to-layer="2124" to-port="0"/>
-		<edge from-layer="2123" from-port="0" to-layer="2124" to-port="1"/>
-		<edge from-layer="2124" from-port="2" to-layer="2125" to-port="1"/>
-		<edge from-layer="2125" from-port="2" to-layer="2126" to-port="0"/>
-		<edge from-layer="2126" from-port="2" to-layer="2128" to-port="0"/>
-		<edge from-layer="2126" from-port="2" to-layer="2132" to-port="0"/>
-		<edge from-layer="2126" from-port="2" to-layer="2135" to-port="0"/>
-		<edge from-layer="2127" from-port="0" to-layer="2128" to-port="1"/>
-		<edge from-layer="2128" from-port="2" to-layer="2130" to-port="0"/>
-		<edge from-layer="2129" from-port="0" to-layer="2130" to-port="1"/>
-		<edge from-layer="2130" from-port="2" to-layer="2171" to-port="0"/>
-		<edge from-layer="2131" from-port="0" to-layer="2132" to-port="1"/>
-		<edge from-layer="2132" from-port="2" to-layer="2134" to-port="0"/>
-		<edge from-layer="2133" from-port="0" to-layer="2134" to-port="1"/>
-		<edge from-layer="2134" from-port="2" to-layer="2136" to-port="0"/>
-		<edge from-layer="2135" from-port="1" to-layer="2136" to-port="1"/>
-		<edge from-layer="2136" from-port="2" to-layer="2138" to-port="0"/>
-		<edge from-layer="2137" from-port="0" to-layer="2138" to-port="1"/>
-		<edge from-layer="2138" from-port="2" to-layer="2140" to-port="0"/>
-		<edge from-layer="2139" from-port="0" to-layer="2140" to-port="1"/>
-		<edge from-layer="2140" from-port="2" to-layer="2141" to-port="0"/>
-		<edge from-layer="2141" from-port="1" to-layer="2143" to-port="0"/>
-		<edge from-layer="2142" from-port="0" to-layer="2143" to-port="1"/>
-		<edge from-layer="2143" from-port="2" to-layer="2145" to-port="0"/>
-		<edge from-layer="2144" from-port="0" to-layer="2145" to-port="1"/>
-		<edge from-layer="2145" from-port="2" to-layer="2155" to-port="0"/>
-		<edge from-layer="2146" from-port="1" to-layer="2148" to-port="0"/>
-		<edge from-layer="2147" from-port="0" to-layer="2148" to-port="1"/>
-		<edge from-layer="2148" from-port="2" to-layer="2150" to-port="0"/>
-		<edge from-layer="2149" from-port="0" to-layer="2150" to-port="1"/>
-		<edge from-layer="2150" from-port="2" to-layer="2152" to-port="0"/>
-		<edge from-layer="2151" from-port="0" to-layer="2152" to-port="1"/>
-		<edge from-layer="2152" from-port="2" to-layer="2154" to-port="0"/>
-		<edge from-layer="2153" from-port="0" to-layer="2154" to-port="1"/>
-		<edge from-layer="2154" from-port="2" to-layer="2155" to-port="1"/>
-		<edge from-layer="2155" from-port="2" to-layer="2157" to-port="0"/>
-		<edge from-layer="2155" from-port="2" to-layer="2160" to-port="0"/>
-		<edge from-layer="2156" from-port="0" to-layer="2157" to-port="1"/>
-		<edge from-layer="2157" from-port="2" to-layer="2159" to-port="0"/>
-		<edge from-layer="2158" from-port="0" to-layer="2159" to-port="1"/>
-		<edge from-layer="2159" from-port="2" to-layer="2161" to-port="0"/>
-		<edge from-layer="2160" from-port="1" to-layer="2161" to-port="1"/>
-		<edge from-layer="2161" from-port="2" to-layer="2163" to-port="0"/>
-		<edge from-layer="2162" from-port="0" to-layer="2163" to-port="1"/>
-		<edge from-layer="2163" from-port="2" to-layer="2165" to-port="0"/>
-		<edge from-layer="2164" from-port="0" to-layer="2165" to-port="1"/>
-		<edge from-layer="2165" from-port="2" to-layer="2166" to-port="0"/>
-		<edge from-layer="2166" from-port="1" to-layer="2168" to-port="0"/>
-		<edge from-layer="2167" from-port="0" to-layer="2168" to-port="1"/>
-		<edge from-layer="2168" from-port="2" to-layer="2170" to-port="0"/>
-		<edge from-layer="2169" from-port="0" to-layer="2170" to-port="1"/>
-		<edge from-layer="2170" from-port="2" to-layer="2171" to-port="1"/>
-		<edge from-layer="2171" from-port="2" to-layer="2172" to-port="0"/>
-		<edge from-layer="2172" from-port="2" to-layer="2174" to-port="0"/>
-		<edge from-layer="2172" from-port="2" to-layer="2181" to-port="0"/>
-		<edge from-layer="2172" from-port="2" to-layer="2178" to-port="0"/>
-		<edge from-layer="2173" from-port="0" to-layer="2174" to-port="1"/>
-		<edge from-layer="2174" from-port="2" to-layer="2176" to-port="0"/>
-		<edge from-layer="2175" from-port="0" to-layer="2176" to-port="1"/>
-		<edge from-layer="2176" from-port="2" to-layer="2217" to-port="0"/>
-		<edge from-layer="2177" from-port="0" to-layer="2178" to-port="1"/>
-		<edge from-layer="2178" from-port="2" to-layer="2180" to-port="0"/>
-		<edge from-layer="2179" from-port="0" to-layer="2180" to-port="1"/>
-		<edge from-layer="2180" from-port="2" to-layer="2182" to-port="0"/>
-		<edge from-layer="2181" from-port="1" to-layer="2182" to-port="1"/>
-		<edge from-layer="2182" from-port="2" to-layer="2184" to-port="0"/>
-		<edge from-layer="2183" from-port="0" to-layer="2184" to-port="1"/>
-		<edge from-layer="2184" from-port="2" to-layer="2186" to-port="0"/>
-		<edge from-layer="2185" from-port="0" to-layer="2186" to-port="1"/>
-		<edge from-layer="2186" from-port="2" to-layer="2187" to-port="0"/>
-		<edge from-layer="2187" from-port="1" to-layer="2189" to-port="0"/>
-		<edge from-layer="2188" from-port="0" to-layer="2189" to-port="1"/>
-		<edge from-layer="2189" from-port="2" to-layer="2191" to-port="0"/>
-		<edge from-layer="2190" from-port="0" to-layer="2191" to-port="1"/>
-		<edge from-layer="2191" from-port="2" to-layer="2201" to-port="0"/>
-		<edge from-layer="2192" from-port="1" to-layer="2194" to-port="0"/>
-		<edge from-layer="2193" from-port="0" to-layer="2194" to-port="1"/>
-		<edge from-layer="2194" from-port="2" to-layer="2196" to-port="0"/>
-		<edge from-layer="2195" from-port="0" to-layer="2196" to-port="1"/>
-		<edge from-layer="2196" from-port="2" to-layer="2198" to-port="0"/>
-		<edge from-layer="2197" from-port="0" to-layer="2198" to-port="1"/>
-		<edge from-layer="2198" from-port="2" to-layer="2200" to-port="0"/>
-		<edge from-layer="2199" from-port="0" to-layer="2200" to-port="1"/>
-		<edge from-layer="2200" from-port="2" to-layer="2201" to-port="1"/>
-		<edge from-layer="2201" from-port="2" to-layer="2206" to-port="0"/>
-		<edge from-layer="2201" from-port="2" to-layer="2203" to-port="0"/>
-		<edge from-layer="2202" from-port="0" to-layer="2203" to-port="1"/>
-		<edge from-layer="2203" from-port="2" to-layer="2205" to-port="0"/>
-		<edge from-layer="2204" from-port="0" to-layer="2205" to-port="1"/>
-		<edge from-layer="2205" from-port="2" to-layer="2207" to-port="0"/>
-		<edge from-layer="2206" from-port="1" to-layer="2207" to-port="1"/>
-		<edge from-layer="2207" from-port="2" to-layer="2209" to-port="0"/>
-		<edge from-layer="2208" from-port="0" to-layer="2209" to-port="1"/>
-		<edge from-layer="2209" from-port="2" to-layer="2211" to-port="0"/>
-		<edge from-layer="2210" from-port="0" to-layer="2211" to-port="1"/>
-		<edge from-layer="2211" from-port="2" to-layer="2212" to-port="0"/>
-		<edge from-layer="2212" from-port="1" to-layer="2214" to-port="0"/>
-		<edge from-layer="2213" from-port="0" to-layer="2214" to-port="1"/>
-		<edge from-layer="2214" from-port="2" to-layer="2216" to-port="0"/>
-		<edge from-layer="2215" from-port="0" to-layer="2216" to-port="1"/>
-		<edge from-layer="2216" from-port="2" to-layer="2217" to-port="1"/>
-		<edge from-layer="2217" from-port="2" to-layer="2218" to-port="0"/>
-		<edge from-layer="2217" from-port="2" to-layer="2223" to-port="0"/>
-		<edge from-layer="2218" from-port="1" to-layer="2219" to-port="0"/>
-		<edge from-layer="2219" from-port="1" to-layer="2221" to-port="0"/>
-		<edge from-layer="2220" from-port="0" to-layer="3306" to-port="2"/>
-		<edge from-layer="2220" from-port="0" to-layer="3304" to-port="1"/>
-		<edge from-layer="2220" from-port="0" to-layer="2221" to-port="1"/>
-		<edge from-layer="2220" from-port="0" to-layer="2223" to-port="2"/>
-		<edge from-layer="2220" from-port="0" to-layer="4389" to-port="2"/>
-		<edge from-layer="2220" from-port="0" to-layer="4387" to-port="1"/>
-		<edge from-layer="2221" from-port="2" to-layer="2222" to-port="0"/>
-		<edge from-layer="2222" from-port="1" to-layer="2223" to-port="1"/>
-		<edge from-layer="2223" from-port="3" to-layer="2225" to-port="0"/>
-		<edge from-layer="2224" from-port="0" to-layer="2225" to-port="1"/>
-		<edge from-layer="2225" from-port="2" to-layer="2227" to-port="0"/>
-		<edge from-layer="2226" from-port="0" to-layer="2227" to-port="1"/>
-		<edge from-layer="2227" from-port="2" to-layer="2228" to-port="0"/>
-		<edge from-layer="2228" from-port="2" to-layer="2237" to-port="0"/>
-		<edge from-layer="2228" from-port="2" to-layer="2234" to-port="0"/>
-		<edge from-layer="2228" from-port="2" to-layer="2230" to-port="0"/>
-		<edge from-layer="2229" from-port="0" to-layer="2230" to-port="1"/>
-		<edge from-layer="2230" from-port="2" to-layer="2232" to-port="0"/>
-		<edge from-layer="2231" from-port="0" to-layer="2232" to-port="1"/>
-		<edge from-layer="2232" from-port="2" to-layer="2273" to-port="0"/>
-		<edge from-layer="2233" from-port="0" to-layer="2234" to-port="1"/>
-		<edge from-layer="2234" from-port="2" to-layer="2236" to-port="0"/>
-		<edge from-layer="2235" from-port="0" to-layer="2236" to-port="1"/>
-		<edge from-layer="2236" from-port="2" to-layer="2238" to-port="0"/>
-		<edge from-layer="2237" from-port="1" to-layer="2238" to-port="1"/>
-		<edge from-layer="2238" from-port="2" to-layer="2240" to-port="0"/>
-		<edge from-layer="2239" from-port="0" to-layer="2240" to-port="1"/>
-		<edge from-layer="2240" from-port="2" to-layer="2242" to-port="0"/>
-		<edge from-layer="2241" from-port="0" to-layer="2242" to-port="1"/>
-		<edge from-layer="2242" from-port="2" to-layer="2243" to-port="0"/>
-		<edge from-layer="2243" from-port="1" to-layer="2245" to-port="0"/>
-		<edge from-layer="2244" from-port="0" to-layer="2245" to-port="1"/>
-		<edge from-layer="2245" from-port="2" to-layer="2247" to-port="0"/>
-		<edge from-layer="2246" from-port="0" to-layer="2247" to-port="1"/>
-		<edge from-layer="2247" from-port="2" to-layer="2257" to-port="0"/>
-		<edge from-layer="2248" from-port="1" to-layer="2250" to-port="0"/>
-		<edge from-layer="2249" from-port="0" to-layer="2250" to-port="1"/>
-		<edge from-layer="2250" from-port="2" to-layer="2252" to-port="0"/>
-		<edge from-layer="2251" from-port="0" to-layer="2252" to-port="1"/>
-		<edge from-layer="2252" from-port="2" to-layer="2254" to-port="0"/>
-		<edge from-layer="2253" from-port="0" to-layer="2254" to-port="1"/>
-		<edge from-layer="2254" from-port="2" to-layer="2256" to-port="0"/>
-		<edge from-layer="2255" from-port="0" to-layer="2256" to-port="1"/>
-		<edge from-layer="2256" from-port="2" to-layer="2257" to-port="1"/>
-		<edge from-layer="2257" from-port="2" to-layer="2259" to-port="0"/>
-		<edge from-layer="2257" from-port="2" to-layer="2262" to-port="0"/>
-		<edge from-layer="2258" from-port="0" to-layer="2259" to-port="1"/>
-		<edge from-layer="2259" from-port="2" to-layer="2261" to-port="0"/>
-		<edge from-layer="2260" from-port="0" to-layer="2261" to-port="1"/>
-		<edge from-layer="2261" from-port="2" to-layer="2263" to-port="0"/>
-		<edge from-layer="2262" from-port="1" to-layer="2263" to-port="1"/>
-		<edge from-layer="2263" from-port="2" to-layer="2265" to-port="0"/>
-		<edge from-layer="2264" from-port="0" to-layer="2265" to-port="1"/>
-		<edge from-layer="2265" from-port="2" to-layer="2267" to-port="0"/>
-		<edge from-layer="2266" from-port="0" to-layer="2267" to-port="1"/>
-		<edge from-layer="2267" from-port="2" to-layer="2268" to-port="0"/>
-		<edge from-layer="2268" from-port="1" to-layer="2270" to-port="0"/>
-		<edge from-layer="2269" from-port="0" to-layer="2270" to-port="1"/>
-		<edge from-layer="2270" from-port="2" to-layer="2272" to-port="0"/>
-		<edge from-layer="2271" from-port="0" to-layer="2272" to-port="1"/>
-		<edge from-layer="2272" from-port="2" to-layer="2273" to-port="1"/>
-		<edge from-layer="2273" from-port="2" to-layer="2585" to-port="1"/>
-		<edge from-layer="2273" from-port="2" to-layer="2278" to-port="0"/>
-		<edge from-layer="2273" from-port="2" to-layer="2275" to-port="0"/>
-		<edge from-layer="2274" from-port="0" to-layer="2275" to-port="1"/>
-		<edge from-layer="2275" from-port="2" to-layer="2277" to-port="0"/>
-		<edge from-layer="2276" from-port="0" to-layer="2277" to-port="1"/>
-		<edge from-layer="2277" from-port="2" to-layer="2279" to-port="0"/>
-		<edge from-layer="2278" from-port="1" to-layer="2292" to-port="0"/>
-		<edge from-layer="2278" from-port="1" to-layer="2577" to-port="0"/>
-		<edge from-layer="2278" from-port="1" to-layer="2304" to-port="0"/>
-		<edge from-layer="2278" from-port="1" to-layer="2298" to-port="0"/>
-		<edge from-layer="2278" from-port="1" to-layer="2295" to-port="0"/>
-		<edge from-layer="2278" from-port="1" to-layer="2279" to-port="1"/>
-		<edge from-layer="2279" from-port="2" to-layer="2281" to-port="0"/>
-		<edge from-layer="2280" from-port="0" to-layer="2281" to-port="1"/>
-		<edge from-layer="2281" from-port="2" to-layer="2283" to-port="0"/>
-		<edge from-layer="2282" from-port="0" to-layer="2283" to-port="1"/>
-		<edge from-layer="2283" from-port="2" to-layer="2285" to-port="0"/>
-		<edge from-layer="2284" from-port="0" to-layer="2285" to-port="1"/>
-		<edge from-layer="2285" from-port="2" to-layer="2287" to-port="0"/>
-		<edge from-layer="2286" from-port="0" to-layer="2287" to-port="1"/>
-		<edge from-layer="2287" from-port="2" to-layer="2289" to-port="0"/>
-		<edge from-layer="2288" from-port="0" to-layer="2289" to-port="1"/>
-		<edge from-layer="2289" from-port="2" to-layer="2306" to-port="0"/>
-		<edge from-layer="2290" from-port="0" to-layer="2292" to-port="1"/>
-		<edge from-layer="2291" from-port="0" to-layer="2292" to-port="2"/>
-		<edge from-layer="2292" from-port="3" to-layer="2305" to-port="0"/>
-		<edge from-layer="2293" from-port="0" to-layer="2295" to-port="1"/>
-		<edge from-layer="2294" from-port="0" to-layer="2295" to-port="2"/>
-		<edge from-layer="2295" from-port="3" to-layer="2299" to-port="0"/>
-		<edge from-layer="2296" from-port="0" to-layer="2298" to-port="1"/>
-		<edge from-layer="2297" from-port="0" to-layer="2298" to-port="2"/>
-		<edge from-layer="2298" from-port="3" to-layer="2299" to-port="1"/>
-		<edge from-layer="2299" from-port="2" to-layer="2301" to-port="0"/>
-		<edge from-layer="2300" from-port="0" to-layer="2301" to-port="1"/>
-		<edge from-layer="2301" from-port="2" to-layer="2305" to-port="1"/>
-		<edge from-layer="2302" from-port="0" to-layer="2304" to-port="1"/>
-		<edge from-layer="2303" from-port="0" to-layer="2304" to-port="2"/>
-		<edge from-layer="2304" from-port="3" to-layer="2305" to-port="2"/>
-		<edge from-layer="2305" from-port="3" to-layer="2306" to-port="1"/>
-		<edge from-layer="2306" from-port="2" to-layer="2445" to-port="1"/>
-		<edge from-layer="2306" from-port="2" to-layer="2308" to-port="0"/>
-		<edge from-layer="2307" from-port="0" to-layer="2308" to-port="1"/>
-		<edge from-layer="2308" from-port="2" to-layer="2310" to-port="0"/>
-		<edge from-layer="2309" from-port="0" to-layer="2310" to-port="1"/>
-		<edge from-layer="2310" from-port="2" to-layer="2312" to-port="0"/>
-		<edge from-layer="2311" from-port="0" to-layer="2312" to-port="1"/>
-		<edge from-layer="2312" from-port="2" to-layer="2380" to-port="0"/>
-		<edge from-layer="2312" from-port="2" to-layer="2345" to-port="0"/>
-		<edge from-layer="2312" from-port="2" to-layer="2314" to-port="0"/>
-		<edge from-layer="2313" from-port="0" to-layer="2314" to-port="1"/>
-		<edge from-layer="2314" from-port="2" to-layer="2316" to-port="0"/>
-		<edge from-layer="2314" from-port="2" to-layer="2325" to-port="0"/>
-		<edge from-layer="2315" from-port="0" to-layer="2324" to-port="0"/>
-		<edge from-layer="2316" from-port="1" to-layer="2319" to-port="0"/>
-		<edge from-layer="2316" from-port="1" to-layer="2330" to-port="0"/>
-		<edge from-layer="2316" from-port="1" to-layer="2337" to-port="0"/>
-		<edge from-layer="2317" from-port="0" to-layer="2319" to-port="1"/>
-		<edge from-layer="2318" from-port="0" to-layer="2319" to-port="2"/>
-		<edge from-layer="2319" from-port="3" to-layer="2321" to-port="0"/>
-		<edge from-layer="2319" from-port="3" to-layer="2339" to-port="0"/>
-		<edge from-layer="2320" from-port="0" to-layer="2321" to-port="1"/>
-		<edge from-layer="2321" from-port="2" to-layer="2323" to-port="0"/>
-		<edge from-layer="2322" from-port="0" to-layer="2323" to-port="1"/>
-		<edge from-layer="2323" from-port="2" to-layer="2324" to-port="2"/>
-		<edge from-layer="2324" from-port="3" to-layer="2325" to-port="1"/>
-		<edge from-layer="2325" from-port="2" to-layer="2327" to-port="0"/>
-		<edge from-layer="2326" from-port="0" to-layer="2327" to-port="1"/>
-		<edge from-layer="2327" from-port="2" to-layer="2343" to-port="0"/>
-		<edge from-layer="2328" from-port="0" to-layer="2330" to-port="1"/>
-		<edge from-layer="2329" from-port="0" to-layer="2330" to-port="2"/>
-		<edge from-layer="2330" from-port="3" to-layer="2332" to-port="0"/>
-		<edge from-layer="2331" from-port="0" to-layer="2332" to-port="1"/>
-		<edge from-layer="2332" from-port="2" to-layer="2334" to-port="0"/>
-		<edge from-layer="2333" from-port="0" to-layer="2334" to-port="1"/>
-		<edge from-layer="2334" from-port="2" to-layer="2342" to-port="0"/>
-		<edge from-layer="2335" from-port="0" to-layer="2337" to-port="1"/>
-		<edge from-layer="2336" from-port="0" to-layer="2337" to-port="2"/>
-		<edge from-layer="2337" from-port="3" to-layer="2342" to-port="1"/>
-		<edge from-layer="2338" from-port="0" to-layer="2339" to-port="1"/>
-		<edge from-layer="2339" from-port="2" to-layer="2341" to-port="0"/>
-		<edge from-layer="2340" from-port="0" to-layer="2341" to-port="1"/>
-		<edge from-layer="2341" from-port="2" to-layer="2342" to-port="2"/>
-		<edge from-layer="2342" from-port="3" to-layer="2343" to-port="1"/>
-		<edge from-layer="2343" from-port="2" to-layer="2375" to-port="0"/>
-		<edge from-layer="2344" from-port="0" to-layer="2345" to-port="1"/>
-		<edge from-layer="2345" from-port="2" to-layer="2347" to-port="0"/>
-		<edge from-layer="2345" from-port="2" to-layer="2356" to-port="0"/>
-		<edge from-layer="2346" from-port="0" to-layer="2355" to-port="0"/>
-		<edge from-layer="2347" from-port="1" to-layer="2361" to-port="0"/>
-		<edge from-layer="2347" from-port="1" to-layer="2368" to-port="0"/>
-		<edge from-layer="2347" from-port="1" to-layer="2350" to-port="0"/>
-		<edge from-layer="2348" from-port="0" to-layer="2350" to-port="1"/>
-		<edge from-layer="2349" from-port="0" to-layer="2350" to-port="2"/>
-		<edge from-layer="2350" from-port="3" to-layer="2352" to-port="0"/>
-		<edge from-layer="2350" from-port="3" to-layer="2370" to-port="0"/>
-		<edge from-layer="2351" from-port="0" to-layer="2352" to-port="1"/>
-		<edge from-layer="2352" from-port="2" to-layer="2354" to-port="0"/>
-		<edge from-layer="2353" from-port="0" to-layer="2354" to-port="1"/>
-		<edge from-layer="2354" from-port="2" to-layer="2355" to-port="2"/>
-		<edge from-layer="2355" from-port="3" to-layer="2356" to-port="1"/>
-		<edge from-layer="2356" from-port="2" to-layer="2358" to-port="0"/>
-		<edge from-layer="2357" from-port="0" to-layer="2358" to-port="1"/>
-		<edge from-layer="2358" from-port="2" to-layer="2374" to-port="0"/>
-		<edge from-layer="2359" from-port="0" to-layer="2361" to-port="1"/>
-		<edge from-layer="2360" from-port="0" to-layer="2361" to-port="2"/>
-		<edge from-layer="2361" from-port="3" to-layer="2363" to-port="0"/>
-		<edge from-layer="2362" from-port="0" to-layer="2363" to-port="1"/>
-		<edge from-layer="2363" from-port="2" to-layer="2365" to-port="0"/>
-		<edge from-layer="2364" from-port="0" to-layer="2365" to-port="1"/>
-		<edge from-layer="2365" from-port="2" to-layer="2373" to-port="0"/>
-		<edge from-layer="2366" from-port="0" to-layer="2368" to-port="1"/>
-		<edge from-layer="2367" from-port="0" to-layer="2368" to-port="2"/>
-		<edge from-layer="2368" from-port="3" to-layer="2373" to-port="1"/>
-		<edge from-layer="2369" from-port="0" to-layer="2370" to-port="1"/>
-		<edge from-layer="2370" from-port="2" to-layer="2372" to-port="0"/>
-		<edge from-layer="2371" from-port="0" to-layer="2372" to-port="1"/>
-		<edge from-layer="2372" from-port="2" to-layer="2373" to-port="2"/>
-		<edge from-layer="2373" from-port="3" to-layer="2374" to-port="1"/>
-		<edge from-layer="2374" from-port="2" to-layer="2375" to-port="1"/>
-		<edge from-layer="2375" from-port="2" to-layer="2377" to-port="0"/>
-		<edge from-layer="2376" from-port="0" to-layer="2377" to-port="1"/>
-		<edge from-layer="2377" from-port="2" to-layer="2378" to-port="0"/>
-		<edge from-layer="2378" from-port="1" to-layer="2410" to-port="0"/>
-		<edge from-layer="2379" from-port="0" to-layer="2380" to-port="1"/>
-		<edge from-layer="2380" from-port="2" to-layer="2391" to-port="0"/>
-		<edge from-layer="2380" from-port="2" to-layer="2382" to-port="0"/>
-		<edge from-layer="2381" from-port="0" to-layer="2390" to-port="0"/>
-		<edge from-layer="2382" from-port="1" to-layer="2385" to-port="0"/>
-		<edge from-layer="2382" from-port="1" to-layer="2403" to-port="0"/>
-		<edge from-layer="2382" from-port="1" to-layer="2396" to-port="0"/>
-		<edge from-layer="2383" from-port="0" to-layer="2385" to-port="1"/>
-		<edge from-layer="2384" from-port="0" to-layer="2385" to-port="2"/>
-		<edge from-layer="2385" from-port="3" to-layer="2387" to-port="0"/>
-		<edge from-layer="2385" from-port="3" to-layer="2405" to-port="0"/>
-		<edge from-layer="2386" from-port="0" to-layer="2387" to-port="1"/>
-		<edge from-layer="2387" from-port="2" to-layer="2389" to-port="0"/>
-		<edge from-layer="2388" from-port="0" to-layer="2389" to-port="1"/>
-		<edge from-layer="2389" from-port="2" to-layer="2390" to-port="2"/>
-		<edge from-layer="2390" from-port="3" to-layer="2391" to-port="1"/>
-		<edge from-layer="2391" from-port="2" to-layer="2393" to-port="0"/>
-		<edge from-layer="2392" from-port="0" to-layer="2393" to-port="1"/>
-		<edge from-layer="2393" from-port="2" to-layer="2409" to-port="0"/>
-		<edge from-layer="2394" from-port="0" to-layer="2396" to-port="1"/>
-		<edge from-layer="2395" from-port="0" to-layer="2396" to-port="2"/>
-		<edge from-layer="2396" from-port="3" to-layer="2398" to-port="0"/>
-		<edge from-layer="2397" from-port="0" to-layer="2398" to-port="1"/>
-		<edge from-layer="2398" from-port="2" to-layer="2400" to-port="0"/>
-		<edge from-layer="2399" from-port="0" to-layer="2400" to-port="1"/>
-		<edge from-layer="2400" from-port="2" to-layer="2408" to-port="0"/>
-		<edge from-layer="2401" from-port="0" to-layer="2403" to-port="1"/>
-		<edge from-layer="2402" from-port="0" to-layer="2403" to-port="2"/>
-		<edge from-layer="2403" from-port="3" to-layer="2408" to-port="1"/>
-		<edge from-layer="2404" from-port="0" to-layer="2405" to-port="1"/>
-		<edge from-layer="2405" from-port="2" to-layer="2407" to-port="0"/>
-		<edge from-layer="2406" from-port="0" to-layer="2407" to-port="1"/>
-		<edge from-layer="2407" from-port="2" to-layer="2408" to-port="2"/>
-		<edge from-layer="2408" from-port="3" to-layer="2409" to-port="1"/>
-		<edge from-layer="2409" from-port="2" to-layer="2410" to-port="1"/>
-		<edge from-layer="2410" from-port="2" to-layer="2411" to-port="0"/>
-		<edge from-layer="2410" from-port="2" to-layer="2423" to-port="0"/>
-		<edge from-layer="2411" from-port="1" to-layer="2421" to-port="0"/>
-		<edge from-layer="2411" from-port="1" to-layer="2414" to-port="0"/>
-		<edge from-layer="2411" from-port="1" to-layer="2432" to-port="0"/>
-		<edge from-layer="2411" from-port="1" to-layer="2435" to-port="0"/>
-		<edge from-layer="2412" from-port="0" to-layer="2414" to-port="1"/>
-		<edge from-layer="2413" from-port="0" to-layer="2414" to-port="2"/>
-		<edge from-layer="2414" from-port="3" to-layer="2416" to-port="0"/>
-		<edge from-layer="2414" from-port="3" to-layer="2427" to-port="0"/>
-		<edge from-layer="2415" from-port="0" to-layer="2416" to-port="1"/>
-		<edge from-layer="2416" from-port="2" to-layer="2418" to-port="0"/>
-		<edge from-layer="2417" from-port="0" to-layer="2418" to-port="1"/>
-		<edge from-layer="2418" from-port="2" to-layer="2422" to-port="0"/>
-		<edge from-layer="2419" from-port="0" to-layer="2421" to-port="1"/>
-		<edge from-layer="2420" from-port="0" to-layer="2421" to-port="2"/>
-		<edge from-layer="2421" from-port="3" to-layer="2422" to-port="2"/>
-		<edge from-layer="2422" from-port="3" to-layer="2423" to-port="1"/>
-		<edge from-layer="2423" from-port="2" to-layer="2425" to-port="0"/>
-		<edge from-layer="2424" from-port="0" to-layer="2425" to-port="1"/>
-		<edge from-layer="2425" from-port="2" to-layer="2441" to-port="0"/>
-		<edge from-layer="2426" from-port="0" to-layer="2427" to-port="1"/>
-		<edge from-layer="2427" from-port="2" to-layer="2429" to-port="0"/>
-		<edge from-layer="2428" from-port="0" to-layer="2429" to-port="1"/>
-		<edge from-layer="2429" from-port="2" to-layer="2440" to-port="0"/>
-		<edge from-layer="2430" from-port="0" to-layer="2432" to-port="1"/>
-		<edge from-layer="2431" from-port="0" to-layer="2432" to-port="2"/>
-		<edge from-layer="2432" from-port="3" to-layer="2440" to-port="1"/>
-		<edge from-layer="2433" from-port="0" to-layer="2435" to-port="1"/>
-		<edge from-layer="2434" from-port="0" to-layer="2435" to-port="2"/>
-		<edge from-layer="2435" from-port="3" to-layer="2437" to-port="0"/>
-		<edge from-layer="2436" from-port="0" to-layer="2437" to-port="1"/>
-		<edge from-layer="2437" from-port="2" to-layer="2439" to-port="0"/>
-		<edge from-layer="2438" from-port="0" to-layer="2439" to-port="1"/>
-		<edge from-layer="2439" from-port="2" to-layer="2440" to-port="2"/>
-		<edge from-layer="2440" from-port="3" to-layer="2441" to-port="1"/>
-		<edge from-layer="2441" from-port="2" to-layer="2443" to-port="0"/>
-		<edge from-layer="2442" from-port="0" to-layer="2443" to-port="1"/>
-		<edge from-layer="2443" from-port="2" to-layer="2444" to-port="1"/>
-		<edge from-layer="2444" from-port="2" to-layer="2445" to-port="0"/>
-		<edge from-layer="2445" from-port="2" to-layer="2534" to-port="1"/>
-		<edge from-layer="2445" from-port="2" to-layer="2447" to-port="0"/>
-		<edge from-layer="2446" from-port="0" to-layer="2447" to-port="1"/>
-		<edge from-layer="2447" from-port="2" to-layer="2449" to-port="0"/>
-		<edge from-layer="2448" from-port="0" to-layer="2449" to-port="1"/>
-		<edge from-layer="2449" from-port="2" to-layer="2451" to-port="0"/>
-		<edge from-layer="2450" from-port="0" to-layer="2451" to-port="1"/>
-		<edge from-layer="2451" from-port="2" to-layer="2453" to-port="0"/>
-		<edge from-layer="2452" from-port="0" to-layer="2453" to-port="1"/>
-		<edge from-layer="2453" from-port="2" to-layer="2464" to-port="0"/>
-		<edge from-layer="2453" from-port="2" to-layer="2455" to-port="0"/>
-		<edge from-layer="2454" from-port="0" to-layer="2463" to-port="0"/>
-		<edge from-layer="2455" from-port="1" to-layer="2476" to-port="0"/>
-		<edge from-layer="2455" from-port="1" to-layer="2469" to-port="0"/>
-		<edge from-layer="2455" from-port="1" to-layer="2458" to-port="0"/>
-		<edge from-layer="2456" from-port="0" to-layer="2458" to-port="1"/>
-		<edge from-layer="2457" from-port="0" to-layer="2458" to-port="2"/>
-		<edge from-layer="2458" from-port="3" to-layer="2460" to-port="0"/>
-		<edge from-layer="2458" from-port="3" to-layer="2478" to-port="0"/>
-		<edge from-layer="2459" from-port="0" to-layer="2460" to-port="1"/>
-		<edge from-layer="2460" from-port="2" to-layer="2462" to-port="0"/>
-		<edge from-layer="2461" from-port="0" to-layer="2462" to-port="1"/>
-		<edge from-layer="2462" from-port="2" to-layer="2463" to-port="2"/>
-		<edge from-layer="2463" from-port="3" to-layer="2464" to-port="1"/>
-		<edge from-layer="2464" from-port="2" to-layer="2466" to-port="0"/>
-		<edge from-layer="2465" from-port="0" to-layer="2466" to-port="1"/>
-		<edge from-layer="2466" from-port="2" to-layer="2482" to-port="0"/>
-		<edge from-layer="2467" from-port="0" to-layer="2469" to-port="1"/>
-		<edge from-layer="2468" from-port="0" to-layer="2469" to-port="2"/>
-		<edge from-layer="2469" from-port="3" to-layer="2471" to-port="0"/>
-		<edge from-layer="2470" from-port="0" to-layer="2471" to-port="1"/>
-		<edge from-layer="2471" from-port="2" to-layer="2473" to-port="0"/>
-		<edge from-layer="2472" from-port="0" to-layer="2473" to-port="1"/>
-		<edge from-layer="2473" from-port="2" to-layer="2481" to-port="0"/>
-		<edge from-layer="2474" from-port="0" to-layer="2476" to-port="1"/>
-		<edge from-layer="2475" from-port="0" to-layer="2476" to-port="2"/>
-		<edge from-layer="2476" from-port="3" to-layer="2481" to-port="1"/>
-		<edge from-layer="2477" from-port="0" to-layer="2478" to-port="1"/>
-		<edge from-layer="2478" from-port="2" to-layer="2480" to-port="0"/>
-		<edge from-layer="2479" from-port="0" to-layer="2480" to-port="1"/>
-		<edge from-layer="2480" from-port="2" to-layer="2481" to-port="2"/>
-		<edge from-layer="2481" from-port="3" to-layer="2482" to-port="1"/>
-		<edge from-layer="2482" from-port="2" to-layer="2489" to-port="0"/>
-		<edge from-layer="2483" from-port="0" to-layer="2484" to-port="1"/>
-		<edge from-layer="2484" from-port="2" to-layer="2485" to-port="0"/>
-		<edge from-layer="2485" from-port="2" to-layer="2487" to-port="0"/>
-		<edge from-layer="2486" from-port="0" to-layer="2487" to-port="1"/>
-		<edge from-layer="2487" from-port="2" to-layer="2488" to-port="0"/>
-		<edge from-layer="2488" from-port="2" to-layer="2489" to-port="1"/>
-		<edge from-layer="2489" from-port="2" to-layer="2491" to-port="0"/>
-		<edge from-layer="2490" from-port="0" to-layer="2491" to-port="1"/>
-		<edge from-layer="2491" from-port="2" to-layer="2492" to-port="0"/>
-		<edge from-layer="2492" from-port="1" to-layer="2499" to-port="0"/>
-		<edge from-layer="2493" from-port="0" to-layer="2494" to-port="1"/>
-		<edge from-layer="2494" from-port="2" to-layer="2495" to-port="0"/>
-		<edge from-layer="2495" from-port="2" to-layer="2497" to-port="0"/>
-		<edge from-layer="2496" from-port="0" to-layer="2497" to-port="1"/>
-		<edge from-layer="2497" from-port="2" to-layer="2498" to-port="0"/>
-		<edge from-layer="2498" from-port="2" to-layer="2499" to-port="1"/>
-		<edge from-layer="2499" from-port="2" to-layer="2500" to-port="0"/>
-		<edge from-layer="2499" from-port="2" to-layer="2512" to-port="0"/>
-		<edge from-layer="2500" from-port="1" to-layer="2510" to-port="0"/>
-		<edge from-layer="2500" from-port="1" to-layer="2521" to-port="0"/>
-		<edge from-layer="2500" from-port="1" to-layer="2524" to-port="0"/>
-		<edge from-layer="2500" from-port="1" to-layer="2503" to-port="0"/>
-		<edge from-layer="2501" from-port="0" to-layer="2503" to-port="1"/>
-		<edge from-layer="2502" from-port="0" to-layer="2503" to-port="2"/>
-		<edge from-layer="2503" from-port="3" to-layer="2505" to-port="0"/>
-		<edge from-layer="2503" from-port="3" to-layer="2516" to-port="0"/>
-		<edge from-layer="2504" from-port="0" to-layer="2505" to-port="1"/>
-		<edge from-layer="2505" from-port="2" to-layer="2507" to-port="0"/>
-		<edge from-layer="2506" from-port="0" to-layer="2507" to-port="1"/>
-		<edge from-layer="2507" from-port="2" to-layer="2511" to-port="0"/>
-		<edge from-layer="2508" from-port="0" to-layer="2510" to-port="1"/>
-		<edge from-layer="2509" from-port="0" to-layer="2510" to-port="2"/>
-		<edge from-layer="2510" from-port="3" to-layer="2511" to-port="2"/>
-		<edge from-layer="2511" from-port="3" to-layer="2512" to-port="1"/>
-		<edge from-layer="2512" from-port="2" to-layer="2514" to-port="0"/>
-		<edge from-layer="2513" from-port="0" to-layer="2514" to-port="1"/>
-		<edge from-layer="2514" from-port="2" to-layer="2530" to-port="0"/>
-		<edge from-layer="2515" from-port="0" to-layer="2516" to-port="1"/>
-		<edge from-layer="2516" from-port="2" to-layer="2518" to-port="0"/>
-		<edge from-layer="2517" from-port="0" to-layer="2518" to-port="1"/>
-		<edge from-layer="2518" from-port="2" to-layer="2529" to-port="0"/>
-		<edge from-layer="2519" from-port="0" to-layer="2521" to-port="1"/>
-		<edge from-layer="2520" from-port="0" to-layer="2521" to-port="2"/>
-		<edge from-layer="2521" from-port="3" to-layer="2529" to-port="1"/>
-		<edge from-layer="2522" from-port="0" to-layer="2524" to-port="1"/>
-		<edge from-layer="2523" from-port="0" to-layer="2524" to-port="2"/>
-		<edge from-layer="2524" from-port="3" to-layer="2526" to-port="0"/>
-		<edge from-layer="2525" from-port="0" to-layer="2526" to-port="1"/>
-		<edge from-layer="2526" from-port="2" to-layer="2528" to-port="0"/>
-		<edge from-layer="2527" from-port="0" to-layer="2528" to-port="1"/>
-		<edge from-layer="2528" from-port="2" to-layer="2529" to-port="2"/>
-		<edge from-layer="2529" from-port="3" to-layer="2530" to-port="1"/>
-		<edge from-layer="2530" from-port="2" to-layer="2532" to-port="0"/>
-		<edge from-layer="2531" from-port="0" to-layer="2532" to-port="1"/>
-		<edge from-layer="2532" from-port="2" to-layer="2533" to-port="1"/>
-		<edge from-layer="2533" from-port="2" to-layer="2534" to-port="0"/>
-		<edge from-layer="2534" from-port="2" to-layer="2536" to-port="0"/>
-		<edge from-layer="2534" from-port="2" to-layer="2574" to-port="1"/>
-		<edge from-layer="2535" from-port="0" to-layer="2536" to-port="1"/>
-		<edge from-layer="2536" from-port="2" to-layer="2538" to-port="0"/>
-		<edge from-layer="2537" from-port="0" to-layer="2538" to-port="1"/>
-		<edge from-layer="2538" from-port="2" to-layer="2540" to-port="0"/>
-		<edge from-layer="2539" from-port="0" to-layer="2540" to-port="1"/>
-		<edge from-layer="2540" from-port="2" to-layer="2542" to-port="0"/>
-		<edge from-layer="2541" from-port="0" to-layer="2542" to-port="1"/>
-		<edge from-layer="2542" from-port="2" to-layer="2543" to-port="1"/>
-		<edge from-layer="2543" from-port="2" to-layer="2547" to-port="0"/>
-		<edge from-layer="2543" from-port="2" to-layer="2558" to-port="0"/>
-		<edge from-layer="2543" from-port="2" to-layer="2568" to-port="0"/>
-		<edge from-layer="2544" from-port="0" to-layer="2558" to-port="1"/>
-		<edge from-layer="2545" from-port="0" to-layer="2556" to-port="0"/>
-		<edge from-layer="2546" from-port="0" to-layer="2556" to-port="1"/>
-		<edge from-layer="2547" from-port="1" to-layer="2550" to-port="0"/>
-		<edge from-layer="2548" from-port="0" to-layer="2550" to-port="1"/>
-		<edge from-layer="2549" from-port="0" to-layer="2550" to-port="2"/>
-		<edge from-layer="2550" from-port="3" to-layer="2552" to-port="0"/>
-		<edge from-layer="2551" from-port="0" to-layer="2552" to-port="1"/>
-		<edge from-layer="2552" from-port="2" to-layer="2554" to-port="0"/>
-		<edge from-layer="2553" from-port="0" to-layer="2554" to-port="1"/>
-		<edge from-layer="2554" from-port="2" to-layer="2562" to-port="2"/>
-		<edge from-layer="2554" from-port="2" to-layer="2556" to-port="2"/>
-		<edge from-layer="2554" from-port="2" to-layer="2565" to-port="0"/>
-		<edge from-layer="2555" from-port="0" to-layer="2556" to-port="3"/>
-		<edge from-layer="2556" from-port="4" to-layer="2558" to-port="2"/>
-		<edge from-layer="2557" from-port="0" to-layer="2558" to-port="3"/>
-		<edge from-layer="2558" from-port="4" to-layer="2570" to-port="0"/>
-		<edge from-layer="2559" from-port="0" to-layer="2562" to-port="0"/>
-		<edge from-layer="2560" from-port="0" to-layer="2566" to-port="1"/>
-		<edge from-layer="2560" from-port="0" to-layer="2562" to-port="1"/>
-		<edge from-layer="2561" from-port="0" to-layer="2562" to-port="3"/>
-		<edge from-layer="2561" from-port="0" to-layer="2566" to-port="3"/>
-		<edge from-layer="2562" from-port="4" to-layer="2568" to-port="1"/>
-		<edge from-layer="2563" from-port="0" to-layer="2566" to-port="0"/>
-		<edge from-layer="2564" from-port="0" to-layer="2565" to-port="1"/>
-		<edge from-layer="2565" from-port="2" to-layer="2566" to-port="2"/>
-		<edge from-layer="2566" from-port="4" to-layer="2568" to-port="2"/>
-		<edge from-layer="2567" from-port="0" to-layer="2568" to-port="3"/>
-		<edge from-layer="2568" from-port="4" to-layer="2569" to-port="0"/>
-		<edge from-layer="2569" from-port="1" to-layer="2570" to-port="1"/>
-		<edge from-layer="2570" from-port="2" to-layer="2572" to-port="0"/>
-		<edge from-layer="2571" from-port="0" to-layer="2572" to-port="1"/>
-		<edge from-layer="2572" from-port="2" to-layer="2573" to-port="1"/>
-		<edge from-layer="2573" from-port="2" to-layer="2574" to-port="0"/>
-		<edge from-layer="2574" from-port="2" to-layer="2578" to-port="0"/>
-		<edge from-layer="2575" from-port="0" to-layer="2577" to-port="1"/>
-		<edge from-layer="2576" from-port="0" to-layer="2577" to-port="2"/>
-		<edge from-layer="2577" from-port="3" to-layer="2578" to-port="1"/>
-		<edge from-layer="2578" from-port="2" to-layer="2580" to-port="0"/>
-		<edge from-layer="2579" from-port="0" to-layer="2580" to-port="1"/>
-		<edge from-layer="2580" from-port="2" to-layer="2582" to-port="0"/>
-		<edge from-layer="2581" from-port="0" to-layer="2582" to-port="1"/>
-		<edge from-layer="2582" from-port="2" to-layer="2584" to-port="0"/>
-		<edge from-layer="2583" from-port="0" to-layer="2584" to-port="1"/>
-		<edge from-layer="2584" from-port="2" to-layer="2585" to-port="0"/>
-		<edge from-layer="2585" from-port="2" to-layer="2586" to-port="0"/>
-		<edge from-layer="2586" from-port="2" to-layer="2595" to-port="0"/>
-		<edge from-layer="2586" from-port="2" to-layer="2588" to-port="0"/>
-		<edge from-layer="2586" from-port="2" to-layer="2592" to-port="0"/>
-		<edge from-layer="2587" from-port="0" to-layer="2588" to-port="1"/>
-		<edge from-layer="2588" from-port="2" to-layer="2590" to-port="0"/>
-		<edge from-layer="2589" from-port="0" to-layer="2590" to-port="1"/>
-		<edge from-layer="2590" from-port="2" to-layer="2631" to-port="0"/>
-		<edge from-layer="2591" from-port="0" to-layer="2592" to-port="1"/>
-		<edge from-layer="2592" from-port="2" to-layer="2594" to-port="0"/>
-		<edge from-layer="2593" from-port="0" to-layer="2594" to-port="1"/>
-		<edge from-layer="2594" from-port="2" to-layer="2596" to-port="0"/>
-		<edge from-layer="2595" from-port="1" to-layer="2596" to-port="1"/>
-		<edge from-layer="2596" from-port="2" to-layer="2598" to-port="0"/>
-		<edge from-layer="2597" from-port="0" to-layer="2598" to-port="1"/>
-		<edge from-layer="2598" from-port="2" to-layer="2600" to-port="0"/>
-		<edge from-layer="2599" from-port="0" to-layer="2600" to-port="1"/>
-		<edge from-layer="2600" from-port="2" to-layer="2601" to-port="0"/>
-		<edge from-layer="2601" from-port="1" to-layer="2603" to-port="0"/>
-		<edge from-layer="2602" from-port="0" to-layer="2603" to-port="1"/>
-		<edge from-layer="2603" from-port="2" to-layer="2605" to-port="0"/>
-		<edge from-layer="2604" from-port="0" to-layer="2605" to-port="1"/>
-		<edge from-layer="2605" from-port="2" to-layer="2615" to-port="0"/>
-		<edge from-layer="2606" from-port="1" to-layer="2608" to-port="0"/>
-		<edge from-layer="2607" from-port="0" to-layer="2608" to-port="1"/>
-		<edge from-layer="2608" from-port="2" to-layer="2610" to-port="0"/>
-		<edge from-layer="2609" from-port="0" to-layer="2610" to-port="1"/>
-		<edge from-layer="2610" from-port="2" to-layer="2612" to-port="0"/>
-		<edge from-layer="2611" from-port="0" to-layer="2612" to-port="1"/>
-		<edge from-layer="2612" from-port="2" to-layer="2614" to-port="0"/>
-		<edge from-layer="2613" from-port="0" to-layer="2614" to-port="1"/>
-		<edge from-layer="2614" from-port="2" to-layer="2615" to-port="1"/>
-		<edge from-layer="2615" from-port="2" to-layer="2617" to-port="0"/>
-		<edge from-layer="2615" from-port="2" to-layer="2620" to-port="0"/>
-		<edge from-layer="2616" from-port="0" to-layer="2617" to-port="1"/>
-		<edge from-layer="2617" from-port="2" to-layer="2619" to-port="0"/>
-		<edge from-layer="2618" from-port="0" to-layer="2619" to-port="1"/>
-		<edge from-layer="2619" from-port="2" to-layer="2621" to-port="0"/>
-		<edge from-layer="2620" from-port="1" to-layer="2621" to-port="1"/>
-		<edge from-layer="2621" from-port="2" to-layer="2623" to-port="0"/>
-		<edge from-layer="2622" from-port="0" to-layer="2623" to-port="1"/>
-		<edge from-layer="2623" from-port="2" to-layer="2625" to-port="0"/>
-		<edge from-layer="2624" from-port="0" to-layer="2625" to-port="1"/>
-		<edge from-layer="2625" from-port="2" to-layer="2626" to-port="0"/>
-		<edge from-layer="2626" from-port="1" to-layer="2628" to-port="0"/>
-		<edge from-layer="2627" from-port="0" to-layer="2628" to-port="1"/>
-		<edge from-layer="2628" from-port="2" to-layer="2630" to-port="0"/>
-		<edge from-layer="2629" from-port="0" to-layer="2630" to-port="1"/>
-		<edge from-layer="2630" from-port="2" to-layer="2631" to-port="1"/>
-		<edge from-layer="2631" from-port="2" to-layer="2943" to-port="1"/>
-		<edge from-layer="2631" from-port="2" to-layer="2636" to-port="0"/>
-		<edge from-layer="2631" from-port="2" to-layer="2633" to-port="0"/>
-		<edge from-layer="2632" from-port="0" to-layer="2633" to-port="1"/>
-		<edge from-layer="2633" from-port="2" to-layer="2635" to-port="0"/>
-		<edge from-layer="2634" from-port="0" to-layer="2635" to-port="1"/>
-		<edge from-layer="2635" from-port="2" to-layer="2637" to-port="0"/>
-		<edge from-layer="2636" from-port="1" to-layer="2935" to-port="0"/>
-		<edge from-layer="2636" from-port="1" to-layer="2650" to-port="0"/>
-		<edge from-layer="2636" from-port="1" to-layer="2656" to-port="0"/>
-		<edge from-layer="2636" from-port="1" to-layer="2637" to-port="1"/>
-		<edge from-layer="2636" from-port="1" to-layer="2653" to-port="0"/>
-		<edge from-layer="2636" from-port="1" to-layer="2662" to-port="0"/>
-		<edge from-layer="2637" from-port="2" to-layer="2639" to-port="0"/>
-		<edge from-layer="2638" from-port="0" to-layer="2639" to-port="1"/>
-		<edge from-layer="2639" from-port="2" to-layer="2641" to-port="0"/>
-		<edge from-layer="2640" from-port="0" to-layer="2641" to-port="1"/>
-		<edge from-layer="2641" from-port="2" to-layer="2643" to-port="0"/>
-		<edge from-layer="2642" from-port="0" to-layer="2643" to-port="1"/>
-		<edge from-layer="2643" from-port="2" to-layer="2645" to-port="0"/>
-		<edge from-layer="2644" from-port="0" to-layer="2645" to-port="1"/>
-		<edge from-layer="2645" from-port="2" to-layer="2647" to-port="0"/>
-		<edge from-layer="2646" from-port="0" to-layer="2647" to-port="1"/>
-		<edge from-layer="2647" from-port="2" to-layer="2664" to-port="0"/>
-		<edge from-layer="2648" from-port="0" to-layer="2650" to-port="1"/>
-		<edge from-layer="2649" from-port="0" to-layer="2650" to-port="2"/>
-		<edge from-layer="2650" from-port="3" to-layer="2663" to-port="0"/>
-		<edge from-layer="2651" from-port="0" to-layer="2653" to-port="1"/>
-		<edge from-layer="2652" from-port="0" to-layer="2653" to-port="2"/>
-		<edge from-layer="2653" from-port="3" to-layer="2657" to-port="0"/>
-		<edge from-layer="2654" from-port="0" to-layer="2656" to-port="1"/>
-		<edge from-layer="2655" from-port="0" to-layer="2656" to-port="2"/>
-		<edge from-layer="2656" from-port="3" to-layer="2657" to-port="1"/>
-		<edge from-layer="2657" from-port="2" to-layer="2659" to-port="0"/>
-		<edge from-layer="2658" from-port="0" to-layer="2659" to-port="1"/>
-		<edge from-layer="2659" from-port="2" to-layer="2663" to-port="1"/>
-		<edge from-layer="2660" from-port="0" to-layer="2662" to-port="1"/>
-		<edge from-layer="2661" from-port="0" to-layer="2662" to-port="2"/>
-		<edge from-layer="2662" from-port="3" to-layer="2663" to-port="2"/>
-		<edge from-layer="2663" from-port="3" to-layer="2664" to-port="1"/>
-		<edge from-layer="2664" from-port="2" to-layer="2666" to-port="0"/>
-		<edge from-layer="2664" from-port="2" to-layer="2803" to-port="1"/>
-		<edge from-layer="2665" from-port="0" to-layer="2666" to-port="1"/>
-		<edge from-layer="2666" from-port="2" to-layer="2668" to-port="0"/>
-		<edge from-layer="2667" from-port="0" to-layer="2668" to-port="1"/>
-		<edge from-layer="2668" from-port="2" to-layer="2670" to-port="0"/>
-		<edge from-layer="2669" from-port="0" to-layer="2670" to-port="1"/>
-		<edge from-layer="2670" from-port="2" to-layer="2672" to-port="0"/>
-		<edge from-layer="2670" from-port="2" to-layer="2703" to-port="0"/>
-		<edge from-layer="2670" from-port="2" to-layer="2738" to-port="0"/>
-		<edge from-layer="2671" from-port="0" to-layer="2672" to-port="1"/>
-		<edge from-layer="2672" from-port="2" to-layer="2674" to-port="0"/>
-		<edge from-layer="2672" from-port="2" to-layer="2683" to-port="0"/>
-		<edge from-layer="2673" from-port="0" to-layer="2682" to-port="0"/>
-		<edge from-layer="2674" from-port="1" to-layer="2677" to-port="0"/>
-		<edge from-layer="2674" from-port="1" to-layer="2695" to-port="0"/>
-		<edge from-layer="2674" from-port="1" to-layer="2688" to-port="0"/>
-		<edge from-layer="2675" from-port="0" to-layer="2677" to-port="1"/>
-		<edge from-layer="2676" from-port="0" to-layer="2677" to-port="2"/>
-		<edge from-layer="2677" from-port="3" to-layer="2679" to-port="0"/>
-		<edge from-layer="2677" from-port="3" to-layer="2697" to-port="0"/>
-		<edge from-layer="2678" from-port="0" to-layer="2679" to-port="1"/>
-		<edge from-layer="2679" from-port="2" to-layer="2681" to-port="0"/>
-		<edge from-layer="2680" from-port="0" to-layer="2681" to-port="1"/>
-		<edge from-layer="2681" from-port="2" to-layer="2682" to-port="2"/>
-		<edge from-layer="2682" from-port="3" to-layer="2683" to-port="1"/>
-		<edge from-layer="2683" from-port="2" to-layer="2685" to-port="0"/>
-		<edge from-layer="2684" from-port="0" to-layer="2685" to-port="1"/>
-		<edge from-layer="2685" from-port="2" to-layer="2701" to-port="0"/>
-		<edge from-layer="2686" from-port="0" to-layer="2688" to-port="1"/>
-		<edge from-layer="2687" from-port="0" to-layer="2688" to-port="2"/>
-		<edge from-layer="2688" from-port="3" to-layer="2690" to-port="0"/>
-		<edge from-layer="2689" from-port="0" to-layer="2690" to-port="1"/>
-		<edge from-layer="2690" from-port="2" to-layer="2692" to-port="0"/>
-		<edge from-layer="2691" from-port="0" to-layer="2692" to-port="1"/>
-		<edge from-layer="2692" from-port="2" to-layer="2700" to-port="0"/>
-		<edge from-layer="2693" from-port="0" to-layer="2695" to-port="1"/>
-		<edge from-layer="2694" from-port="0" to-layer="2695" to-port="2"/>
-		<edge from-layer="2695" from-port="3" to-layer="2700" to-port="1"/>
-		<edge from-layer="2696" from-port="0" to-layer="2697" to-port="1"/>
-		<edge from-layer="2697" from-port="2" to-layer="2699" to-port="0"/>
-		<edge from-layer="2698" from-port="0" to-layer="2699" to-port="1"/>
-		<edge from-layer="2699" from-port="2" to-layer="2700" to-port="2"/>
-		<edge from-layer="2700" from-port="3" to-layer="2701" to-port="1"/>
-		<edge from-layer="2701" from-port="2" to-layer="2733" to-port="0"/>
-		<edge from-layer="2702" from-port="0" to-layer="2703" to-port="1"/>
-		<edge from-layer="2703" from-port="2" to-layer="2705" to-port="0"/>
-		<edge from-layer="2703" from-port="2" to-layer="2714" to-port="0"/>
-		<edge from-layer="2704" from-port="0" to-layer="2713" to-port="0"/>
-		<edge from-layer="2705" from-port="1" to-layer="2719" to-port="0"/>
-		<edge from-layer="2705" from-port="1" to-layer="2726" to-port="0"/>
-		<edge from-layer="2705" from-port="1" to-layer="2708" to-port="0"/>
-		<edge from-layer="2706" from-port="0" to-layer="2708" to-port="1"/>
-		<edge from-layer="2707" from-port="0" to-layer="2708" to-port="2"/>
-		<edge from-layer="2708" from-port="3" to-layer="2710" to-port="0"/>
-		<edge from-layer="2708" from-port="3" to-layer="2728" to-port="0"/>
-		<edge from-layer="2709" from-port="0" to-layer="2710" to-port="1"/>
-		<edge from-layer="2710" from-port="2" to-layer="2712" to-port="0"/>
-		<edge from-layer="2711" from-port="0" to-layer="2712" to-port="1"/>
-		<edge from-layer="2712" from-port="2" to-layer="2713" to-port="2"/>
-		<edge from-layer="2713" from-port="3" to-layer="2714" to-port="1"/>
-		<edge from-layer="2714" from-port="2" to-layer="2716" to-port="0"/>
-		<edge from-layer="2715" from-port="0" to-layer="2716" to-port="1"/>
-		<edge from-layer="2716" from-port="2" to-layer="2732" to-port="0"/>
-		<edge from-layer="2717" from-port="0" to-layer="2719" to-port="1"/>
-		<edge from-layer="2718" from-port="0" to-layer="2719" to-port="2"/>
-		<edge from-layer="2719" from-port="3" to-layer="2721" to-port="0"/>
-		<edge from-layer="2720" from-port="0" to-layer="2721" to-port="1"/>
-		<edge from-layer="2721" from-port="2" to-layer="2723" to-port="0"/>
-		<edge from-layer="2722" from-port="0" to-layer="2723" to-port="1"/>
-		<edge from-layer="2723" from-port="2" to-layer="2731" to-port="0"/>
-		<edge from-layer="2724" from-port="0" to-layer="2726" to-port="1"/>
-		<edge from-layer="2725" from-port="0" to-layer="2726" to-port="2"/>
-		<edge from-layer="2726" from-port="3" to-layer="2731" to-port="1"/>
-		<edge from-layer="2727" from-port="0" to-layer="2728" to-port="1"/>
-		<edge from-layer="2728" from-port="2" to-layer="2730" to-port="0"/>
-		<edge from-layer="2729" from-port="0" to-layer="2730" to-port="1"/>
-		<edge from-layer="2730" from-port="2" to-layer="2731" to-port="2"/>
-		<edge from-layer="2731" from-port="3" to-layer="2732" to-port="1"/>
-		<edge from-layer="2732" from-port="2" to-layer="2733" to-port="1"/>
-		<edge from-layer="2733" from-port="2" to-layer="2735" to-port="0"/>
-		<edge from-layer="2734" from-port="0" to-layer="2735" to-port="1"/>
-		<edge from-layer="2735" from-port="2" to-layer="2736" to-port="0"/>
-		<edge from-layer="2736" from-port="1" to-layer="2768" to-port="0"/>
-		<edge from-layer="2737" from-port="0" to-layer="2738" to-port="1"/>
-		<edge from-layer="2738" from-port="2" to-layer="2749" to-port="0"/>
-		<edge from-layer="2738" from-port="2" to-layer="2740" to-port="0"/>
-		<edge from-layer="2739" from-port="0" to-layer="2748" to-port="0"/>
-		<edge from-layer="2740" from-port="1" to-layer="2754" to-port="0"/>
-		<edge from-layer="2740" from-port="1" to-layer="2761" to-port="0"/>
-		<edge from-layer="2740" from-port="1" to-layer="2743" to-port="0"/>
-		<edge from-layer="2741" from-port="0" to-layer="2743" to-port="1"/>
-		<edge from-layer="2742" from-port="0" to-layer="2743" to-port="2"/>
-		<edge from-layer="2743" from-port="3" to-layer="2745" to-port="0"/>
-		<edge from-layer="2743" from-port="3" to-layer="2763" to-port="0"/>
-		<edge from-layer="2744" from-port="0" to-layer="2745" to-port="1"/>
-		<edge from-layer="2745" from-port="2" to-layer="2747" to-port="0"/>
-		<edge from-layer="2746" from-port="0" to-layer="2747" to-port="1"/>
-		<edge from-layer="2747" from-port="2" to-layer="2748" to-port="2"/>
-		<edge from-layer="2748" from-port="3" to-layer="2749" to-port="1"/>
-		<edge from-layer="2749" from-port="2" to-layer="2751" to-port="0"/>
-		<edge from-layer="2750" from-port="0" to-layer="2751" to-port="1"/>
-		<edge from-layer="2751" from-port="2" to-layer="2767" to-port="0"/>
-		<edge from-layer="2752" from-port="0" to-layer="2754" to-port="1"/>
-		<edge from-layer="2753" from-port="0" to-layer="2754" to-port="2"/>
-		<edge from-layer="2754" from-port="3" to-layer="2756" to-port="0"/>
-		<edge from-layer="2755" from-port="0" to-layer="2756" to-port="1"/>
-		<edge from-layer="2756" from-port="2" to-layer="2758" to-port="0"/>
-		<edge from-layer="2757" from-port="0" to-layer="2758" to-port="1"/>
-		<edge from-layer="2758" from-port="2" to-layer="2766" to-port="0"/>
-		<edge from-layer="2759" from-port="0" to-layer="2761" to-port="1"/>
-		<edge from-layer="2760" from-port="0" to-layer="2761" to-port="2"/>
-		<edge from-layer="2761" from-port="3" to-layer="2766" to-port="1"/>
-		<edge from-layer="2762" from-port="0" to-layer="2763" to-port="1"/>
-		<edge from-layer="2763" from-port="2" to-layer="2765" to-port="0"/>
-		<edge from-layer="2764" from-port="0" to-layer="2765" to-port="1"/>
-		<edge from-layer="2765" from-port="2" to-layer="2766" to-port="2"/>
-		<edge from-layer="2766" from-port="3" to-layer="2767" to-port="1"/>
-		<edge from-layer="2767" from-port="2" to-layer="2768" to-port="1"/>
-		<edge from-layer="2768" from-port="2" to-layer="2769" to-port="0"/>
-		<edge from-layer="2768" from-port="2" to-layer="2781" to-port="0"/>
-		<edge from-layer="2769" from-port="1" to-layer="2772" to-port="0"/>
-		<edge from-layer="2769" from-port="1" to-layer="2779" to-port="0"/>
-		<edge from-layer="2769" from-port="1" to-layer="2793" to-port="0"/>
-		<edge from-layer="2769" from-port="1" to-layer="2790" to-port="0"/>
-		<edge from-layer="2770" from-port="0" to-layer="2772" to-port="1"/>
-		<edge from-layer="2771" from-port="0" to-layer="2772" to-port="2"/>
-		<edge from-layer="2772" from-port="3" to-layer="2774" to-port="0"/>
-		<edge from-layer="2772" from-port="3" to-layer="2785" to-port="0"/>
-		<edge from-layer="2773" from-port="0" to-layer="2774" to-port="1"/>
-		<edge from-layer="2774" from-port="2" to-layer="2776" to-port="0"/>
-		<edge from-layer="2775" from-port="0" to-layer="2776" to-port="1"/>
-		<edge from-layer="2776" from-port="2" to-layer="2780" to-port="0"/>
-		<edge from-layer="2777" from-port="0" to-layer="2779" to-port="1"/>
-		<edge from-layer="2778" from-port="0" to-layer="2779" to-port="2"/>
-		<edge from-layer="2779" from-port="3" to-layer="2780" to-port="2"/>
-		<edge from-layer="2780" from-port="3" to-layer="2781" to-port="1"/>
-		<edge from-layer="2781" from-port="2" to-layer="2783" to-port="0"/>
-		<edge from-layer="2782" from-port="0" to-layer="2783" to-port="1"/>
-		<edge from-layer="2783" from-port="2" to-layer="2799" to-port="0"/>
-		<edge from-layer="2784" from-port="0" to-layer="2785" to-port="1"/>
-		<edge from-layer="2785" from-port="2" to-layer="2787" to-port="0"/>
-		<edge from-layer="2786" from-port="0" to-layer="2787" to-port="1"/>
-		<edge from-layer="2787" from-port="2" to-layer="2798" to-port="0"/>
-		<edge from-layer="2788" from-port="0" to-layer="2790" to-port="1"/>
-		<edge from-layer="2789" from-port="0" to-layer="2790" to-port="2"/>
-		<edge from-layer="2790" from-port="3" to-layer="2798" to-port="1"/>
-		<edge from-layer="2791" from-port="0" to-layer="2793" to-port="1"/>
-		<edge from-layer="2792" from-port="0" to-layer="2793" to-port="2"/>
-		<edge from-layer="2793" from-port="3" to-layer="2795" to-port="0"/>
-		<edge from-layer="2794" from-port="0" to-layer="2795" to-port="1"/>
-		<edge from-layer="2795" from-port="2" to-layer="2797" to-port="0"/>
-		<edge from-layer="2796" from-port="0" to-layer="2797" to-port="1"/>
-		<edge from-layer="2797" from-port="2" to-layer="2798" to-port="2"/>
-		<edge from-layer="2798" from-port="3" to-layer="2799" to-port="1"/>
-		<edge from-layer="2799" from-port="2" to-layer="2801" to-port="0"/>
-		<edge from-layer="2800" from-port="0" to-layer="2801" to-port="1"/>
-		<edge from-layer="2801" from-port="2" to-layer="2802" to-port="1"/>
-		<edge from-layer="2802" from-port="2" to-layer="2803" to-port="0"/>
-		<edge from-layer="2803" from-port="2" to-layer="2892" to-port="1"/>
-		<edge from-layer="2803" from-port="2" to-layer="2805" to-port="0"/>
-		<edge from-layer="2804" from-port="0" to-layer="2805" to-port="1"/>
-		<edge from-layer="2805" from-port="2" to-layer="2807" to-port="0"/>
-		<edge from-layer="2806" from-port="0" to-layer="2807" to-port="1"/>
-		<edge from-layer="2807" from-port="2" to-layer="2809" to-port="0"/>
-		<edge from-layer="2808" from-port="0" to-layer="2809" to-port="1"/>
-		<edge from-layer="2809" from-port="2" to-layer="2811" to-port="0"/>
-		<edge from-layer="2810" from-port="0" to-layer="2811" to-port="1"/>
-		<edge from-layer="2811" from-port="2" to-layer="2822" to-port="0"/>
-		<edge from-layer="2811" from-port="2" to-layer="2813" to-port="0"/>
-		<edge from-layer="2812" from-port="0" to-layer="2821" to-port="0"/>
-		<edge from-layer="2813" from-port="1" to-layer="2816" to-port="0"/>
-		<edge from-layer="2813" from-port="1" to-layer="2834" to-port="0"/>
-		<edge from-layer="2813" from-port="1" to-layer="2827" to-port="0"/>
-		<edge from-layer="2814" from-port="0" to-layer="2816" to-port="1"/>
-		<edge from-layer="2815" from-port="0" to-layer="2816" to-port="2"/>
-		<edge from-layer="2816" from-port="3" to-layer="2836" to-port="0"/>
-		<edge from-layer="2816" from-port="3" to-layer="2818" to-port="0"/>
-		<edge from-layer="2817" from-port="0" to-layer="2818" to-port="1"/>
-		<edge from-layer="2818" from-port="2" to-layer="2820" to-port="0"/>
-		<edge from-layer="2819" from-port="0" to-layer="2820" to-port="1"/>
-		<edge from-layer="2820" from-port="2" to-layer="2821" to-port="2"/>
-		<edge from-layer="2821" from-port="3" to-layer="2822" to-port="1"/>
-		<edge from-layer="2822" from-port="2" to-layer="2824" to-port="0"/>
-		<edge from-layer="2823" from-port="0" to-layer="2824" to-port="1"/>
-		<edge from-layer="2824" from-port="2" to-layer="2840" to-port="0"/>
-		<edge from-layer="2825" from-port="0" to-layer="2827" to-port="1"/>
-		<edge from-layer="2826" from-port="0" to-layer="2827" to-port="2"/>
-		<edge from-layer="2827" from-port="3" to-layer="2829" to-port="0"/>
-		<edge from-layer="2828" from-port="0" to-layer="2829" to-port="1"/>
-		<edge from-layer="2829" from-port="2" to-layer="2831" to-port="0"/>
-		<edge from-layer="2830" from-port="0" to-layer="2831" to-port="1"/>
-		<edge from-layer="2831" from-port="2" to-layer="2839" to-port="0"/>
-		<edge from-layer="2832" from-port="0" to-layer="2834" to-port="1"/>
-		<edge from-layer="2833" from-port="0" to-layer="2834" to-port="2"/>
-		<edge from-layer="2834" from-port="3" to-layer="2839" to-port="1"/>
-		<edge from-layer="2835" from-port="0" to-layer="2836" to-port="1"/>
-		<edge from-layer="2836" from-port="2" to-layer="2838" to-port="0"/>
-		<edge from-layer="2837" from-port="0" to-layer="2838" to-port="1"/>
-		<edge from-layer="2838" from-port="2" to-layer="2839" to-port="2"/>
-		<edge from-layer="2839" from-port="3" to-layer="2840" to-port="1"/>
-		<edge from-layer="2840" from-port="2" to-layer="2847" to-port="0"/>
-		<edge from-layer="2841" from-port="0" to-layer="2842" to-port="1"/>
-		<edge from-layer="2842" from-port="2" to-layer="2843" to-port="0"/>
-		<edge from-layer="2843" from-port="2" to-layer="2845" to-port="0"/>
-		<edge from-layer="2844" from-port="0" to-layer="2845" to-port="1"/>
-		<edge from-layer="2845" from-port="2" to-layer="2846" to-port="0"/>
-		<edge from-layer="2846" from-port="2" to-layer="2847" to-port="1"/>
-		<edge from-layer="2847" from-port="2" to-layer="2849" to-port="0"/>
-		<edge from-layer="2848" from-port="0" to-layer="2849" to-port="1"/>
-		<edge from-layer="2849" from-port="2" to-layer="2850" to-port="0"/>
-		<edge from-layer="2850" from-port="1" to-layer="2857" to-port="0"/>
-		<edge from-layer="2851" from-port="0" to-layer="2852" to-port="1"/>
-		<edge from-layer="2852" from-port="2" to-layer="2853" to-port="0"/>
-		<edge from-layer="2853" from-port="2" to-layer="2855" to-port="0"/>
-		<edge from-layer="2854" from-port="0" to-layer="2855" to-port="1"/>
-		<edge from-layer="2855" from-port="2" to-layer="2856" to-port="0"/>
-		<edge from-layer="2856" from-port="2" to-layer="2857" to-port="1"/>
-		<edge from-layer="2857" from-port="2" to-layer="2858" to-port="0"/>
-		<edge from-layer="2857" from-port="2" to-layer="2870" to-port="0"/>
-		<edge from-layer="2858" from-port="1" to-layer="2882" to-port="0"/>
-		<edge from-layer="2858" from-port="1" to-layer="2879" to-port="0"/>
-		<edge from-layer="2858" from-port="1" to-layer="2861" to-port="0"/>
-		<edge from-layer="2858" from-port="1" to-layer="2868" to-port="0"/>
-		<edge from-layer="2859" from-port="0" to-layer="2861" to-port="1"/>
-		<edge from-layer="2860" from-port="0" to-layer="2861" to-port="2"/>
-		<edge from-layer="2861" from-port="3" to-layer="2863" to-port="0"/>
-		<edge from-layer="2861" from-port="3" to-layer="2874" to-port="0"/>
-		<edge from-layer="2862" from-port="0" to-layer="2863" to-port="1"/>
-		<edge from-layer="2863" from-port="2" to-layer="2865" to-port="0"/>
-		<edge from-layer="2864" from-port="0" to-layer="2865" to-port="1"/>
-		<edge from-layer="2865" from-port="2" to-layer="2869" to-port="0"/>
-		<edge from-layer="2866" from-port="0" to-layer="2868" to-port="1"/>
-		<edge from-layer="2867" from-port="0" to-layer="2868" to-port="2"/>
-		<edge from-layer="2868" from-port="3" to-layer="2869" to-port="2"/>
-		<edge from-layer="2869" from-port="3" to-layer="2870" to-port="1"/>
-		<edge from-layer="2870" from-port="2" to-layer="2872" to-port="0"/>
-		<edge from-layer="2871" from-port="0" to-layer="2872" to-port="1"/>
-		<edge from-layer="2872" from-port="2" to-layer="2888" to-port="0"/>
-		<edge from-layer="2873" from-port="0" to-layer="2874" to-port="1"/>
-		<edge from-layer="2874" from-port="2" to-layer="2876" to-port="0"/>
-		<edge from-layer="2875" from-port="0" to-layer="2876" to-port="1"/>
-		<edge from-layer="2876" from-port="2" to-layer="2887" to-port="0"/>
-		<edge from-layer="2877" from-port="0" to-layer="2879" to-port="1"/>
-		<edge from-layer="2878" from-port="0" to-layer="2879" to-port="2"/>
-		<edge from-layer="2879" from-port="3" to-layer="2887" to-port="1"/>
-		<edge from-layer="2880" from-port="0" to-layer="2882" to-port="1"/>
-		<edge from-layer="2881" from-port="0" to-layer="2882" to-port="2"/>
-		<edge from-layer="2882" from-port="3" to-layer="2884" to-port="0"/>
-		<edge from-layer="2883" from-port="0" to-layer="2884" to-port="1"/>
-		<edge from-layer="2884" from-port="2" to-layer="2886" to-port="0"/>
-		<edge from-layer="2885" from-port="0" to-layer="2886" to-port="1"/>
-		<edge from-layer="2886" from-port="2" to-layer="2887" to-port="2"/>
-		<edge from-layer="2887" from-port="3" to-layer="2888" to-port="1"/>
-		<edge from-layer="2888" from-port="2" to-layer="2890" to-port="0"/>
-		<edge from-layer="2889" from-port="0" to-layer="2890" to-port="1"/>
-		<edge from-layer="2890" from-port="2" to-layer="2891" to-port="1"/>
-		<edge from-layer="2891" from-port="2" to-layer="2892" to-port="0"/>
-		<edge from-layer="2892" from-port="2" to-layer="2932" to-port="1"/>
-		<edge from-layer="2892" from-port="2" to-layer="2894" to-port="0"/>
-		<edge from-layer="2893" from-port="0" to-layer="2894" to-port="1"/>
-		<edge from-layer="2894" from-port="2" to-layer="2896" to-port="0"/>
-		<edge from-layer="2895" from-port="0" to-layer="2896" to-port="1"/>
-		<edge from-layer="2896" from-port="2" to-layer="2898" to-port="0"/>
-		<edge from-layer="2897" from-port="0" to-layer="2898" to-port="1"/>
-		<edge from-layer="2898" from-port="2" to-layer="2900" to-port="0"/>
-		<edge from-layer="2899" from-port="0" to-layer="2900" to-port="1"/>
-		<edge from-layer="2900" from-port="2" to-layer="2901" to-port="1"/>
-		<edge from-layer="2901" from-port="2" to-layer="2905" to-port="0"/>
-		<edge from-layer="2901" from-port="2" to-layer="2926" to-port="0"/>
-		<edge from-layer="2901" from-port="2" to-layer="2916" to-port="0"/>
-		<edge from-layer="2902" from-port="0" to-layer="2916" to-port="1"/>
-		<edge from-layer="2903" from-port="0" to-layer="2914" to-port="0"/>
-		<edge from-layer="2904" from-port="0" to-layer="2914" to-port="1"/>
-		<edge from-layer="2905" from-port="1" to-layer="2908" to-port="0"/>
-		<edge from-layer="2906" from-port="0" to-layer="2908" to-port="1"/>
-		<edge from-layer="2907" from-port="0" to-layer="2908" to-port="2"/>
-		<edge from-layer="2908" from-port="3" to-layer="2910" to-port="0"/>
-		<edge from-layer="2909" from-port="0" to-layer="2910" to-port="1"/>
-		<edge from-layer="2910" from-port="2" to-layer="2912" to-port="0"/>
-		<edge from-layer="2911" from-port="0" to-layer="2912" to-port="1"/>
-		<edge from-layer="2912" from-port="2" to-layer="2920" to-port="2"/>
-		<edge from-layer="2912" from-port="2" to-layer="2914" to-port="2"/>
-		<edge from-layer="2912" from-port="2" to-layer="2923" to-port="0"/>
-		<edge from-layer="2913" from-port="0" to-layer="2914" to-port="3"/>
-		<edge from-layer="2914" from-port="4" to-layer="2916" to-port="2"/>
-		<edge from-layer="2915" from-port="0" to-layer="2916" to-port="3"/>
-		<edge from-layer="2916" from-port="4" to-layer="2928" to-port="0"/>
-		<edge from-layer="2917" from-port="0" to-layer="2920" to-port="0"/>
-		<edge from-layer="2918" from-port="0" to-layer="2924" to-port="1"/>
-		<edge from-layer="2918" from-port="0" to-layer="2920" to-port="1"/>
-		<edge from-layer="2919" from-port="0" to-layer="2924" to-port="3"/>
-		<edge from-layer="2919" from-port="0" to-layer="2920" to-port="3"/>
-		<edge from-layer="2920" from-port="4" to-layer="2926" to-port="1"/>
-		<edge from-layer="2921" from-port="0" to-layer="2924" to-port="0"/>
-		<edge from-layer="2922" from-port="0" to-layer="2923" to-port="1"/>
-		<edge from-layer="2923" from-port="2" to-layer="2924" to-port="2"/>
-		<edge from-layer="2924" from-port="4" to-layer="2926" to-port="2"/>
-		<edge from-layer="2925" from-port="0" to-layer="2926" to-port="3"/>
-		<edge from-layer="2926" from-port="4" to-layer="2927" to-port="0"/>
-		<edge from-layer="2927" from-port="1" to-layer="2928" to-port="1"/>
-		<edge from-layer="2928" from-port="2" to-layer="2930" to-port="0"/>
-		<edge from-layer="2929" from-port="0" to-layer="2930" to-port="1"/>
-		<edge from-layer="2930" from-port="2" to-layer="2931" to-port="1"/>
-		<edge from-layer="2931" from-port="2" to-layer="2932" to-port="0"/>
-		<edge from-layer="2932" from-port="2" to-layer="2936" to-port="0"/>
-		<edge from-layer="2933" from-port="0" to-layer="2935" to-port="1"/>
-		<edge from-layer="2934" from-port="0" to-layer="2935" to-port="2"/>
-		<edge from-layer="2935" from-port="3" to-layer="2936" to-port="1"/>
-		<edge from-layer="2936" from-port="2" to-layer="2938" to-port="0"/>
-		<edge from-layer="2937" from-port="0" to-layer="2938" to-port="1"/>
-		<edge from-layer="2938" from-port="2" to-layer="2940" to-port="0"/>
-		<edge from-layer="2939" from-port="0" to-layer="2940" to-port="1"/>
-		<edge from-layer="2940" from-port="2" to-layer="2942" to-port="0"/>
-		<edge from-layer="2941" from-port="0" to-layer="2942" to-port="1"/>
-		<edge from-layer="2942" from-port="2" to-layer="2943" to-port="0"/>
-		<edge from-layer="2943" from-port="2" to-layer="2944" to-port="0"/>
-		<edge from-layer="2944" from-port="2" to-layer="2953" to-port="0"/>
-		<edge from-layer="2944" from-port="2" to-layer="2946" to-port="0"/>
-		<edge from-layer="2944" from-port="2" to-layer="2950" to-port="0"/>
-		<edge from-layer="2945" from-port="0" to-layer="2946" to-port="1"/>
-		<edge from-layer="2946" from-port="2" to-layer="2948" to-port="0"/>
-		<edge from-layer="2947" from-port="0" to-layer="2948" to-port="1"/>
-		<edge from-layer="2948" from-port="2" to-layer="2989" to-port="0"/>
-		<edge from-layer="2949" from-port="0" to-layer="2950" to-port="1"/>
-		<edge from-layer="2950" from-port="2" to-layer="2952" to-port="0"/>
-		<edge from-layer="2951" from-port="0" to-layer="2952" to-port="1"/>
-		<edge from-layer="2952" from-port="2" to-layer="2954" to-port="0"/>
-		<edge from-layer="2953" from-port="1" to-layer="2954" to-port="1"/>
-		<edge from-layer="2954" from-port="2" to-layer="2956" to-port="0"/>
-		<edge from-layer="2955" from-port="0" to-layer="2956" to-port="1"/>
-		<edge from-layer="2956" from-port="2" to-layer="2958" to-port="0"/>
-		<edge from-layer="2957" from-port="0" to-layer="2958" to-port="1"/>
-		<edge from-layer="2958" from-port="2" to-layer="2959" to-port="0"/>
-		<edge from-layer="2959" from-port="1" to-layer="2961" to-port="0"/>
-		<edge from-layer="2960" from-port="0" to-layer="2961" to-port="1"/>
-		<edge from-layer="2961" from-port="2" to-layer="2963" to-port="0"/>
-		<edge from-layer="2962" from-port="0" to-layer="2963" to-port="1"/>
-		<edge from-layer="2963" from-port="2" to-layer="2973" to-port="0"/>
-		<edge from-layer="2964" from-port="1" to-layer="2966" to-port="0"/>
-		<edge from-layer="2965" from-port="0" to-layer="2966" to-port="1"/>
-		<edge from-layer="2966" from-port="2" to-layer="2968" to-port="0"/>
-		<edge from-layer="2967" from-port="0" to-layer="2968" to-port="1"/>
-		<edge from-layer="2968" from-port="2" to-layer="2970" to-port="0"/>
-		<edge from-layer="2969" from-port="0" to-layer="2970" to-port="1"/>
-		<edge from-layer="2970" from-port="2" to-layer="2972" to-port="0"/>
-		<edge from-layer="2971" from-port="0" to-layer="2972" to-port="1"/>
-		<edge from-layer="2972" from-port="2" to-layer="2973" to-port="1"/>
-		<edge from-layer="2973" from-port="2" to-layer="2975" to-port="0"/>
-		<edge from-layer="2973" from-port="2" to-layer="2978" to-port="0"/>
-		<edge from-layer="2974" from-port="0" to-layer="2975" to-port="1"/>
-		<edge from-layer="2975" from-port="2" to-layer="2977" to-port="0"/>
-		<edge from-layer="2976" from-port="0" to-layer="2977" to-port="1"/>
-		<edge from-layer="2977" from-port="2" to-layer="2979" to-port="0"/>
-		<edge from-layer="2978" from-port="1" to-layer="2979" to-port="1"/>
-		<edge from-layer="2979" from-port="2" to-layer="2981" to-port="0"/>
-		<edge from-layer="2980" from-port="0" to-layer="2981" to-port="1"/>
-		<edge from-layer="2981" from-port="2" to-layer="2983" to-port="0"/>
-		<edge from-layer="2982" from-port="0" to-layer="2983" to-port="1"/>
-		<edge from-layer="2983" from-port="2" to-layer="2984" to-port="0"/>
-		<edge from-layer="2984" from-port="1" to-layer="2986" to-port="0"/>
-		<edge from-layer="2985" from-port="0" to-layer="2986" to-port="1"/>
-		<edge from-layer="2986" from-port="2" to-layer="2988" to-port="0"/>
-		<edge from-layer="2987" from-port="0" to-layer="2988" to-port="1"/>
-		<edge from-layer="2988" from-port="2" to-layer="2989" to-port="1"/>
-		<edge from-layer="2989" from-port="2" to-layer="2994" to-port="0"/>
-		<edge from-layer="2989" from-port="2" to-layer="2991" to-port="0"/>
-		<edge from-layer="2989" from-port="2" to-layer="3301" to-port="1"/>
-		<edge from-layer="2990" from-port="0" to-layer="2991" to-port="1"/>
-		<edge from-layer="2991" from-port="2" to-layer="2993" to-port="0"/>
-		<edge from-layer="2992" from-port="0" to-layer="2993" to-port="1"/>
-		<edge from-layer="2993" from-port="2" to-layer="2995" to-port="0"/>
-		<edge from-layer="2994" from-port="1" to-layer="3293" to-port="0"/>
-		<edge from-layer="2994" from-port="1" to-layer="3014" to-port="0"/>
-		<edge from-layer="2994" from-port="1" to-layer="3011" to-port="0"/>
-		<edge from-layer="2994" from-port="1" to-layer="3008" to-port="0"/>
-		<edge from-layer="2994" from-port="1" to-layer="3020" to-port="0"/>
-		<edge from-layer="2994" from-port="1" to-layer="2995" to-port="1"/>
-		<edge from-layer="2995" from-port="2" to-layer="2997" to-port="0"/>
-		<edge from-layer="2996" from-port="0" to-layer="2997" to-port="1"/>
-		<edge from-layer="2997" from-port="2" to-layer="2999" to-port="0"/>
-		<edge from-layer="2998" from-port="0" to-layer="2999" to-port="1"/>
-		<edge from-layer="2999" from-port="2" to-layer="3001" to-port="0"/>
-		<edge from-layer="3000" from-port="0" to-layer="3001" to-port="1"/>
-		<edge from-layer="3001" from-port="2" to-layer="3003" to-port="0"/>
-		<edge from-layer="3002" from-port="0" to-layer="3003" to-port="1"/>
-		<edge from-layer="3003" from-port="2" to-layer="3005" to-port="0"/>
-		<edge from-layer="3004" from-port="0" to-layer="3005" to-port="1"/>
-		<edge from-layer="3005" from-port="2" to-layer="3022" to-port="0"/>
-		<edge from-layer="3006" from-port="0" to-layer="3008" to-port="1"/>
-		<edge from-layer="3007" from-port="0" to-layer="3008" to-port="2"/>
-		<edge from-layer="3008" from-port="3" to-layer="3021" to-port="0"/>
-		<edge from-layer="3009" from-port="0" to-layer="3011" to-port="1"/>
-		<edge from-layer="3010" from-port="0" to-layer="3011" to-port="2"/>
-		<edge from-layer="3011" from-port="3" to-layer="3015" to-port="0"/>
-		<edge from-layer="3012" from-port="0" to-layer="3014" to-port="1"/>
-		<edge from-layer="3013" from-port="0" to-layer="3014" to-port="2"/>
-		<edge from-layer="3014" from-port="3" to-layer="3015" to-port="1"/>
-		<edge from-layer="3015" from-port="2" to-layer="3017" to-port="0"/>
-		<edge from-layer="3016" from-port="0" to-layer="3017" to-port="1"/>
-		<edge from-layer="3017" from-port="2" to-layer="3021" to-port="1"/>
-		<edge from-layer="3018" from-port="0" to-layer="3020" to-port="1"/>
-		<edge from-layer="3019" from-port="0" to-layer="3020" to-port="2"/>
-		<edge from-layer="3020" from-port="3" to-layer="3021" to-port="2"/>
-		<edge from-layer="3021" from-port="3" to-layer="3022" to-port="1"/>
-		<edge from-layer="3022" from-port="2" to-layer="3024" to-port="0"/>
-		<edge from-layer="3022" from-port="2" to-layer="3161" to-port="1"/>
-		<edge from-layer="3023" from-port="0" to-layer="3024" to-port="1"/>
-		<edge from-layer="3024" from-port="2" to-layer="3026" to-port="0"/>
-		<edge from-layer="3025" from-port="0" to-layer="3026" to-port="1"/>
-		<edge from-layer="3026" from-port="2" to-layer="3028" to-port="0"/>
-		<edge from-layer="3027" from-port="0" to-layer="3028" to-port="1"/>
-		<edge from-layer="3028" from-port="2" to-layer="3061" to-port="0"/>
-		<edge from-layer="3028" from-port="2" to-layer="3030" to-port="0"/>
-		<edge from-layer="3028" from-port="2" to-layer="3096" to-port="0"/>
-		<edge from-layer="3029" from-port="0" to-layer="3030" to-port="1"/>
-		<edge from-layer="3030" from-port="2" to-layer="3032" to-port="0"/>
-		<edge from-layer="3030" from-port="2" to-layer="3041" to-port="0"/>
-		<edge from-layer="3031" from-port="0" to-layer="3040" to-port="0"/>
-		<edge from-layer="3032" from-port="1" to-layer="3035" to-port="0"/>
-		<edge from-layer="3032" from-port="1" to-layer="3046" to-port="0"/>
-		<edge from-layer="3032" from-port="1" to-layer="3053" to-port="0"/>
-		<edge from-layer="3033" from-port="0" to-layer="3035" to-port="1"/>
-		<edge from-layer="3034" from-port="0" to-layer="3035" to-port="2"/>
-		<edge from-layer="3035" from-port="3" to-layer="3037" to-port="0"/>
-		<edge from-layer="3035" from-port="3" to-layer="3055" to-port="0"/>
-		<edge from-layer="3036" from-port="0" to-layer="3037" to-port="1"/>
-		<edge from-layer="3037" from-port="2" to-layer="3039" to-port="0"/>
-		<edge from-layer="3038" from-port="0" to-layer="3039" to-port="1"/>
-		<edge from-layer="3039" from-port="2" to-layer="3040" to-port="2"/>
-		<edge from-layer="3040" from-port="3" to-layer="3041" to-port="1"/>
-		<edge from-layer="3041" from-port="2" to-layer="3043" to-port="0"/>
-		<edge from-layer="3042" from-port="0" to-layer="3043" to-port="1"/>
-		<edge from-layer="3043" from-port="2" to-layer="3059" to-port="0"/>
-		<edge from-layer="3044" from-port="0" to-layer="3046" to-port="1"/>
-		<edge from-layer="3045" from-port="0" to-layer="3046" to-port="2"/>
-		<edge from-layer="3046" from-port="3" to-layer="3048" to-port="0"/>
-		<edge from-layer="3047" from-port="0" to-layer="3048" to-port="1"/>
-		<edge from-layer="3048" from-port="2" to-layer="3050" to-port="0"/>
-		<edge from-layer="3049" from-port="0" to-layer="3050" to-port="1"/>
-		<edge from-layer="3050" from-port="2" to-layer="3058" to-port="0"/>
-		<edge from-layer="3051" from-port="0" to-layer="3053" to-port="1"/>
-		<edge from-layer="3052" from-port="0" to-layer="3053" to-port="2"/>
-		<edge from-layer="3053" from-port="3" to-layer="3058" to-port="1"/>
-		<edge from-layer="3054" from-port="0" to-layer="3055" to-port="1"/>
-		<edge from-layer="3055" from-port="2" to-layer="3057" to-port="0"/>
-		<edge from-layer="3056" from-port="0" to-layer="3057" to-port="1"/>
-		<edge from-layer="3057" from-port="2" to-layer="3058" to-port="2"/>
-		<edge from-layer="3058" from-port="3" to-layer="3059" to-port="1"/>
-		<edge from-layer="3059" from-port="2" to-layer="3091" to-port="0"/>
-		<edge from-layer="3060" from-port="0" to-layer="3061" to-port="1"/>
-		<edge from-layer="3061" from-port="2" to-layer="3072" to-port="0"/>
-		<edge from-layer="3061" from-port="2" to-layer="3063" to-port="0"/>
-		<edge from-layer="3062" from-port="0" to-layer="3071" to-port="0"/>
-		<edge from-layer="3063" from-port="1" to-layer="3066" to-port="0"/>
-		<edge from-layer="3063" from-port="1" to-layer="3084" to-port="0"/>
-		<edge from-layer="3063" from-port="1" to-layer="3077" to-port="0"/>
-		<edge from-layer="3064" from-port="0" to-layer="3066" to-port="1"/>
-		<edge from-layer="3065" from-port="0" to-layer="3066" to-port="2"/>
-		<edge from-layer="3066" from-port="3" to-layer="3068" to-port="0"/>
-		<edge from-layer="3066" from-port="3" to-layer="3086" to-port="0"/>
-		<edge from-layer="3067" from-port="0" to-layer="3068" to-port="1"/>
-		<edge from-layer="3068" from-port="2" to-layer="3070" to-port="0"/>
-		<edge from-layer="3069" from-port="0" to-layer="3070" to-port="1"/>
-		<edge from-layer="3070" from-port="2" to-layer="3071" to-port="2"/>
-		<edge from-layer="3071" from-port="3" to-layer="3072" to-port="1"/>
-		<edge from-layer="3072" from-port="2" to-layer="3074" to-port="0"/>
-		<edge from-layer="3073" from-port="0" to-layer="3074" to-port="1"/>
-		<edge from-layer="3074" from-port="2" to-layer="3090" to-port="0"/>
-		<edge from-layer="3075" from-port="0" to-layer="3077" to-port="1"/>
-		<edge from-layer="3076" from-port="0" to-layer="3077" to-port="2"/>
-		<edge from-layer="3077" from-port="3" to-layer="3079" to-port="0"/>
-		<edge from-layer="3078" from-port="0" to-layer="3079" to-port="1"/>
-		<edge from-layer="3079" from-port="2" to-layer="3081" to-port="0"/>
-		<edge from-layer="3080" from-port="0" to-layer="3081" to-port="1"/>
-		<edge from-layer="3081" from-port="2" to-layer="3089" to-port="0"/>
-		<edge from-layer="3082" from-port="0" to-layer="3084" to-port="1"/>
-		<edge from-layer="3083" from-port="0" to-layer="3084" to-port="2"/>
-		<edge from-layer="3084" from-port="3" to-layer="3089" to-port="1"/>
-		<edge from-layer="3085" from-port="0" to-layer="3086" to-port="1"/>
-		<edge from-layer="3086" from-port="2" to-layer="3088" to-port="0"/>
-		<edge from-layer="3087" from-port="0" to-layer="3088" to-port="1"/>
-		<edge from-layer="3088" from-port="2" to-layer="3089" to-port="2"/>
-		<edge from-layer="3089" from-port="3" to-layer="3090" to-port="1"/>
-		<edge from-layer="3090" from-port="2" to-layer="3091" to-port="1"/>
-		<edge from-layer="3091" from-port="2" to-layer="3093" to-port="0"/>
-		<edge from-layer="3092" from-port="0" to-layer="3093" to-port="1"/>
-		<edge from-layer="3093" from-port="2" to-layer="3094" to-port="0"/>
-		<edge from-layer="3094" from-port="1" to-layer="3126" to-port="0"/>
-		<edge from-layer="3095" from-port="0" to-layer="3096" to-port="1"/>
-		<edge from-layer="3096" from-port="2" to-layer="3107" to-port="0"/>
-		<edge from-layer="3096" from-port="2" to-layer="3098" to-port="0"/>
-		<edge from-layer="3097" from-port="0" to-layer="3106" to-port="0"/>
-		<edge from-layer="3098" from-port="1" to-layer="3101" to-port="0"/>
-		<edge from-layer="3098" from-port="1" to-layer="3119" to-port="0"/>
-		<edge from-layer="3098" from-port="1" to-layer="3112" to-port="0"/>
-		<edge from-layer="3099" from-port="0" to-layer="3101" to-port="1"/>
-		<edge from-layer="3100" from-port="0" to-layer="3101" to-port="2"/>
-		<edge from-layer="3101" from-port="3" to-layer="3121" to-port="0"/>
-		<edge from-layer="3101" from-port="3" to-layer="3103" to-port="0"/>
-		<edge from-layer="3102" from-port="0" to-layer="3103" to-port="1"/>
-		<edge from-layer="3103" from-port="2" to-layer="3105" to-port="0"/>
-		<edge from-layer="3104" from-port="0" to-layer="3105" to-port="1"/>
-		<edge from-layer="3105" from-port="2" to-layer="3106" to-port="2"/>
-		<edge from-layer="3106" from-port="3" to-layer="3107" to-port="1"/>
-		<edge from-layer="3107" from-port="2" to-layer="3109" to-port="0"/>
-		<edge from-layer="3108" from-port="0" to-layer="3109" to-port="1"/>
-		<edge from-layer="3109" from-port="2" to-layer="3125" to-port="0"/>
-		<edge from-layer="3110" from-port="0" to-layer="3112" to-port="1"/>
-		<edge from-layer="3111" from-port="0" to-layer="3112" to-port="2"/>
-		<edge from-layer="3112" from-port="3" to-layer="3114" to-port="0"/>
-		<edge from-layer="3113" from-port="0" to-layer="3114" to-port="1"/>
-		<edge from-layer="3114" from-port="2" to-layer="3116" to-port="0"/>
-		<edge from-layer="3115" from-port="0" to-layer="3116" to-port="1"/>
-		<edge from-layer="3116" from-port="2" to-layer="3124" to-port="0"/>
-		<edge from-layer="3117" from-port="0" to-layer="3119" to-port="1"/>
-		<edge from-layer="3118" from-port="0" to-layer="3119" to-port="2"/>
-		<edge from-layer="3119" from-port="3" to-layer="3124" to-port="1"/>
-		<edge from-layer="3120" from-port="0" to-layer="3121" to-port="1"/>
-		<edge from-layer="3121" from-port="2" to-layer="3123" to-port="0"/>
-		<edge from-layer="3122" from-port="0" to-layer="3123" to-port="1"/>
-		<edge from-layer="3123" from-port="2" to-layer="3124" to-port="2"/>
-		<edge from-layer="3124" from-port="3" to-layer="3125" to-port="1"/>
-		<edge from-layer="3125" from-port="2" to-layer="3126" to-port="1"/>
-		<edge from-layer="3126" from-port="2" to-layer="3127" to-port="0"/>
-		<edge from-layer="3126" from-port="2" to-layer="3139" to-port="0"/>
-		<edge from-layer="3127" from-port="1" to-layer="3148" to-port="0"/>
-		<edge from-layer="3127" from-port="1" to-layer="3137" to-port="0"/>
-		<edge from-layer="3127" from-port="1" to-layer="3151" to-port="0"/>
-		<edge from-layer="3127" from-port="1" to-layer="3130" to-port="0"/>
-		<edge from-layer="3128" from-port="0" to-layer="3130" to-port="1"/>
-		<edge from-layer="3129" from-port="0" to-layer="3130" to-port="2"/>
-		<edge from-layer="3130" from-port="3" to-layer="3143" to-port="0"/>
-		<edge from-layer="3130" from-port="3" to-layer="3132" to-port="0"/>
-		<edge from-layer="3131" from-port="0" to-layer="3132" to-port="1"/>
-		<edge from-layer="3132" from-port="2" to-layer="3134" to-port="0"/>
-		<edge from-layer="3133" from-port="0" to-layer="3134" to-port="1"/>
-		<edge from-layer="3134" from-port="2" to-layer="3138" to-port="0"/>
-		<edge from-layer="3135" from-port="0" to-layer="3137" to-port="1"/>
-		<edge from-layer="3136" from-port="0" to-layer="3137" to-port="2"/>
-		<edge from-layer="3137" from-port="3" to-layer="3138" to-port="2"/>
-		<edge from-layer="3138" from-port="3" to-layer="3139" to-port="1"/>
-		<edge from-layer="3139" from-port="2" to-layer="3141" to-port="0"/>
-		<edge from-layer="3140" from-port="0" to-layer="3141" to-port="1"/>
-		<edge from-layer="3141" from-port="2" to-layer="3157" to-port="0"/>
-		<edge from-layer="3142" from-port="0" to-layer="3143" to-port="1"/>
-		<edge from-layer="3143" from-port="2" to-layer="3145" to-port="0"/>
-		<edge from-layer="3144" from-port="0" to-layer="3145" to-port="1"/>
-		<edge from-layer="3145" from-port="2" to-layer="3156" to-port="0"/>
-		<edge from-layer="3146" from-port="0" to-layer="3148" to-port="1"/>
-		<edge from-layer="3147" from-port="0" to-layer="3148" to-port="2"/>
-		<edge from-layer="3148" from-port="3" to-layer="3156" to-port="1"/>
-		<edge from-layer="3149" from-port="0" to-layer="3151" to-port="1"/>
-		<edge from-layer="3150" from-port="0" to-layer="3151" to-port="2"/>
-		<edge from-layer="3151" from-port="3" to-layer="3153" to-port="0"/>
-		<edge from-layer="3152" from-port="0" to-layer="3153" to-port="1"/>
-		<edge from-layer="3153" from-port="2" to-layer="3155" to-port="0"/>
-		<edge from-layer="3154" from-port="0" to-layer="3155" to-port="1"/>
-		<edge from-layer="3155" from-port="2" to-layer="3156" to-port="2"/>
-		<edge from-layer="3156" from-port="3" to-layer="3157" to-port="1"/>
-		<edge from-layer="3157" from-port="2" to-layer="3159" to-port="0"/>
-		<edge from-layer="3158" from-port="0" to-layer="3159" to-port="1"/>
-		<edge from-layer="3159" from-port="2" to-layer="3160" to-port="1"/>
-		<edge from-layer="3160" from-port="2" to-layer="3161" to-port="0"/>
-		<edge from-layer="3161" from-port="2" to-layer="3163" to-port="0"/>
-		<edge from-layer="3161" from-port="2" to-layer="3250" to-port="1"/>
-		<edge from-layer="3162" from-port="0" to-layer="3163" to-port="1"/>
-		<edge from-layer="3163" from-port="2" to-layer="3165" to-port="0"/>
-		<edge from-layer="3164" from-port="0" to-layer="3165" to-port="1"/>
-		<edge from-layer="3165" from-port="2" to-layer="3167" to-port="0"/>
-		<edge from-layer="3166" from-port="0" to-layer="3167" to-port="1"/>
-		<edge from-layer="3167" from-port="2" to-layer="3169" to-port="0"/>
-		<edge from-layer="3168" from-port="0" to-layer="3169" to-port="1"/>
-		<edge from-layer="3169" from-port="2" to-layer="3180" to-port="0"/>
-		<edge from-layer="3169" from-port="2" to-layer="3171" to-port="0"/>
-		<edge from-layer="3170" from-port="0" to-layer="3179" to-port="0"/>
-		<edge from-layer="3171" from-port="1" to-layer="3174" to-port="0"/>
-		<edge from-layer="3171" from-port="1" to-layer="3185" to-port="0"/>
-		<edge from-layer="3171" from-port="1" to-layer="3192" to-port="0"/>
-		<edge from-layer="3172" from-port="0" to-layer="3174" to-port="1"/>
-		<edge from-layer="3173" from-port="0" to-layer="3174" to-port="2"/>
-		<edge from-layer="3174" from-port="3" to-layer="3194" to-port="0"/>
-		<edge from-layer="3174" from-port="3" to-layer="3176" to-port="0"/>
-		<edge from-layer="3175" from-port="0" to-layer="3176" to-port="1"/>
-		<edge from-layer="3176" from-port="2" to-layer="3178" to-port="0"/>
-		<edge from-layer="3177" from-port="0" to-layer="3178" to-port="1"/>
-		<edge from-layer="3178" from-port="2" to-layer="3179" to-port="2"/>
-		<edge from-layer="3179" from-port="3" to-layer="3180" to-port="1"/>
-		<edge from-layer="3180" from-port="2" to-layer="3182" to-port="0"/>
-		<edge from-layer="3181" from-port="0" to-layer="3182" to-port="1"/>
-		<edge from-layer="3182" from-port="2" to-layer="3198" to-port="0"/>
-		<edge from-layer="3183" from-port="0" to-layer="3185" to-port="1"/>
-		<edge from-layer="3184" from-port="0" to-layer="3185" to-port="2"/>
-		<edge from-layer="3185" from-port="3" to-layer="3187" to-port="0"/>
-		<edge from-layer="3186" from-port="0" to-layer="3187" to-port="1"/>
-		<edge from-layer="3187" from-port="2" to-layer="3189" to-port="0"/>
-		<edge from-layer="3188" from-port="0" to-layer="3189" to-port="1"/>
-		<edge from-layer="3189" from-port="2" to-layer="3197" to-port="0"/>
-		<edge from-layer="3190" from-port="0" to-layer="3192" to-port="1"/>
-		<edge from-layer="3191" from-port="0" to-layer="3192" to-port="2"/>
-		<edge from-layer="3192" from-port="3" to-layer="3197" to-port="1"/>
-		<edge from-layer="3193" from-port="0" to-layer="3194" to-port="1"/>
-		<edge from-layer="3194" from-port="2" to-layer="3196" to-port="0"/>
-		<edge from-layer="3195" from-port="0" to-layer="3196" to-port="1"/>
-		<edge from-layer="3196" from-port="2" to-layer="3197" to-port="2"/>
-		<edge from-layer="3197" from-port="3" to-layer="3198" to-port="1"/>
-		<edge from-layer="3198" from-port="2" to-layer="3205" to-port="0"/>
-		<edge from-layer="3199" from-port="0" to-layer="3200" to-port="1"/>
-		<edge from-layer="3200" from-port="2" to-layer="3201" to-port="0"/>
-		<edge from-layer="3201" from-port="2" to-layer="3203" to-port="0"/>
-		<edge from-layer="3202" from-port="0" to-layer="3203" to-port="1"/>
-		<edge from-layer="3203" from-port="2" to-layer="3204" to-port="0"/>
-		<edge from-layer="3204" from-port="2" to-layer="3205" to-port="1"/>
-		<edge from-layer="3205" from-port="2" to-layer="3207" to-port="0"/>
-		<edge from-layer="3206" from-port="0" to-layer="3207" to-port="1"/>
-		<edge from-layer="3207" from-port="2" to-layer="3208" to-port="0"/>
-		<edge from-layer="3208" from-port="1" to-layer="3215" to-port="0"/>
-		<edge from-layer="3209" from-port="0" to-layer="3210" to-port="1"/>
-		<edge from-layer="3210" from-port="2" to-layer="3211" to-port="0"/>
-		<edge from-layer="3211" from-port="2" to-layer="3213" to-port="0"/>
-		<edge from-layer="3212" from-port="0" to-layer="3213" to-port="1"/>
-		<edge from-layer="3213" from-port="2" to-layer="3214" to-port="0"/>
-		<edge from-layer="3214" from-port="2" to-layer="3215" to-port="1"/>
-		<edge from-layer="3215" from-port="2" to-layer="3228" to-port="0"/>
-		<edge from-layer="3215" from-port="2" to-layer="3216" to-port="0"/>
-		<edge from-layer="3216" from-port="1" to-layer="3240" to-port="0"/>
-		<edge from-layer="3216" from-port="1" to-layer="3237" to-port="0"/>
-		<edge from-layer="3216" from-port="1" to-layer="3219" to-port="0"/>
-		<edge from-layer="3216" from-port="1" to-layer="3226" to-port="0"/>
-		<edge from-layer="3217" from-port="0" to-layer="3219" to-port="1"/>
-		<edge from-layer="3218" from-port="0" to-layer="3219" to-port="2"/>
-		<edge from-layer="3219" from-port="3" to-layer="3232" to-port="0"/>
-		<edge from-layer="3219" from-port="3" to-layer="3221" to-port="0"/>
-		<edge from-layer="3220" from-port="0" to-layer="3221" to-port="1"/>
-		<edge from-layer="3221" from-port="2" to-layer="3223" to-port="0"/>
-		<edge from-layer="3222" from-port="0" to-layer="3223" to-port="1"/>
-		<edge from-layer="3223" from-port="2" to-layer="3227" to-port="0"/>
-		<edge from-layer="3224" from-port="0" to-layer="3226" to-port="1"/>
-		<edge from-layer="3225" from-port="0" to-layer="3226" to-port="2"/>
-		<edge from-layer="3226" from-port="3" to-layer="3227" to-port="2"/>
-		<edge from-layer="3227" from-port="3" to-layer="3228" to-port="1"/>
-		<edge from-layer="3228" from-port="2" to-layer="3230" to-port="0"/>
-		<edge from-layer="3229" from-port="0" to-layer="3230" to-port="1"/>
-		<edge from-layer="3230" from-port="2" to-layer="3246" to-port="0"/>
-		<edge from-layer="3231" from-port="0" to-layer="3232" to-port="1"/>
-		<edge from-layer="3232" from-port="2" to-layer="3234" to-port="0"/>
-		<edge from-layer="3233" from-port="0" to-layer="3234" to-port="1"/>
-		<edge from-layer="3234" from-port="2" to-layer="3245" to-port="0"/>
-		<edge from-layer="3235" from-port="0" to-layer="3237" to-port="1"/>
-		<edge from-layer="3236" from-port="0" to-layer="3237" to-port="2"/>
-		<edge from-layer="3237" from-port="3" to-layer="3245" to-port="1"/>
-		<edge from-layer="3238" from-port="0" to-layer="3240" to-port="1"/>
-		<edge from-layer="3239" from-port="0" to-layer="3240" to-port="2"/>
-		<edge from-layer="3240" from-port="3" to-layer="3242" to-port="0"/>
-		<edge from-layer="3241" from-port="0" to-layer="3242" to-port="1"/>
-		<edge from-layer="3242" from-port="2" to-layer="3244" to-port="0"/>
-		<edge from-layer="3243" from-port="0" to-layer="3244" to-port="1"/>
-		<edge from-layer="3244" from-port="2" to-layer="3245" to-port="2"/>
-		<edge from-layer="3245" from-port="3" to-layer="3246" to-port="1"/>
-		<edge from-layer="3246" from-port="2" to-layer="3248" to-port="0"/>
-		<edge from-layer="3247" from-port="0" to-layer="3248" to-port="1"/>
-		<edge from-layer="3248" from-port="2" to-layer="3249" to-port="1"/>
-		<edge from-layer="3249" from-port="2" to-layer="3250" to-port="0"/>
-		<edge from-layer="3250" from-port="2" to-layer="3252" to-port="0"/>
-		<edge from-layer="3250" from-port="2" to-layer="3290" to-port="1"/>
-		<edge from-layer="3251" from-port="0" to-layer="3252" to-port="1"/>
-		<edge from-layer="3252" from-port="2" to-layer="3254" to-port="0"/>
-		<edge from-layer="3253" from-port="0" to-layer="3254" to-port="1"/>
-		<edge from-layer="3254" from-port="2" to-layer="3256" to-port="0"/>
-		<edge from-layer="3255" from-port="0" to-layer="3256" to-port="1"/>
-		<edge from-layer="3256" from-port="2" to-layer="3258" to-port="0"/>
-		<edge from-layer="3257" from-port="0" to-layer="3258" to-port="1"/>
-		<edge from-layer="3258" from-port="2" to-layer="3259" to-port="1"/>
-		<edge from-layer="3259" from-port="2" to-layer="3263" to-port="0"/>
-		<edge from-layer="3259" from-port="2" to-layer="3274" to-port="0"/>
-		<edge from-layer="3259" from-port="2" to-layer="3284" to-port="0"/>
-		<edge from-layer="3260" from-port="0" to-layer="3274" to-port="1"/>
-		<edge from-layer="3261" from-port="0" to-layer="3272" to-port="0"/>
-		<edge from-layer="3262" from-port="0" to-layer="3272" to-port="1"/>
-		<edge from-layer="3263" from-port="1" to-layer="3266" to-port="0"/>
-		<edge from-layer="3264" from-port="0" to-layer="3266" to-port="1"/>
-		<edge from-layer="3265" from-port="0" to-layer="3266" to-port="2"/>
-		<edge from-layer="3266" from-port="3" to-layer="3268" to-port="0"/>
-		<edge from-layer="3267" from-port="0" to-layer="3268" to-port="1"/>
-		<edge from-layer="3268" from-port="2" to-layer="3270" to-port="0"/>
-		<edge from-layer="3269" from-port="0" to-layer="3270" to-port="1"/>
-		<edge from-layer="3270" from-port="2" to-layer="3278" to-port="2"/>
-		<edge from-layer="3270" from-port="2" to-layer="3281" to-port="0"/>
-		<edge from-layer="3270" from-port="2" to-layer="3272" to-port="2"/>
-		<edge from-layer="3271" from-port="0" to-layer="3272" to-port="3"/>
-		<edge from-layer="3272" from-port="4" to-layer="3274" to-port="2"/>
-		<edge from-layer="3273" from-port="0" to-layer="3274" to-port="3"/>
-		<edge from-layer="3274" from-port="4" to-layer="3286" to-port="0"/>
-		<edge from-layer="3275" from-port="0" to-layer="3278" to-port="0"/>
-		<edge from-layer="3276" from-port="0" to-layer="3282" to-port="1"/>
-		<edge from-layer="3276" from-port="0" to-layer="3278" to-port="1"/>
-		<edge from-layer="3277" from-port="0" to-layer="3278" to-port="3"/>
-		<edge from-layer="3277" from-port="0" to-layer="3282" to-port="3"/>
-		<edge from-layer="3278" from-port="4" to-layer="3284" to-port="1"/>
-		<edge from-layer="3279" from-port="0" to-layer="3282" to-port="0"/>
-		<edge from-layer="3280" from-port="0" to-layer="3281" to-port="1"/>
-		<edge from-layer="3281" from-port="2" to-layer="3282" to-port="2"/>
-		<edge from-layer="3282" from-port="4" to-layer="3284" to-port="2"/>
-		<edge from-layer="3283" from-port="0" to-layer="3284" to-port="3"/>
-		<edge from-layer="3284" from-port="4" to-layer="3285" to-port="0"/>
-		<edge from-layer="3285" from-port="1" to-layer="3286" to-port="1"/>
-		<edge from-layer="3286" from-port="2" to-layer="3288" to-port="0"/>
-		<edge from-layer="3287" from-port="0" to-layer="3288" to-port="1"/>
-		<edge from-layer="3288" from-port="2" to-layer="3289" to-port="1"/>
-		<edge from-layer="3289" from-port="2" to-layer="3290" to-port="0"/>
-		<edge from-layer="3290" from-port="2" to-layer="3294" to-port="0"/>
-		<edge from-layer="3291" from-port="0" to-layer="3293" to-port="1"/>
-		<edge from-layer="3292" from-port="0" to-layer="3293" to-port="2"/>
-		<edge from-layer="3293" from-port="3" to-layer="3294" to-port="1"/>
-		<edge from-layer="3294" from-port="2" to-layer="3296" to-port="0"/>
-		<edge from-layer="3295" from-port="0" to-layer="3296" to-port="1"/>
-		<edge from-layer="3296" from-port="2" to-layer="3298" to-port="0"/>
-		<edge from-layer="3297" from-port="0" to-layer="3298" to-port="1"/>
-		<edge from-layer="3298" from-port="2" to-layer="3300" to-port="0"/>
-		<edge from-layer="3299" from-port="0" to-layer="3300" to-port="1"/>
-		<edge from-layer="3300" from-port="2" to-layer="3301" to-port="0"/>
-		<edge from-layer="3301" from-port="2" to-layer="3302" to-port="0"/>
-		<edge from-layer="3301" from-port="2" to-layer="3306" to-port="0"/>
-		<edge from-layer="3302" from-port="1" to-layer="3303" to-port="0"/>
-		<edge from-layer="3303" from-port="1" to-layer="3304" to-port="0"/>
-		<edge from-layer="3304" from-port="2" to-layer="3305" to-port="0"/>
-		<edge from-layer="3305" from-port="1" to-layer="3306" to-port="1"/>
-		<edge from-layer="3306" from-port="3" to-layer="3308" to-port="0"/>
-		<edge from-layer="3307" from-port="0" to-layer="3308" to-port="1"/>
-		<edge from-layer="3308" from-port="2" to-layer="3310" to-port="0"/>
-		<edge from-layer="3309" from-port="0" to-layer="3310" to-port="1"/>
-		<edge from-layer="3310" from-port="2" to-layer="3311" to-port="0"/>
-		<edge from-layer="3311" from-port="2" to-layer="3313" to-port="0"/>
-		<edge from-layer="3311" from-port="2" to-layer="3320" to-port="0"/>
-		<edge from-layer="3311" from-port="2" to-layer="3317" to-port="0"/>
-		<edge from-layer="3312" from-port="0" to-layer="3313" to-port="1"/>
-		<edge from-layer="3313" from-port="2" to-layer="3315" to-port="0"/>
-		<edge from-layer="3314" from-port="0" to-layer="3315" to-port="1"/>
-		<edge from-layer="3315" from-port="2" to-layer="3356" to-port="0"/>
-		<edge from-layer="3316" from-port="0" to-layer="3317" to-port="1"/>
-		<edge from-layer="3317" from-port="2" to-layer="3319" to-port="0"/>
-		<edge from-layer="3318" from-port="0" to-layer="3319" to-port="1"/>
-		<edge from-layer="3319" from-port="2" to-layer="3321" to-port="0"/>
-		<edge from-layer="3320" from-port="1" to-layer="3321" to-port="1"/>
-		<edge from-layer="3321" from-port="2" to-layer="3323" to-port="0"/>
-		<edge from-layer="3322" from-port="0" to-layer="3323" to-port="1"/>
-		<edge from-layer="3323" from-port="2" to-layer="3325" to-port="0"/>
-		<edge from-layer="3324" from-port="0" to-layer="3325" to-port="1"/>
-		<edge from-layer="3325" from-port="2" to-layer="3326" to-port="0"/>
-		<edge from-layer="3326" from-port="1" to-layer="3328" to-port="0"/>
-		<edge from-layer="3327" from-port="0" to-layer="3328" to-port="1"/>
-		<edge from-layer="3328" from-port="2" to-layer="3330" to-port="0"/>
-		<edge from-layer="3329" from-port="0" to-layer="3330" to-port="1"/>
-		<edge from-layer="3330" from-port="2" to-layer="3340" to-port="0"/>
-		<edge from-layer="3331" from-port="1" to-layer="3333" to-port="0"/>
-		<edge from-layer="3332" from-port="0" to-layer="3333" to-port="1"/>
-		<edge from-layer="3333" from-port="2" to-layer="3335" to-port="0"/>
-		<edge from-layer="3334" from-port="0" to-layer="3335" to-port="1"/>
-		<edge from-layer="3335" from-port="2" to-layer="3337" to-port="0"/>
-		<edge from-layer="3336" from-port="0" to-layer="3337" to-port="1"/>
-		<edge from-layer="3337" from-port="2" to-layer="3339" to-port="0"/>
-		<edge from-layer="3338" from-port="0" to-layer="3339" to-port="1"/>
-		<edge from-layer="3339" from-port="2" to-layer="3340" to-port="1"/>
-		<edge from-layer="3340" from-port="2" to-layer="3345" to-port="0"/>
-		<edge from-layer="3340" from-port="2" to-layer="3342" to-port="0"/>
-		<edge from-layer="3341" from-port="0" to-layer="3342" to-port="1"/>
-		<edge from-layer="3342" from-port="2" to-layer="3344" to-port="0"/>
-		<edge from-layer="3343" from-port="0" to-layer="3344" to-port="1"/>
-		<edge from-layer="3344" from-port="2" to-layer="3346" to-port="0"/>
-		<edge from-layer="3345" from-port="1" to-layer="3346" to-port="1"/>
-		<edge from-layer="3346" from-port="2" to-layer="3348" to-port="0"/>
-		<edge from-layer="3347" from-port="0" to-layer="3348" to-port="1"/>
-		<edge from-layer="3348" from-port="2" to-layer="3350" to-port="0"/>
-		<edge from-layer="3349" from-port="0" to-layer="3350" to-port="1"/>
-		<edge from-layer="3350" from-port="2" to-layer="3351" to-port="0"/>
-		<edge from-layer="3351" from-port="1" to-layer="3353" to-port="0"/>
-		<edge from-layer="3352" from-port="0" to-layer="3353" to-port="1"/>
-		<edge from-layer="3353" from-port="2" to-layer="3355" to-port="0"/>
-		<edge from-layer="3354" from-port="0" to-layer="3355" to-port="1"/>
-		<edge from-layer="3355" from-port="2" to-layer="3356" to-port="1"/>
-		<edge from-layer="3356" from-port="2" to-layer="3358" to-port="0"/>
-		<edge from-layer="3356" from-port="2" to-layer="3361" to-port="0"/>
-		<edge from-layer="3356" from-port="2" to-layer="3668" to-port="1"/>
-		<edge from-layer="3357" from-port="0" to-layer="3358" to-port="1"/>
-		<edge from-layer="3358" from-port="2" to-layer="3360" to-port="0"/>
-		<edge from-layer="3359" from-port="0" to-layer="3360" to-port="1"/>
-		<edge from-layer="3360" from-port="2" to-layer="3362" to-port="0"/>
-		<edge from-layer="3361" from-port="1" to-layer="3660" to-port="0"/>
-		<edge from-layer="3361" from-port="1" to-layer="3362" to-port="1"/>
-		<edge from-layer="3361" from-port="1" to-layer="3387" to-port="0"/>
-		<edge from-layer="3361" from-port="1" to-layer="3381" to-port="0"/>
-		<edge from-layer="3361" from-port="1" to-layer="3375" to-port="0"/>
-		<edge from-layer="3361" from-port="1" to-layer="3378" to-port="0"/>
-		<edge from-layer="3362" from-port="2" to-layer="3364" to-port="0"/>
-		<edge from-layer="3363" from-port="0" to-layer="3364" to-port="1"/>
-		<edge from-layer="3364" from-port="2" to-layer="3366" to-port="0"/>
-		<edge from-layer="3365" from-port="0" to-layer="3366" to-port="1"/>
-		<edge from-layer="3366" from-port="2" to-layer="3368" to-port="0"/>
-		<edge from-layer="3367" from-port="0" to-layer="3368" to-port="1"/>
-		<edge from-layer="3368" from-port="2" to-layer="3370" to-port="0"/>
-		<edge from-layer="3369" from-port="0" to-layer="3370" to-port="1"/>
-		<edge from-layer="3370" from-port="2" to-layer="3372" to-port="0"/>
-		<edge from-layer="3371" from-port="0" to-layer="3372" to-port="1"/>
-		<edge from-layer="3372" from-port="2" to-layer="3389" to-port="0"/>
-		<edge from-layer="3373" from-port="0" to-layer="3375" to-port="1"/>
-		<edge from-layer="3374" from-port="0" to-layer="3375" to-port="2"/>
-		<edge from-layer="3375" from-port="3" to-layer="3388" to-port="0"/>
-		<edge from-layer="3376" from-port="0" to-layer="3378" to-port="1"/>
-		<edge from-layer="3377" from-port="0" to-layer="3378" to-port="2"/>
-		<edge from-layer="3378" from-port="3" to-layer="3382" to-port="0"/>
-		<edge from-layer="3379" from-port="0" to-layer="3381" to-port="1"/>
-		<edge from-layer="3380" from-port="0" to-layer="3381" to-port="2"/>
-		<edge from-layer="3381" from-port="3" to-layer="3382" to-port="1"/>
-		<edge from-layer="3382" from-port="2" to-layer="3384" to-port="0"/>
-		<edge from-layer="3383" from-port="0" to-layer="3384" to-port="1"/>
-		<edge from-layer="3384" from-port="2" to-layer="3388" to-port="1"/>
-		<edge from-layer="3385" from-port="0" to-layer="3387" to-port="1"/>
-		<edge from-layer="3386" from-port="0" to-layer="3387" to-port="2"/>
-		<edge from-layer="3387" from-port="3" to-layer="3388" to-port="2"/>
-		<edge from-layer="3388" from-port="3" to-layer="3389" to-port="1"/>
-		<edge from-layer="3389" from-port="2" to-layer="3528" to-port="1"/>
-		<edge from-layer="3389" from-port="2" to-layer="3391" to-port="0"/>
-		<edge from-layer="3390" from-port="0" to-layer="3391" to-port="1"/>
-		<edge from-layer="3391" from-port="2" to-layer="3393" to-port="0"/>
-		<edge from-layer="3392" from-port="0" to-layer="3393" to-port="1"/>
-		<edge from-layer="3393" from-port="2" to-layer="3395" to-port="0"/>
-		<edge from-layer="3394" from-port="0" to-layer="3395" to-port="1"/>
-		<edge from-layer="3395" from-port="2" to-layer="3397" to-port="0"/>
-		<edge from-layer="3395" from-port="2" to-layer="3463" to-port="0"/>
-		<edge from-layer="3395" from-port="2" to-layer="3428" to-port="0"/>
-		<edge from-layer="3396" from-port="0" to-layer="3397" to-port="1"/>
-		<edge from-layer="3397" from-port="2" to-layer="3408" to-port="0"/>
-		<edge from-layer="3397" from-port="2" to-layer="3399" to-port="0"/>
-		<edge from-layer="3398" from-port="0" to-layer="3407" to-port="0"/>
-		<edge from-layer="3399" from-port="1" to-layer="3413" to-port="0"/>
-		<edge from-layer="3399" from-port="1" to-layer="3420" to-port="0"/>
-		<edge from-layer="3399" from-port="1" to-layer="3402" to-port="0"/>
-		<edge from-layer="3400" from-port="0" to-layer="3402" to-port="1"/>
-		<edge from-layer="3401" from-port="0" to-layer="3402" to-port="2"/>
-		<edge from-layer="3402" from-port="3" to-layer="3404" to-port="0"/>
-		<edge from-layer="3402" from-port="3" to-layer="3422" to-port="0"/>
-		<edge from-layer="3403" from-port="0" to-layer="3404" to-port="1"/>
-		<edge from-layer="3404" from-port="2" to-layer="3406" to-port="0"/>
-		<edge from-layer="3405" from-port="0" to-layer="3406" to-port="1"/>
-		<edge from-layer="3406" from-port="2" to-layer="3407" to-port="2"/>
-		<edge from-layer="3407" from-port="3" to-layer="3408" to-port="1"/>
-		<edge from-layer="3408" from-port="2" to-layer="3410" to-port="0"/>
-		<edge from-layer="3409" from-port="0" to-layer="3410" to-port="1"/>
-		<edge from-layer="3410" from-port="2" to-layer="3426" to-port="0"/>
-		<edge from-layer="3411" from-port="0" to-layer="3413" to-port="1"/>
-		<edge from-layer="3412" from-port="0" to-layer="3413" to-port="2"/>
-		<edge from-layer="3413" from-port="3" to-layer="3415" to-port="0"/>
-		<edge from-layer="3414" from-port="0" to-layer="3415" to-port="1"/>
-		<edge from-layer="3415" from-port="2" to-layer="3417" to-port="0"/>
-		<edge from-layer="3416" from-port="0" to-layer="3417" to-port="1"/>
-		<edge from-layer="3417" from-port="2" to-layer="3425" to-port="0"/>
-		<edge from-layer="3418" from-port="0" to-layer="3420" to-port="1"/>
-		<edge from-layer="3419" from-port="0" to-layer="3420" to-port="2"/>
-		<edge from-layer="3420" from-port="3" to-layer="3425" to-port="1"/>
-		<edge from-layer="3421" from-port="0" to-layer="3422" to-port="1"/>
-		<edge from-layer="3422" from-port="2" to-layer="3424" to-port="0"/>
-		<edge from-layer="3423" from-port="0" to-layer="3424" to-port="1"/>
-		<edge from-layer="3424" from-port="2" to-layer="3425" to-port="2"/>
-		<edge from-layer="3425" from-port="3" to-layer="3426" to-port="1"/>
-		<edge from-layer="3426" from-port="2" to-layer="3458" to-port="0"/>
-		<edge from-layer="3427" from-port="0" to-layer="3428" to-port="1"/>
-		<edge from-layer="3428" from-port="2" to-layer="3439" to-port="0"/>
-		<edge from-layer="3428" from-port="2" to-layer="3430" to-port="0"/>
-		<edge from-layer="3429" from-port="0" to-layer="3438" to-port="0"/>
-		<edge from-layer="3430" from-port="1" to-layer="3433" to-port="0"/>
-		<edge from-layer="3430" from-port="1" to-layer="3451" to-port="0"/>
-		<edge from-layer="3430" from-port="1" to-layer="3444" to-port="0"/>
-		<edge from-layer="3431" from-port="0" to-layer="3433" to-port="1"/>
-		<edge from-layer="3432" from-port="0" to-layer="3433" to-port="2"/>
-		<edge from-layer="3433" from-port="3" to-layer="3453" to-port="0"/>
-		<edge from-layer="3433" from-port="3" to-layer="3435" to-port="0"/>
-		<edge from-layer="3434" from-port="0" to-layer="3435" to-port="1"/>
-		<edge from-layer="3435" from-port="2" to-layer="3437" to-port="0"/>
-		<edge from-layer="3436" from-port="0" to-layer="3437" to-port="1"/>
-		<edge from-layer="3437" from-port="2" to-layer="3438" to-port="2"/>
-		<edge from-layer="3438" from-port="3" to-layer="3439" to-port="1"/>
-		<edge from-layer="3439" from-port="2" to-layer="3441" to-port="0"/>
-		<edge from-layer="3440" from-port="0" to-layer="3441" to-port="1"/>
-		<edge from-layer="3441" from-port="2" to-layer="3457" to-port="0"/>
-		<edge from-layer="3442" from-port="0" to-layer="3444" to-port="1"/>
-		<edge from-layer="3443" from-port="0" to-layer="3444" to-port="2"/>
-		<edge from-layer="3444" from-port="3" to-layer="3446" to-port="0"/>
-		<edge from-layer="3445" from-port="0" to-layer="3446" to-port="1"/>
-		<edge from-layer="3446" from-port="2" to-layer="3448" to-port="0"/>
-		<edge from-layer="3447" from-port="0" to-layer="3448" to-port="1"/>
-		<edge from-layer="3448" from-port="2" to-layer="3456" to-port="0"/>
-		<edge from-layer="3449" from-port="0" to-layer="3451" to-port="1"/>
-		<edge from-layer="3450" from-port="0" to-layer="3451" to-port="2"/>
-		<edge from-layer="3451" from-port="3" to-layer="3456" to-port="1"/>
-		<edge from-layer="3452" from-port="0" to-layer="3453" to-port="1"/>
-		<edge from-layer="3453" from-port="2" to-layer="3455" to-port="0"/>
-		<edge from-layer="3454" from-port="0" to-layer="3455" to-port="1"/>
-		<edge from-layer="3455" from-port="2" to-layer="3456" to-port="2"/>
-		<edge from-layer="3456" from-port="3" to-layer="3457" to-port="1"/>
-		<edge from-layer="3457" from-port="2" to-layer="3458" to-port="1"/>
-		<edge from-layer="3458" from-port="2" to-layer="3460" to-port="0"/>
-		<edge from-layer="3459" from-port="0" to-layer="3460" to-port="1"/>
-		<edge from-layer="3460" from-port="2" to-layer="3461" to-port="0"/>
-		<edge from-layer="3461" from-port="1" to-layer="3493" to-port="0"/>
-		<edge from-layer="3462" from-port="0" to-layer="3463" to-port="1"/>
-		<edge from-layer="3463" from-port="2" to-layer="3465" to-port="0"/>
-		<edge from-layer="3463" from-port="2" to-layer="3474" to-port="0"/>
-		<edge from-layer="3464" from-port="0" to-layer="3473" to-port="0"/>
-		<edge from-layer="3465" from-port="1" to-layer="3468" to-port="0"/>
-		<edge from-layer="3465" from-port="1" to-layer="3486" to-port="0"/>
-		<edge from-layer="3465" from-port="1" to-layer="3479" to-port="0"/>
-		<edge from-layer="3466" from-port="0" to-layer="3468" to-port="1"/>
-		<edge from-layer="3467" from-port="0" to-layer="3468" to-port="2"/>
-		<edge from-layer="3468" from-port="3" to-layer="3488" to-port="0"/>
-		<edge from-layer="3468" from-port="3" to-layer="3470" to-port="0"/>
-		<edge from-layer="3469" from-port="0" to-layer="3470" to-port="1"/>
-		<edge from-layer="3470" from-port="2" to-layer="3472" to-port="0"/>
-		<edge from-layer="3471" from-port="0" to-layer="3472" to-port="1"/>
-		<edge from-layer="3472" from-port="2" to-layer="3473" to-port="2"/>
-		<edge from-layer="3473" from-port="3" to-layer="3474" to-port="1"/>
-		<edge from-layer="3474" from-port="2" to-layer="3476" to-port="0"/>
-		<edge from-layer="3475" from-port="0" to-layer="3476" to-port="1"/>
-		<edge from-layer="3476" from-port="2" to-layer="3492" to-port="0"/>
-		<edge from-layer="3477" from-port="0" to-layer="3479" to-port="1"/>
-		<edge from-layer="3478" from-port="0" to-layer="3479" to-port="2"/>
-		<edge from-layer="3479" from-port="3" to-layer="3481" to-port="0"/>
-		<edge from-layer="3480" from-port="0" to-layer="3481" to-port="1"/>
-		<edge from-layer="3481" from-port="2" to-layer="3483" to-port="0"/>
-		<edge from-layer="3482" from-port="0" to-layer="3483" to-port="1"/>
-		<edge from-layer="3483" from-port="2" to-layer="3491" to-port="0"/>
-		<edge from-layer="3484" from-port="0" to-layer="3486" to-port="1"/>
-		<edge from-layer="3485" from-port="0" to-layer="3486" to-port="2"/>
-		<edge from-layer="3486" from-port="3" to-layer="3491" to-port="1"/>
-		<edge from-layer="3487" from-port="0" to-layer="3488" to-port="1"/>
-		<edge from-layer="3488" from-port="2" to-layer="3490" to-port="0"/>
-		<edge from-layer="3489" from-port="0" to-layer="3490" to-port="1"/>
-		<edge from-layer="3490" from-port="2" to-layer="3491" to-port="2"/>
-		<edge from-layer="3491" from-port="3" to-layer="3492" to-port="1"/>
-		<edge from-layer="3492" from-port="2" to-layer="3493" to-port="1"/>
-		<edge from-layer="3493" from-port="2" to-layer="3494" to-port="0"/>
-		<edge from-layer="3493" from-port="2" to-layer="3506" to-port="0"/>
-		<edge from-layer="3494" from-port="1" to-layer="3515" to-port="0"/>
-		<edge from-layer="3494" from-port="1" to-layer="3504" to-port="0"/>
-		<edge from-layer="3494" from-port="1" to-layer="3518" to-port="0"/>
-		<edge from-layer="3494" from-port="1" to-layer="3497" to-port="0"/>
-		<edge from-layer="3495" from-port="0" to-layer="3497" to-port="1"/>
-		<edge from-layer="3496" from-port="0" to-layer="3497" to-port="2"/>
-		<edge from-layer="3497" from-port="3" to-layer="3499" to-port="0"/>
-		<edge from-layer="3497" from-port="3" to-layer="3510" to-port="0"/>
-		<edge from-layer="3498" from-port="0" to-layer="3499" to-port="1"/>
-		<edge from-layer="3499" from-port="2" to-layer="3501" to-port="0"/>
-		<edge from-layer="3500" from-port="0" to-layer="3501" to-port="1"/>
-		<edge from-layer="3501" from-port="2" to-layer="3505" to-port="0"/>
-		<edge from-layer="3502" from-port="0" to-layer="3504" to-port="1"/>
-		<edge from-layer="3503" from-port="0" to-layer="3504" to-port="2"/>
-		<edge from-layer="3504" from-port="3" to-layer="3505" to-port="2"/>
-		<edge from-layer="3505" from-port="3" to-layer="3506" to-port="1"/>
-		<edge from-layer="3506" from-port="2" to-layer="3508" to-port="0"/>
-		<edge from-layer="3507" from-port="0" to-layer="3508" to-port="1"/>
-		<edge from-layer="3508" from-port="2" to-layer="3524" to-port="0"/>
-		<edge from-layer="3509" from-port="0" to-layer="3510" to-port="1"/>
-		<edge from-layer="3510" from-port="2" to-layer="3512" to-port="0"/>
-		<edge from-layer="3511" from-port="0" to-layer="3512" to-port="1"/>
-		<edge from-layer="3512" from-port="2" to-layer="3523" to-port="0"/>
-		<edge from-layer="3513" from-port="0" to-layer="3515" to-port="1"/>
-		<edge from-layer="3514" from-port="0" to-layer="3515" to-port="2"/>
-		<edge from-layer="3515" from-port="3" to-layer="3523" to-port="1"/>
-		<edge from-layer="3516" from-port="0" to-layer="3518" to-port="1"/>
-		<edge from-layer="3517" from-port="0" to-layer="3518" to-port="2"/>
-		<edge from-layer="3518" from-port="3" to-layer="3520" to-port="0"/>
-		<edge from-layer="3519" from-port="0" to-layer="3520" to-port="1"/>
-		<edge from-layer="3520" from-port="2" to-layer="3522" to-port="0"/>
-		<edge from-layer="3521" from-port="0" to-layer="3522" to-port="1"/>
-		<edge from-layer="3522" from-port="2" to-layer="3523" to-port="2"/>
-		<edge from-layer="3523" from-port="3" to-layer="3524" to-port="1"/>
-		<edge from-layer="3524" from-port="2" to-layer="3526" to-port="0"/>
-		<edge from-layer="3525" from-port="0" to-layer="3526" to-port="1"/>
-		<edge from-layer="3526" from-port="2" to-layer="3527" to-port="1"/>
-		<edge from-layer="3527" from-port="2" to-layer="3528" to-port="0"/>
-		<edge from-layer="3528" from-port="2" to-layer="3617" to-port="1"/>
-		<edge from-layer="3528" from-port="2" to-layer="3530" to-port="0"/>
-		<edge from-layer="3529" from-port="0" to-layer="3530" to-port="1"/>
-		<edge from-layer="3530" from-port="2" to-layer="3532" to-port="0"/>
-		<edge from-layer="3531" from-port="0" to-layer="3532" to-port="1"/>
-		<edge from-layer="3532" from-port="2" to-layer="3534" to-port="0"/>
-		<edge from-layer="3533" from-port="0" to-layer="3534" to-port="1"/>
-		<edge from-layer="3534" from-port="2" to-layer="3536" to-port="0"/>
-		<edge from-layer="3535" from-port="0" to-layer="3536" to-port="1"/>
-		<edge from-layer="3536" from-port="2" to-layer="3538" to-port="0"/>
-		<edge from-layer="3536" from-port="2" to-layer="3547" to-port="0"/>
-		<edge from-layer="3537" from-port="0" to-layer="3546" to-port="0"/>
-		<edge from-layer="3538" from-port="1" to-layer="3541" to-port="0"/>
-		<edge from-layer="3538" from-port="1" to-layer="3559" to-port="0"/>
-		<edge from-layer="3538" from-port="1" to-layer="3552" to-port="0"/>
-		<edge from-layer="3539" from-port="0" to-layer="3541" to-port="1"/>
-		<edge from-layer="3540" from-port="0" to-layer="3541" to-port="2"/>
-		<edge from-layer="3541" from-port="3" to-layer="3561" to-port="0"/>
-		<edge from-layer="3541" from-port="3" to-layer="3543" to-port="0"/>
-		<edge from-layer="3542" from-port="0" to-layer="3543" to-port="1"/>
-		<edge from-layer="3543" from-port="2" to-layer="3545" to-port="0"/>
-		<edge from-layer="3544" from-port="0" to-layer="3545" to-port="1"/>
-		<edge from-layer="3545" from-port="2" to-layer="3546" to-port="2"/>
-		<edge from-layer="3546" from-port="3" to-layer="3547" to-port="1"/>
-		<edge from-layer="3547" from-port="2" to-layer="3549" to-port="0"/>
-		<edge from-layer="3548" from-port="0" to-layer="3549" to-port="1"/>
-		<edge from-layer="3549" from-port="2" to-layer="3565" to-port="0"/>
-		<edge from-layer="3550" from-port="0" to-layer="3552" to-port="1"/>
-		<edge from-layer="3551" from-port="0" to-layer="3552" to-port="2"/>
-		<edge from-layer="3552" from-port="3" to-layer="3554" to-port="0"/>
-		<edge from-layer="3553" from-port="0" to-layer="3554" to-port="1"/>
-		<edge from-layer="3554" from-port="2" to-layer="3556" to-port="0"/>
-		<edge from-layer="3555" from-port="0" to-layer="3556" to-port="1"/>
-		<edge from-layer="3556" from-port="2" to-layer="3564" to-port="0"/>
-		<edge from-layer="3557" from-port="0" to-layer="3559" to-port="1"/>
-		<edge from-layer="3558" from-port="0" to-layer="3559" to-port="2"/>
-		<edge from-layer="3559" from-port="3" to-layer="3564" to-port="1"/>
-		<edge from-layer="3560" from-port="0" to-layer="3561" to-port="1"/>
-		<edge from-layer="3561" from-port="2" to-layer="3563" to-port="0"/>
-		<edge from-layer="3562" from-port="0" to-layer="3563" to-port="1"/>
-		<edge from-layer="3563" from-port="2" to-layer="3564" to-port="2"/>
-		<edge from-layer="3564" from-port="3" to-layer="3565" to-port="1"/>
-		<edge from-layer="3565" from-port="2" to-layer="3572" to-port="0"/>
-		<edge from-layer="3566" from-port="0" to-layer="3567" to-port="1"/>
-		<edge from-layer="3567" from-port="2" to-layer="3568" to-port="0"/>
-		<edge from-layer="3568" from-port="2" to-layer="3570" to-port="0"/>
-		<edge from-layer="3569" from-port="0" to-layer="3570" to-port="1"/>
-		<edge from-layer="3570" from-port="2" to-layer="3571" to-port="0"/>
-		<edge from-layer="3571" from-port="2" to-layer="3572" to-port="1"/>
-		<edge from-layer="3572" from-port="2" to-layer="3574" to-port="0"/>
-		<edge from-layer="3573" from-port="0" to-layer="3574" to-port="1"/>
-		<edge from-layer="3574" from-port="2" to-layer="3575" to-port="0"/>
-		<edge from-layer="3575" from-port="1" to-layer="3582" to-port="0"/>
-		<edge from-layer="3576" from-port="0" to-layer="3577" to-port="1"/>
-		<edge from-layer="3577" from-port="2" to-layer="3578" to-port="0"/>
-		<edge from-layer="3578" from-port="2" to-layer="3580" to-port="0"/>
-		<edge from-layer="3579" from-port="0" to-layer="3580" to-port="1"/>
-		<edge from-layer="3580" from-port="2" to-layer="3581" to-port="0"/>
-		<edge from-layer="3581" from-port="2" to-layer="3582" to-port="1"/>
-		<edge from-layer="3582" from-port="2" to-layer="3583" to-port="0"/>
-		<edge from-layer="3582" from-port="2" to-layer="3595" to-port="0"/>
-		<edge from-layer="3583" from-port="1" to-layer="3586" to-port="0"/>
-		<edge from-layer="3583" from-port="1" to-layer="3604" to-port="0"/>
-		<edge from-layer="3583" from-port="1" to-layer="3607" to-port="0"/>
-		<edge from-layer="3583" from-port="1" to-layer="3593" to-port="0"/>
-		<edge from-layer="3584" from-port="0" to-layer="3586" to-port="1"/>
-		<edge from-layer="3585" from-port="0" to-layer="3586" to-port="2"/>
-		<edge from-layer="3586" from-port="3" to-layer="3599" to-port="0"/>
-		<edge from-layer="3586" from-port="3" to-layer="3588" to-port="0"/>
-		<edge from-layer="3587" from-port="0" to-layer="3588" to-port="1"/>
-		<edge from-layer="3588" from-port="2" to-layer="3590" to-port="0"/>
-		<edge from-layer="3589" from-port="0" to-layer="3590" to-port="1"/>
-		<edge from-layer="3590" from-port="2" to-layer="3594" to-port="0"/>
-		<edge from-layer="3591" from-port="0" to-layer="3593" to-port="1"/>
-		<edge from-layer="3592" from-port="0" to-layer="3593" to-port="2"/>
-		<edge from-layer="3593" from-port="3" to-layer="3594" to-port="2"/>
-		<edge from-layer="3594" from-port="3" to-layer="3595" to-port="1"/>
-		<edge from-layer="3595" from-port="2" to-layer="3597" to-port="0"/>
-		<edge from-layer="3596" from-port="0" to-layer="3597" to-port="1"/>
-		<edge from-layer="3597" from-port="2" to-layer="3613" to-port="0"/>
-		<edge from-layer="3598" from-port="0" to-layer="3599" to-port="1"/>
-		<edge from-layer="3599" from-port="2" to-layer="3601" to-port="0"/>
-		<edge from-layer="3600" from-port="0" to-layer="3601" to-port="1"/>
-		<edge from-layer="3601" from-port="2" to-layer="3612" to-port="0"/>
-		<edge from-layer="3602" from-port="0" to-layer="3604" to-port="1"/>
-		<edge from-layer="3603" from-port="0" to-layer="3604" to-port="2"/>
-		<edge from-layer="3604" from-port="3" to-layer="3612" to-port="1"/>
-		<edge from-layer="3605" from-port="0" to-layer="3607" to-port="1"/>
-		<edge from-layer="3606" from-port="0" to-layer="3607" to-port="2"/>
-		<edge from-layer="3607" from-port="3" to-layer="3609" to-port="0"/>
-		<edge from-layer="3608" from-port="0" to-layer="3609" to-port="1"/>
-		<edge from-layer="3609" from-port="2" to-layer="3611" to-port="0"/>
-		<edge from-layer="3610" from-port="0" to-layer="3611" to-port="1"/>
-		<edge from-layer="3611" from-port="2" to-layer="3612" to-port="2"/>
-		<edge from-layer="3612" from-port="3" to-layer="3613" to-port="1"/>
-		<edge from-layer="3613" from-port="2" to-layer="3615" to-port="0"/>
-		<edge from-layer="3614" from-port="0" to-layer="3615" to-port="1"/>
-		<edge from-layer="3615" from-port="2" to-layer="3616" to-port="1"/>
-		<edge from-layer="3616" from-port="2" to-layer="3617" to-port="0"/>
-		<edge from-layer="3617" from-port="2" to-layer="3657" to-port="1"/>
-		<edge from-layer="3617" from-port="2" to-layer="3619" to-port="0"/>
-		<edge from-layer="3618" from-port="0" to-layer="3619" to-port="1"/>
-		<edge from-layer="3619" from-port="2" to-layer="3621" to-port="0"/>
-		<edge from-layer="3620" from-port="0" to-layer="3621" to-port="1"/>
-		<edge from-layer="3621" from-port="2" to-layer="3623" to-port="0"/>
-		<edge from-layer="3622" from-port="0" to-layer="3623" to-port="1"/>
-		<edge from-layer="3623" from-port="2" to-layer="3625" to-port="0"/>
-		<edge from-layer="3624" from-port="0" to-layer="3625" to-port="1"/>
-		<edge from-layer="3625" from-port="2" to-layer="3626" to-port="1"/>
-		<edge from-layer="3626" from-port="2" to-layer="3651" to-port="0"/>
-		<edge from-layer="3626" from-port="2" to-layer="3630" to-port="0"/>
-		<edge from-layer="3626" from-port="2" to-layer="3641" to-port="0"/>
-		<edge from-layer="3627" from-port="0" to-layer="3641" to-port="1"/>
-		<edge from-layer="3628" from-port="0" to-layer="3639" to-port="0"/>
-		<edge from-layer="3629" from-port="0" to-layer="3639" to-port="1"/>
-		<edge from-layer="3630" from-port="1" to-layer="3633" to-port="0"/>
-		<edge from-layer="3631" from-port="0" to-layer="3633" to-port="1"/>
-		<edge from-layer="3632" from-port="0" to-layer="3633" to-port="2"/>
-		<edge from-layer="3633" from-port="3" to-layer="3635" to-port="0"/>
-		<edge from-layer="3634" from-port="0" to-layer="3635" to-port="1"/>
-		<edge from-layer="3635" from-port="2" to-layer="3637" to-port="0"/>
-		<edge from-layer="3636" from-port="0" to-layer="3637" to-port="1"/>
-		<edge from-layer="3637" from-port="2" to-layer="3639" to-port="2"/>
-		<edge from-layer="3637" from-port="2" to-layer="3645" to-port="2"/>
-		<edge from-layer="3637" from-port="2" to-layer="3648" to-port="0"/>
-		<edge from-layer="3638" from-port="0" to-layer="3639" to-port="3"/>
-		<edge from-layer="3639" from-port="4" to-layer="3641" to-port="2"/>
-		<edge from-layer="3640" from-port="0" to-layer="3641" to-port="3"/>
-		<edge from-layer="3641" from-port="4" to-layer="3653" to-port="0"/>
-		<edge from-layer="3642" from-port="0" to-layer="3645" to-port="0"/>
-		<edge from-layer="3643" from-port="0" to-layer="3649" to-port="1"/>
-		<edge from-layer="3643" from-port="0" to-layer="3645" to-port="1"/>
-		<edge from-layer="3644" from-port="0" to-layer="3649" to-port="3"/>
-		<edge from-layer="3644" from-port="0" to-layer="3645" to-port="3"/>
-		<edge from-layer="3645" from-port="4" to-layer="3651" to-port="1"/>
-		<edge from-layer="3646" from-port="0" to-layer="3649" to-port="0"/>
-		<edge from-layer="3647" from-port="0" to-layer="3648" to-port="1"/>
-		<edge from-layer="3648" from-port="2" to-layer="3649" to-port="2"/>
-		<edge from-layer="3649" from-port="4" to-layer="3651" to-port="2"/>
-		<edge from-layer="3650" from-port="0" to-layer="3651" to-port="3"/>
-		<edge from-layer="3651" from-port="4" to-layer="3652" to-port="0"/>
-		<edge from-layer="3652" from-port="1" to-layer="3653" to-port="1"/>
-		<edge from-layer="3653" from-port="2" to-layer="3655" to-port="0"/>
-		<edge from-layer="3654" from-port="0" to-layer="3655" to-port="1"/>
-		<edge from-layer="3655" from-port="2" to-layer="3656" to-port="1"/>
-		<edge from-layer="3656" from-port="2" to-layer="3657" to-port="0"/>
-		<edge from-layer="3657" from-port="2" to-layer="3661" to-port="0"/>
-		<edge from-layer="3658" from-port="0" to-layer="3660" to-port="1"/>
-		<edge from-layer="3659" from-port="0" to-layer="3660" to-port="2"/>
-		<edge from-layer="3660" from-port="3" to-layer="3661" to-port="1"/>
-		<edge from-layer="3661" from-port="2" to-layer="3663" to-port="0"/>
-		<edge from-layer="3662" from-port="0" to-layer="3663" to-port="1"/>
-		<edge from-layer="3663" from-port="2" to-layer="3665" to-port="0"/>
-		<edge from-layer="3664" from-port="0" to-layer="3665" to-port="1"/>
-		<edge from-layer="3665" from-port="2" to-layer="3667" to-port="0"/>
-		<edge from-layer="3666" from-port="0" to-layer="3667" to-port="1"/>
-		<edge from-layer="3667" from-port="2" to-layer="3668" to-port="0"/>
-		<edge from-layer="3668" from-port="2" to-layer="3669" to-port="0"/>
-		<edge from-layer="3669" from-port="2" to-layer="3671" to-port="0"/>
-		<edge from-layer="3669" from-port="2" to-layer="3678" to-port="0"/>
-		<edge from-layer="3669" from-port="2" to-layer="3675" to-port="0"/>
-		<edge from-layer="3670" from-port="0" to-layer="3671" to-port="1"/>
-		<edge from-layer="3671" from-port="2" to-layer="3673" to-port="0"/>
-		<edge from-layer="3672" from-port="0" to-layer="3673" to-port="1"/>
-		<edge from-layer="3673" from-port="2" to-layer="3714" to-port="0"/>
-		<edge from-layer="3674" from-port="0" to-layer="3675" to-port="1"/>
-		<edge from-layer="3675" from-port="2" to-layer="3677" to-port="0"/>
-		<edge from-layer="3676" from-port="0" to-layer="3677" to-port="1"/>
-		<edge from-layer="3677" from-port="2" to-layer="3679" to-port="0"/>
-		<edge from-layer="3678" from-port="1" to-layer="3679" to-port="1"/>
-		<edge from-layer="3679" from-port="2" to-layer="3681" to-port="0"/>
-		<edge from-layer="3680" from-port="0" to-layer="3681" to-port="1"/>
-		<edge from-layer="3681" from-port="2" to-layer="3683" to-port="0"/>
-		<edge from-layer="3682" from-port="0" to-layer="3683" to-port="1"/>
-		<edge from-layer="3683" from-port="2" to-layer="3684" to-port="0"/>
-		<edge from-layer="3684" from-port="1" to-layer="3686" to-port="0"/>
-		<edge from-layer="3685" from-port="0" to-layer="3686" to-port="1"/>
-		<edge from-layer="3686" from-port="2" to-layer="3688" to-port="0"/>
-		<edge from-layer="3687" from-port="0" to-layer="3688" to-port="1"/>
-		<edge from-layer="3688" from-port="2" to-layer="3698" to-port="0"/>
-		<edge from-layer="3689" from-port="1" to-layer="3691" to-port="0"/>
-		<edge from-layer="3690" from-port="0" to-layer="3691" to-port="1"/>
-		<edge from-layer="3691" from-port="2" to-layer="3693" to-port="0"/>
-		<edge from-layer="3692" from-port="0" to-layer="3693" to-port="1"/>
-		<edge from-layer="3693" from-port="2" to-layer="3695" to-port="0"/>
-		<edge from-layer="3694" from-port="0" to-layer="3695" to-port="1"/>
-		<edge from-layer="3695" from-port="2" to-layer="3697" to-port="0"/>
-		<edge from-layer="3696" from-port="0" to-layer="3697" to-port="1"/>
-		<edge from-layer="3697" from-port="2" to-layer="3698" to-port="1"/>
-		<edge from-layer="3698" from-port="2" to-layer="3700" to-port="0"/>
-		<edge from-layer="3698" from-port="2" to-layer="3703" to-port="0"/>
-		<edge from-layer="3699" from-port="0" to-layer="3700" to-port="1"/>
-		<edge from-layer="3700" from-port="2" to-layer="3702" to-port="0"/>
-		<edge from-layer="3701" from-port="0" to-layer="3702" to-port="1"/>
-		<edge from-layer="3702" from-port="2" to-layer="3704" to-port="0"/>
-		<edge from-layer="3703" from-port="1" to-layer="3704" to-port="1"/>
-		<edge from-layer="3704" from-port="2" to-layer="3706" to-port="0"/>
-		<edge from-layer="3705" from-port="0" to-layer="3706" to-port="1"/>
-		<edge from-layer="3706" from-port="2" to-layer="3708" to-port="0"/>
-		<edge from-layer="3707" from-port="0" to-layer="3708" to-port="1"/>
-		<edge from-layer="3708" from-port="2" to-layer="3709" to-port="0"/>
-		<edge from-layer="3709" from-port="1" to-layer="3711" to-port="0"/>
-		<edge from-layer="3710" from-port="0" to-layer="3711" to-port="1"/>
-		<edge from-layer="3711" from-port="2" to-layer="3713" to-port="0"/>
-		<edge from-layer="3712" from-port="0" to-layer="3713" to-port="1"/>
-		<edge from-layer="3713" from-port="2" to-layer="3714" to-port="1"/>
-		<edge from-layer="3714" from-port="2" to-layer="3716" to-port="0"/>
-		<edge from-layer="3714" from-port="2" to-layer="3719" to-port="0"/>
-		<edge from-layer="3714" from-port="2" to-layer="4026" to-port="1"/>
-		<edge from-layer="3715" from-port="0" to-layer="3716" to-port="1"/>
-		<edge from-layer="3716" from-port="2" to-layer="3718" to-port="0"/>
-		<edge from-layer="3717" from-port="0" to-layer="3718" to-port="1"/>
-		<edge from-layer="3718" from-port="2" to-layer="3720" to-port="0"/>
-		<edge from-layer="3719" from-port="1" to-layer="4018" to-port="0"/>
-		<edge from-layer="3719" from-port="1" to-layer="3720" to-port="1"/>
-		<edge from-layer="3719" from-port="1" to-layer="3733" to-port="0"/>
-		<edge from-layer="3719" from-port="1" to-layer="3736" to-port="0"/>
-		<edge from-layer="3719" from-port="1" to-layer="3739" to-port="0"/>
-		<edge from-layer="3719" from-port="1" to-layer="3745" to-port="0"/>
-		<edge from-layer="3720" from-port="2" to-layer="3722" to-port="0"/>
-		<edge from-layer="3721" from-port="0" to-layer="3722" to-port="1"/>
-		<edge from-layer="3722" from-port="2" to-layer="3724" to-port="0"/>
-		<edge from-layer="3723" from-port="0" to-layer="3724" to-port="1"/>
-		<edge from-layer="3724" from-port="2" to-layer="3726" to-port="0"/>
-		<edge from-layer="3725" from-port="0" to-layer="3726" to-port="1"/>
-		<edge from-layer="3726" from-port="2" to-layer="3728" to-port="0"/>
-		<edge from-layer="3727" from-port="0" to-layer="3728" to-port="1"/>
-		<edge from-layer="3728" from-port="2" to-layer="3730" to-port="0"/>
-		<edge from-layer="3729" from-port="0" to-layer="3730" to-port="1"/>
-		<edge from-layer="3730" from-port="2" to-layer="3747" to-port="0"/>
-		<edge from-layer="3731" from-port="0" to-layer="3733" to-port="1"/>
-		<edge from-layer="3732" from-port="0" to-layer="3733" to-port="2"/>
-		<edge from-layer="3733" from-port="3" to-layer="3746" to-port="0"/>
-		<edge from-layer="3734" from-port="0" to-layer="3736" to-port="1"/>
-		<edge from-layer="3735" from-port="0" to-layer="3736" to-port="2"/>
-		<edge from-layer="3736" from-port="3" to-layer="3740" to-port="0"/>
-		<edge from-layer="3737" from-port="0" to-layer="3739" to-port="1"/>
-		<edge from-layer="3738" from-port="0" to-layer="3739" to-port="2"/>
-		<edge from-layer="3739" from-port="3" to-layer="3740" to-port="1"/>
-		<edge from-layer="3740" from-port="2" to-layer="3742" to-port="0"/>
-		<edge from-layer="3741" from-port="0" to-layer="3742" to-port="1"/>
-		<edge from-layer="3742" from-port="2" to-layer="3746" to-port="1"/>
-		<edge from-layer="3743" from-port="0" to-layer="3745" to-port="1"/>
-		<edge from-layer="3744" from-port="0" to-layer="3745" to-port="2"/>
-		<edge from-layer="3745" from-port="3" to-layer="3746" to-port="2"/>
-		<edge from-layer="3746" from-port="3" to-layer="3747" to-port="1"/>
-		<edge from-layer="3747" from-port="2" to-layer="3749" to-port="0"/>
-		<edge from-layer="3747" from-port="2" to-layer="3886" to-port="1"/>
-		<edge from-layer="3748" from-port="0" to-layer="3749" to-port="1"/>
-		<edge from-layer="3749" from-port="2" to-layer="3751" to-port="0"/>
-		<edge from-layer="3750" from-port="0" to-layer="3751" to-port="1"/>
-		<edge from-layer="3751" from-port="2" to-layer="3753" to-port="0"/>
-		<edge from-layer="3752" from-port="0" to-layer="3753" to-port="1"/>
-		<edge from-layer="3753" from-port="2" to-layer="3755" to-port="0"/>
-		<edge from-layer="3753" from-port="2" to-layer="3821" to-port="0"/>
-		<edge from-layer="3753" from-port="2" to-layer="3786" to-port="0"/>
-		<edge from-layer="3754" from-port="0" to-layer="3755" to-port="1"/>
-		<edge from-layer="3755" from-port="2" to-layer="3757" to-port="0"/>
-		<edge from-layer="3755" from-port="2" to-layer="3766" to-port="0"/>
-		<edge from-layer="3756" from-port="0" to-layer="3765" to-port="0"/>
-		<edge from-layer="3757" from-port="1" to-layer="3760" to-port="0"/>
-		<edge from-layer="3757" from-port="1" to-layer="3778" to-port="0"/>
-		<edge from-layer="3757" from-port="1" to-layer="3771" to-port="0"/>
-		<edge from-layer="3758" from-port="0" to-layer="3760" to-port="1"/>
-		<edge from-layer="3759" from-port="0" to-layer="3760" to-port="2"/>
-		<edge from-layer="3760" from-port="3" to-layer="3780" to-port="0"/>
-		<edge from-layer="3760" from-port="3" to-layer="3762" to-port="0"/>
-		<edge from-layer="3761" from-port="0" to-layer="3762" to-port="1"/>
-		<edge from-layer="3762" from-port="2" to-layer="3764" to-port="0"/>
-		<edge from-layer="3763" from-port="0" to-layer="3764" to-port="1"/>
-		<edge from-layer="3764" from-port="2" to-layer="3765" to-port="2"/>
-		<edge from-layer="3765" from-port="3" to-layer="3766" to-port="1"/>
-		<edge from-layer="3766" from-port="2" to-layer="3768" to-port="0"/>
-		<edge from-layer="3767" from-port="0" to-layer="3768" to-port="1"/>
-		<edge from-layer="3768" from-port="2" to-layer="3784" to-port="0"/>
-		<edge from-layer="3769" from-port="0" to-layer="3771" to-port="1"/>
-		<edge from-layer="3770" from-port="0" to-layer="3771" to-port="2"/>
-		<edge from-layer="3771" from-port="3" to-layer="3773" to-port="0"/>
-		<edge from-layer="3772" from-port="0" to-layer="3773" to-port="1"/>
-		<edge from-layer="3773" from-port="2" to-layer="3775" to-port="0"/>
-		<edge from-layer="3774" from-port="0" to-layer="3775" to-port="1"/>
-		<edge from-layer="3775" from-port="2" to-layer="3783" to-port="0"/>
-		<edge from-layer="3776" from-port="0" to-layer="3778" to-port="1"/>
-		<edge from-layer="3777" from-port="0" to-layer="3778" to-port="2"/>
-		<edge from-layer="3778" from-port="3" to-layer="3783" to-port="1"/>
-		<edge from-layer="3779" from-port="0" to-layer="3780" to-port="1"/>
-		<edge from-layer="3780" from-port="2" to-layer="3782" to-port="0"/>
-		<edge from-layer="3781" from-port="0" to-layer="3782" to-port="1"/>
-		<edge from-layer="3782" from-port="2" to-layer="3783" to-port="2"/>
-		<edge from-layer="3783" from-port="3" to-layer="3784" to-port="1"/>
-		<edge from-layer="3784" from-port="2" to-layer="3816" to-port="0"/>
-		<edge from-layer="3785" from-port="0" to-layer="3786" to-port="1"/>
-		<edge from-layer="3786" from-port="2" to-layer="3788" to-port="0"/>
-		<edge from-layer="3786" from-port="2" to-layer="3797" to-port="0"/>
-		<edge from-layer="3787" from-port="0" to-layer="3796" to-port="0"/>
-		<edge from-layer="3788" from-port="1" to-layer="3791" to-port="0"/>
-		<edge from-layer="3788" from-port="1" to-layer="3802" to-port="0"/>
-		<edge from-layer="3788" from-port="1" to-layer="3809" to-port="0"/>
-		<edge from-layer="3789" from-port="0" to-layer="3791" to-port="1"/>
-		<edge from-layer="3790" from-port="0" to-layer="3791" to-port="2"/>
-		<edge from-layer="3791" from-port="3" to-layer="3793" to-port="0"/>
-		<edge from-layer="3791" from-port="3" to-layer="3811" to-port="0"/>
-		<edge from-layer="3792" from-port="0" to-layer="3793" to-port="1"/>
-		<edge from-layer="3793" from-port="2" to-layer="3795" to-port="0"/>
-		<edge from-layer="3794" from-port="0" to-layer="3795" to-port="1"/>
-		<edge from-layer="3795" from-port="2" to-layer="3796" to-port="2"/>
-		<edge from-layer="3796" from-port="3" to-layer="3797" to-port="1"/>
-		<edge from-layer="3797" from-port="2" to-layer="3799" to-port="0"/>
-		<edge from-layer="3798" from-port="0" to-layer="3799" to-port="1"/>
-		<edge from-layer="3799" from-port="2" to-layer="3815" to-port="0"/>
-		<edge from-layer="3800" from-port="0" to-layer="3802" to-port="1"/>
-		<edge from-layer="3801" from-port="0" to-layer="3802" to-port="2"/>
-		<edge from-layer="3802" from-port="3" to-layer="3804" to-port="0"/>
-		<edge from-layer="3803" from-port="0" to-layer="3804" to-port="1"/>
-		<edge from-layer="3804" from-port="2" to-layer="3806" to-port="0"/>
-		<edge from-layer="3805" from-port="0" to-layer="3806" to-port="1"/>
-		<edge from-layer="3806" from-port="2" to-layer="3814" to-port="0"/>
-		<edge from-layer="3807" from-port="0" to-layer="3809" to-port="1"/>
-		<edge from-layer="3808" from-port="0" to-layer="3809" to-port="2"/>
-		<edge from-layer="3809" from-port="3" to-layer="3814" to-port="1"/>
-		<edge from-layer="3810" from-port="0" to-layer="3811" to-port="1"/>
-		<edge from-layer="3811" from-port="2" to-layer="3813" to-port="0"/>
-		<edge from-layer="3812" from-port="0" to-layer="3813" to-port="1"/>
-		<edge from-layer="3813" from-port="2" to-layer="3814" to-port="2"/>
-		<edge from-layer="3814" from-port="3" to-layer="3815" to-port="1"/>
-		<edge from-layer="3815" from-port="2" to-layer="3816" to-port="1"/>
-		<edge from-layer="3816" from-port="2" to-layer="3818" to-port="0"/>
-		<edge from-layer="3817" from-port="0" to-layer="3818" to-port="1"/>
-		<edge from-layer="3818" from-port="2" to-layer="3819" to-port="0"/>
-		<edge from-layer="3819" from-port="1" to-layer="3851" to-port="0"/>
-		<edge from-layer="3820" from-port="0" to-layer="3821" to-port="1"/>
-		<edge from-layer="3821" from-port="2" to-layer="3823" to-port="0"/>
-		<edge from-layer="3821" from-port="2" to-layer="3832" to-port="0"/>
-		<edge from-layer="3822" from-port="0" to-layer="3831" to-port="0"/>
-		<edge from-layer="3823" from-port="1" to-layer="3844" to-port="0"/>
-		<edge from-layer="3823" from-port="1" to-layer="3837" to-port="0"/>
-		<edge from-layer="3823" from-port="1" to-layer="3826" to-port="0"/>
-		<edge from-layer="3824" from-port="0" to-layer="3826" to-port="1"/>
-		<edge from-layer="3825" from-port="0" to-layer="3826" to-port="2"/>
-		<edge from-layer="3826" from-port="3" to-layer="3828" to-port="0"/>
-		<edge from-layer="3826" from-port="3" to-layer="3846" to-port="0"/>
-		<edge from-layer="3827" from-port="0" to-layer="3828" to-port="1"/>
-		<edge from-layer="3828" from-port="2" to-layer="3830" to-port="0"/>
-		<edge from-layer="3829" from-port="0" to-layer="3830" to-port="1"/>
-		<edge from-layer="3830" from-port="2" to-layer="3831" to-port="2"/>
-		<edge from-layer="3831" from-port="3" to-layer="3832" to-port="1"/>
-		<edge from-layer="3832" from-port="2" to-layer="3834" to-port="0"/>
-		<edge from-layer="3833" from-port="0" to-layer="3834" to-port="1"/>
-		<edge from-layer="3834" from-port="2" to-layer="3850" to-port="0"/>
-		<edge from-layer="3835" from-port="0" to-layer="3837" to-port="1"/>
-		<edge from-layer="3836" from-port="0" to-layer="3837" to-port="2"/>
-		<edge from-layer="3837" from-port="3" to-layer="3839" to-port="0"/>
-		<edge from-layer="3838" from-port="0" to-layer="3839" to-port="1"/>
-		<edge from-layer="3839" from-port="2" to-layer="3841" to-port="0"/>
-		<edge from-layer="3840" from-port="0" to-layer="3841" to-port="1"/>
-		<edge from-layer="3841" from-port="2" to-layer="3849" to-port="0"/>
-		<edge from-layer="3842" from-port="0" to-layer="3844" to-port="1"/>
-		<edge from-layer="3843" from-port="0" to-layer="3844" to-port="2"/>
-		<edge from-layer="3844" from-port="3" to-layer="3849" to-port="1"/>
-		<edge from-layer="3845" from-port="0" to-layer="3846" to-port="1"/>
-		<edge from-layer="3846" from-port="2" to-layer="3848" to-port="0"/>
-		<edge from-layer="3847" from-port="0" to-layer="3848" to-port="1"/>
-		<edge from-layer="3848" from-port="2" to-layer="3849" to-port="2"/>
-		<edge from-layer="3849" from-port="3" to-layer="3850" to-port="1"/>
-		<edge from-layer="3850" from-port="2" to-layer="3851" to-port="1"/>
-		<edge from-layer="3851" from-port="2" to-layer="3852" to-port="0"/>
-		<edge from-layer="3851" from-port="2" to-layer="3864" to-port="0"/>
-		<edge from-layer="3852" from-port="1" to-layer="3855" to-port="0"/>
-		<edge from-layer="3852" from-port="1" to-layer="3862" to-port="0"/>
-		<edge from-layer="3852" from-port="1" to-layer="3876" to-port="0"/>
-		<edge from-layer="3852" from-port="1" to-layer="3873" to-port="0"/>
-		<edge from-layer="3853" from-port="0" to-layer="3855" to-port="1"/>
-		<edge from-layer="3854" from-port="0" to-layer="3855" to-port="2"/>
-		<edge from-layer="3855" from-port="3" to-layer="3868" to-port="0"/>
-		<edge from-layer="3855" from-port="3" to-layer="3857" to-port="0"/>
-		<edge from-layer="3856" from-port="0" to-layer="3857" to-port="1"/>
-		<edge from-layer="3857" from-port="2" to-layer="3859" to-port="0"/>
-		<edge from-layer="3858" from-port="0" to-layer="3859" to-port="1"/>
-		<edge from-layer="3859" from-port="2" to-layer="3863" to-port="0"/>
-		<edge from-layer="3860" from-port="0" to-layer="3862" to-port="1"/>
-		<edge from-layer="3861" from-port="0" to-layer="3862" to-port="2"/>
-		<edge from-layer="3862" from-port="3" to-layer="3863" to-port="2"/>
-		<edge from-layer="3863" from-port="3" to-layer="3864" to-port="1"/>
-		<edge from-layer="3864" from-port="2" to-layer="3866" to-port="0"/>
-		<edge from-layer="3865" from-port="0" to-layer="3866" to-port="1"/>
-		<edge from-layer="3866" from-port="2" to-layer="3882" to-port="0"/>
-		<edge from-layer="3867" from-port="0" to-layer="3868" to-port="1"/>
-		<edge from-layer="3868" from-port="2" to-layer="3870" to-port="0"/>
-		<edge from-layer="3869" from-port="0" to-layer="3870" to-port="1"/>
-		<edge from-layer="3870" from-port="2" to-layer="3881" to-port="0"/>
-		<edge from-layer="3871" from-port="0" to-layer="3873" to-port="1"/>
-		<edge from-layer="3872" from-port="0" to-layer="3873" to-port="2"/>
-		<edge from-layer="3873" from-port="3" to-layer="3881" to-port="1"/>
-		<edge from-layer="3874" from-port="0" to-layer="3876" to-port="1"/>
-		<edge from-layer="3875" from-port="0" to-layer="3876" to-port="2"/>
-		<edge from-layer="3876" from-port="3" to-layer="3878" to-port="0"/>
-		<edge from-layer="3877" from-port="0" to-layer="3878" to-port="1"/>
-		<edge from-layer="3878" from-port="2" to-layer="3880" to-port="0"/>
-		<edge from-layer="3879" from-port="0" to-layer="3880" to-port="1"/>
-		<edge from-layer="3880" from-port="2" to-layer="3881" to-port="2"/>
-		<edge from-layer="3881" from-port="3" to-layer="3882" to-port="1"/>
-		<edge from-layer="3882" from-port="2" to-layer="3884" to-port="0"/>
-		<edge from-layer="3883" from-port="0" to-layer="3884" to-port="1"/>
-		<edge from-layer="3884" from-port="2" to-layer="3885" to-port="1"/>
-		<edge from-layer="3885" from-port="2" to-layer="3886" to-port="0"/>
-		<edge from-layer="3886" from-port="2" to-layer="3975" to-port="1"/>
-		<edge from-layer="3886" from-port="2" to-layer="3888" to-port="0"/>
-		<edge from-layer="3887" from-port="0" to-layer="3888" to-port="1"/>
-		<edge from-layer="3888" from-port="2" to-layer="3890" to-port="0"/>
-		<edge from-layer="3889" from-port="0" to-layer="3890" to-port="1"/>
-		<edge from-layer="3890" from-port="2" to-layer="3892" to-port="0"/>
-		<edge from-layer="3891" from-port="0" to-layer="3892" to-port="1"/>
-		<edge from-layer="3892" from-port="2" to-layer="3894" to-port="0"/>
-		<edge from-layer="3893" from-port="0" to-layer="3894" to-port="1"/>
-		<edge from-layer="3894" from-port="2" to-layer="3896" to-port="0"/>
-		<edge from-layer="3894" from-port="2" to-layer="3905" to-port="0"/>
-		<edge from-layer="3895" from-port="0" to-layer="3904" to-port="0"/>
-		<edge from-layer="3896" from-port="1" to-layer="3917" to-port="0"/>
-		<edge from-layer="3896" from-port="1" to-layer="3910" to-port="0"/>
-		<edge from-layer="3896" from-port="1" to-layer="3899" to-port="0"/>
-		<edge from-layer="3897" from-port="0" to-layer="3899" to-port="1"/>
-		<edge from-layer="3898" from-port="0" to-layer="3899" to-port="2"/>
-		<edge from-layer="3899" from-port="3" to-layer="3901" to-port="0"/>
-		<edge from-layer="3899" from-port="3" to-layer="3919" to-port="0"/>
-		<edge from-layer="3900" from-port="0" to-layer="3901" to-port="1"/>
-		<edge from-layer="3901" from-port="2" to-layer="3903" to-port="0"/>
-		<edge from-layer="3902" from-port="0" to-layer="3903" to-port="1"/>
-		<edge from-layer="3903" from-port="2" to-layer="3904" to-port="2"/>
-		<edge from-layer="3904" from-port="3" to-layer="3905" to-port="1"/>
-		<edge from-layer="3905" from-port="2" to-layer="3907" to-port="0"/>
-		<edge from-layer="3906" from-port="0" to-layer="3907" to-port="1"/>
-		<edge from-layer="3907" from-port="2" to-layer="3923" to-port="0"/>
-		<edge from-layer="3908" from-port="0" to-layer="3910" to-port="1"/>
-		<edge from-layer="3909" from-port="0" to-layer="3910" to-port="2"/>
-		<edge from-layer="3910" from-port="3" to-layer="3912" to-port="0"/>
-		<edge from-layer="3911" from-port="0" to-layer="3912" to-port="1"/>
-		<edge from-layer="3912" from-port="2" to-layer="3914" to-port="0"/>
-		<edge from-layer="3913" from-port="0" to-layer="3914" to-port="1"/>
-		<edge from-layer="3914" from-port="2" to-layer="3922" to-port="0"/>
-		<edge from-layer="3915" from-port="0" to-layer="3917" to-port="1"/>
-		<edge from-layer="3916" from-port="0" to-layer="3917" to-port="2"/>
-		<edge from-layer="3917" from-port="3" to-layer="3922" to-port="1"/>
-		<edge from-layer="3918" from-port="0" to-layer="3919" to-port="1"/>
-		<edge from-layer="3919" from-port="2" to-layer="3921" to-port="0"/>
-		<edge from-layer="3920" from-port="0" to-layer="3921" to-port="1"/>
-		<edge from-layer="3921" from-port="2" to-layer="3922" to-port="2"/>
-		<edge from-layer="3922" from-port="3" to-layer="3923" to-port="1"/>
-		<edge from-layer="3923" from-port="2" to-layer="3930" to-port="0"/>
-		<edge from-layer="3924" from-port="0" to-layer="3925" to-port="1"/>
-		<edge from-layer="3925" from-port="2" to-layer="3926" to-port="0"/>
-		<edge from-layer="3926" from-port="2" to-layer="3928" to-port="0"/>
-		<edge from-layer="3927" from-port="0" to-layer="3928" to-port="1"/>
-		<edge from-layer="3928" from-port="2" to-layer="3929" to-port="0"/>
-		<edge from-layer="3929" from-port="2" to-layer="3930" to-port="1"/>
-		<edge from-layer="3930" from-port="2" to-layer="3932" to-port="0"/>
-		<edge from-layer="3931" from-port="0" to-layer="3932" to-port="1"/>
-		<edge from-layer="3932" from-port="2" to-layer="3933" to-port="0"/>
-		<edge from-layer="3933" from-port="1" to-layer="3940" to-port="0"/>
-		<edge from-layer="3934" from-port="0" to-layer="3935" to-port="1"/>
-		<edge from-layer="3935" from-port="2" to-layer="3936" to-port="0"/>
-		<edge from-layer="3936" from-port="2" to-layer="3938" to-port="0"/>
-		<edge from-layer="3937" from-port="0" to-layer="3938" to-port="1"/>
-		<edge from-layer="3938" from-port="2" to-layer="3939" to-port="0"/>
-		<edge from-layer="3939" from-port="2" to-layer="3940" to-port="1"/>
-		<edge from-layer="3940" from-port="2" to-layer="3953" to-port="0"/>
-		<edge from-layer="3940" from-port="2" to-layer="3941" to-port="0"/>
-		<edge from-layer="3941" from-port="1" to-layer="3951" to-port="0"/>
-		<edge from-layer="3941" from-port="1" to-layer="3962" to-port="0"/>
-		<edge from-layer="3941" from-port="1" to-layer="3965" to-port="0"/>
-		<edge from-layer="3941" from-port="1" to-layer="3944" to-port="0"/>
-		<edge from-layer="3942" from-port="0" to-layer="3944" to-port="1"/>
-		<edge from-layer="3943" from-port="0" to-layer="3944" to-port="2"/>
-		<edge from-layer="3944" from-port="3" to-layer="3946" to-port="0"/>
-		<edge from-layer="3944" from-port="3" to-layer="3957" to-port="0"/>
-		<edge from-layer="3945" from-port="0" to-layer="3946" to-port="1"/>
-		<edge from-layer="3946" from-port="2" to-layer="3948" to-port="0"/>
-		<edge from-layer="3947" from-port="0" to-layer="3948" to-port="1"/>
-		<edge from-layer="3948" from-port="2" to-layer="3952" to-port="0"/>
-		<edge from-layer="3949" from-port="0" to-layer="3951" to-port="1"/>
-		<edge from-layer="3950" from-port="0" to-layer="3951" to-port="2"/>
-		<edge from-layer="3951" from-port="3" to-layer="3952" to-port="2"/>
-		<edge from-layer="3952" from-port="3" to-layer="3953" to-port="1"/>
-		<edge from-layer="3953" from-port="2" to-layer="3955" to-port="0"/>
-		<edge from-layer="3954" from-port="0" to-layer="3955" to-port="1"/>
-		<edge from-layer="3955" from-port="2" to-layer="3971" to-port="0"/>
-		<edge from-layer="3956" from-port="0" to-layer="3957" to-port="1"/>
-		<edge from-layer="3957" from-port="2" to-layer="3959" to-port="0"/>
-		<edge from-layer="3958" from-port="0" to-layer="3959" to-port="1"/>
-		<edge from-layer="3959" from-port="2" to-layer="3970" to-port="0"/>
-		<edge from-layer="3960" from-port="0" to-layer="3962" to-port="1"/>
-		<edge from-layer="3961" from-port="0" to-layer="3962" to-port="2"/>
-		<edge from-layer="3962" from-port="3" to-layer="3970" to-port="1"/>
-		<edge from-layer="3963" from-port="0" to-layer="3965" to-port="1"/>
-		<edge from-layer="3964" from-port="0" to-layer="3965" to-port="2"/>
-		<edge from-layer="3965" from-port="3" to-layer="3967" to-port="0"/>
-		<edge from-layer="3966" from-port="0" to-layer="3967" to-port="1"/>
-		<edge from-layer="3967" from-port="2" to-layer="3969" to-port="0"/>
-		<edge from-layer="3968" from-port="0" to-layer="3969" to-port="1"/>
-		<edge from-layer="3969" from-port="2" to-layer="3970" to-port="2"/>
-		<edge from-layer="3970" from-port="3" to-layer="3971" to-port="1"/>
-		<edge from-layer="3971" from-port="2" to-layer="3973" to-port="0"/>
-		<edge from-layer="3972" from-port="0" to-layer="3973" to-port="1"/>
-		<edge from-layer="3973" from-port="2" to-layer="3974" to-port="1"/>
-		<edge from-layer="3974" from-port="2" to-layer="3975" to-port="0"/>
-		<edge from-layer="3975" from-port="2" to-layer="3977" to-port="0"/>
-		<edge from-layer="3975" from-port="2" to-layer="4015" to-port="1"/>
-		<edge from-layer="3976" from-port="0" to-layer="3977" to-port="1"/>
-		<edge from-layer="3977" from-port="2" to-layer="3979" to-port="0"/>
-		<edge from-layer="3978" from-port="0" to-layer="3979" to-port="1"/>
-		<edge from-layer="3979" from-port="2" to-layer="3981" to-port="0"/>
-		<edge from-layer="3980" from-port="0" to-layer="3981" to-port="1"/>
-		<edge from-layer="3981" from-port="2" to-layer="3983" to-port="0"/>
-		<edge from-layer="3982" from-port="0" to-layer="3983" to-port="1"/>
-		<edge from-layer="3983" from-port="2" to-layer="3984" to-port="1"/>
-		<edge from-layer="3984" from-port="2" to-layer="3988" to-port="0"/>
-		<edge from-layer="3984" from-port="2" to-layer="3999" to-port="0"/>
-		<edge from-layer="3984" from-port="2" to-layer="4009" to-port="0"/>
-		<edge from-layer="3985" from-port="0" to-layer="3999" to-port="1"/>
-		<edge from-layer="3986" from-port="0" to-layer="3997" to-port="0"/>
-		<edge from-layer="3987" from-port="0" to-layer="3997" to-port="1"/>
-		<edge from-layer="3988" from-port="1" to-layer="3991" to-port="0"/>
-		<edge from-layer="3989" from-port="0" to-layer="3991" to-port="1"/>
-		<edge from-layer="3990" from-port="0" to-layer="3991" to-port="2"/>
-		<edge from-layer="3991" from-port="3" to-layer="3993" to-port="0"/>
-		<edge from-layer="3992" from-port="0" to-layer="3993" to-port="1"/>
-		<edge from-layer="3993" from-port="2" to-layer="3995" to-port="0"/>
-		<edge from-layer="3994" from-port="0" to-layer="3995" to-port="1"/>
-		<edge from-layer="3995" from-port="2" to-layer="4006" to-port="0"/>
-		<edge from-layer="3995" from-port="2" to-layer="3997" to-port="2"/>
-		<edge from-layer="3995" from-port="2" to-layer="4003" to-port="2"/>
-		<edge from-layer="3996" from-port="0" to-layer="3997" to-port="3"/>
-		<edge from-layer="3997" from-port="4" to-layer="3999" to-port="2"/>
-		<edge from-layer="3998" from-port="0" to-layer="3999" to-port="3"/>
-		<edge from-layer="3999" from-port="4" to-layer="4011" to-port="0"/>
-		<edge from-layer="4000" from-port="0" to-layer="4003" to-port="0"/>
-		<edge from-layer="4001" from-port="0" to-layer="4003" to-port="1"/>
-		<edge from-layer="4001" from-port="0" to-layer="4007" to-port="1"/>
-		<edge from-layer="4002" from-port="0" to-layer="4003" to-port="3"/>
-		<edge from-layer="4002" from-port="0" to-layer="4007" to-port="3"/>
-		<edge from-layer="4003" from-port="4" to-layer="4009" to-port="1"/>
-		<edge from-layer="4004" from-port="0" to-layer="4007" to-port="0"/>
-		<edge from-layer="4005" from-port="0" to-layer="4006" to-port="1"/>
-		<edge from-layer="4006" from-port="2" to-layer="4007" to-port="2"/>
-		<edge from-layer="4007" from-port="4" to-layer="4009" to-port="2"/>
-		<edge from-layer="4008" from-port="0" to-layer="4009" to-port="3"/>
-		<edge from-layer="4009" from-port="4" to-layer="4010" to-port="0"/>
-		<edge from-layer="4010" from-port="1" to-layer="4011" to-port="1"/>
-		<edge from-layer="4011" from-port="2" to-layer="4013" to-port="0"/>
-		<edge from-layer="4012" from-port="0" to-layer="4013" to-port="1"/>
-		<edge from-layer="4013" from-port="2" to-layer="4014" to-port="1"/>
-		<edge from-layer="4014" from-port="2" to-layer="4015" to-port="0"/>
-		<edge from-layer="4015" from-port="2" to-layer="4019" to-port="0"/>
-		<edge from-layer="4016" from-port="0" to-layer="4018" to-port="1"/>
-		<edge from-layer="4017" from-port="0" to-layer="4018" to-port="2"/>
-		<edge from-layer="4018" from-port="3" to-layer="4019" to-port="1"/>
-		<edge from-layer="4019" from-port="2" to-layer="4021" to-port="0"/>
-		<edge from-layer="4020" from-port="0" to-layer="4021" to-port="1"/>
-		<edge from-layer="4021" from-port="2" to-layer="4023" to-port="0"/>
-		<edge from-layer="4022" from-port="0" to-layer="4023" to-port="1"/>
-		<edge from-layer="4023" from-port="2" to-layer="4025" to-port="0"/>
-		<edge from-layer="4024" from-port="0" to-layer="4025" to-port="1"/>
-		<edge from-layer="4025" from-port="2" to-layer="4026" to-port="0"/>
-		<edge from-layer="4026" from-port="2" to-layer="4027" to-port="0"/>
-		<edge from-layer="4027" from-port="2" to-layer="4029" to-port="0"/>
-		<edge from-layer="4027" from-port="2" to-layer="4033" to-port="0"/>
-		<edge from-layer="4027" from-port="2" to-layer="4036" to-port="0"/>
-		<edge from-layer="4028" from-port="0" to-layer="4029" to-port="1"/>
-		<edge from-layer="4029" from-port="2" to-layer="4031" to-port="0"/>
-		<edge from-layer="4030" from-port="0" to-layer="4031" to-port="1"/>
-		<edge from-layer="4031" from-port="2" to-layer="4072" to-port="0"/>
-		<edge from-layer="4032" from-port="0" to-layer="4033" to-port="1"/>
-		<edge from-layer="4033" from-port="2" to-layer="4035" to-port="0"/>
-		<edge from-layer="4034" from-port="0" to-layer="4035" to-port="1"/>
-		<edge from-layer="4035" from-port="2" to-layer="4037" to-port="0"/>
-		<edge from-layer="4036" from-port="1" to-layer="4037" to-port="1"/>
-		<edge from-layer="4037" from-port="2" to-layer="4039" to-port="0"/>
-		<edge from-layer="4038" from-port="0" to-layer="4039" to-port="1"/>
-		<edge from-layer="4039" from-port="2" to-layer="4041" to-port="0"/>
-		<edge from-layer="4040" from-port="0" to-layer="4041" to-port="1"/>
-		<edge from-layer="4041" from-port="2" to-layer="4042" to-port="0"/>
-		<edge from-layer="4042" from-port="1" to-layer="4044" to-port="0"/>
-		<edge from-layer="4043" from-port="0" to-layer="4044" to-port="1"/>
-		<edge from-layer="4044" from-port="2" to-layer="4046" to-port="0"/>
-		<edge from-layer="4045" from-port="0" to-layer="4046" to-port="1"/>
-		<edge from-layer="4046" from-port="2" to-layer="4056" to-port="0"/>
-		<edge from-layer="4047" from-port="1" to-layer="4049" to-port="0"/>
-		<edge from-layer="4048" from-port="0" to-layer="4049" to-port="1"/>
-		<edge from-layer="4049" from-port="2" to-layer="4051" to-port="0"/>
-		<edge from-layer="4050" from-port="0" to-layer="4051" to-port="1"/>
-		<edge from-layer="4051" from-port="2" to-layer="4053" to-port="0"/>
-		<edge from-layer="4052" from-port="0" to-layer="4053" to-port="1"/>
-		<edge from-layer="4053" from-port="2" to-layer="4055" to-port="0"/>
-		<edge from-layer="4054" from-port="0" to-layer="4055" to-port="1"/>
-		<edge from-layer="4055" from-port="2" to-layer="4056" to-port="1"/>
-		<edge from-layer="4056" from-port="2" to-layer="4061" to-port="0"/>
-		<edge from-layer="4056" from-port="2" to-layer="4058" to-port="0"/>
-		<edge from-layer="4057" from-port="0" to-layer="4058" to-port="1"/>
-		<edge from-layer="4058" from-port="2" to-layer="4060" to-port="0"/>
-		<edge from-layer="4059" from-port="0" to-layer="4060" to-port="1"/>
-		<edge from-layer="4060" from-port="2" to-layer="4062" to-port="0"/>
-		<edge from-layer="4061" from-port="1" to-layer="4062" to-port="1"/>
-		<edge from-layer="4062" from-port="2" to-layer="4064" to-port="0"/>
-		<edge from-layer="4063" from-port="0" to-layer="4064" to-port="1"/>
-		<edge from-layer="4064" from-port="2" to-layer="4066" to-port="0"/>
-		<edge from-layer="4065" from-port="0" to-layer="4066" to-port="1"/>
-		<edge from-layer="4066" from-port="2" to-layer="4067" to-port="0"/>
-		<edge from-layer="4067" from-port="1" to-layer="4069" to-port="0"/>
-		<edge from-layer="4068" from-port="0" to-layer="4069" to-port="1"/>
-		<edge from-layer="4069" from-port="2" to-layer="4071" to-port="0"/>
-		<edge from-layer="4070" from-port="0" to-layer="4071" to-port="1"/>
-		<edge from-layer="4071" from-port="2" to-layer="4072" to-port="1"/>
-		<edge from-layer="4072" from-port="2" to-layer="4384" to-port="1"/>
-		<edge from-layer="4072" from-port="2" to-layer="4074" to-port="0"/>
-		<edge from-layer="4072" from-port="2" to-layer="4077" to-port="0"/>
-		<edge from-layer="4073" from-port="0" to-layer="4074" to-port="1"/>
-		<edge from-layer="4074" from-port="2" to-layer="4076" to-port="0"/>
-		<edge from-layer="4075" from-port="0" to-layer="4076" to-port="1"/>
-		<edge from-layer="4076" from-port="2" to-layer="4078" to-port="0"/>
-		<edge from-layer="4077" from-port="1" to-layer="4094" to-port="0"/>
-		<edge from-layer="4077" from-port="1" to-layer="4091" to-port="0"/>
-		<edge from-layer="4077" from-port="1" to-layer="4097" to-port="0"/>
-		<edge from-layer="4077" from-port="1" to-layer="4078" to-port="1"/>
-		<edge from-layer="4077" from-port="1" to-layer="4103" to-port="0"/>
-		<edge from-layer="4077" from-port="1" to-layer="4376" to-port="0"/>
-		<edge from-layer="4078" from-port="2" to-layer="4080" to-port="0"/>
-		<edge from-layer="4079" from-port="0" to-layer="4080" to-port="1"/>
-		<edge from-layer="4080" from-port="2" to-layer="4082" to-port="0"/>
-		<edge from-layer="4081" from-port="0" to-layer="4082" to-port="1"/>
-		<edge from-layer="4082" from-port="2" to-layer="4084" to-port="0"/>
-		<edge from-layer="4083" from-port="0" to-layer="4084" to-port="1"/>
-		<edge from-layer="4084" from-port="2" to-layer="4086" to-port="0"/>
-		<edge from-layer="4085" from-port="0" to-layer="4086" to-port="1"/>
-		<edge from-layer="4086" from-port="2" to-layer="4088" to-port="0"/>
-		<edge from-layer="4087" from-port="0" to-layer="4088" to-port="1"/>
-		<edge from-layer="4088" from-port="2" to-layer="4105" to-port="0"/>
-		<edge from-layer="4089" from-port="0" to-layer="4091" to-port="1"/>
-		<edge from-layer="4090" from-port="0" to-layer="4091" to-port="2"/>
-		<edge from-layer="4091" from-port="3" to-layer="4104" to-port="0"/>
-		<edge from-layer="4092" from-port="0" to-layer="4094" to-port="1"/>
-		<edge from-layer="4093" from-port="0" to-layer="4094" to-port="2"/>
-		<edge from-layer="4094" from-port="3" to-layer="4098" to-port="0"/>
-		<edge from-layer="4095" from-port="0" to-layer="4097" to-port="1"/>
-		<edge from-layer="4096" from-port="0" to-layer="4097" to-port="2"/>
-		<edge from-layer="4097" from-port="3" to-layer="4098" to-port="1"/>
-		<edge from-layer="4098" from-port="2" to-layer="4100" to-port="0"/>
-		<edge from-layer="4099" from-port="0" to-layer="4100" to-port="1"/>
-		<edge from-layer="4100" from-port="2" to-layer="4104" to-port="1"/>
-		<edge from-layer="4101" from-port="0" to-layer="4103" to-port="1"/>
-		<edge from-layer="4102" from-port="0" to-layer="4103" to-port="2"/>
-		<edge from-layer="4103" from-port="3" to-layer="4104" to-port="2"/>
-		<edge from-layer="4104" from-port="3" to-layer="4105" to-port="1"/>
-		<edge from-layer="4105" from-port="2" to-layer="4244" to-port="1"/>
-		<edge from-layer="4105" from-port="2" to-layer="4107" to-port="0"/>
-		<edge from-layer="4106" from-port="0" to-layer="4107" to-port="1"/>
-		<edge from-layer="4107" from-port="2" to-layer="4109" to-port="0"/>
-		<edge from-layer="4108" from-port="0" to-layer="4109" to-port="1"/>
-		<edge from-layer="4109" from-port="2" to-layer="4111" to-port="0"/>
-		<edge from-layer="4110" from-port="0" to-layer="4111" to-port="1"/>
-		<edge from-layer="4111" from-port="2" to-layer="4144" to-port="0"/>
-		<edge from-layer="4111" from-port="2" to-layer="4113" to-port="0"/>
-		<edge from-layer="4111" from-port="2" to-layer="4179" to-port="0"/>
-		<edge from-layer="4112" from-port="0" to-layer="4113" to-port="1"/>
-		<edge from-layer="4113" from-port="2" to-layer="4124" to-port="0"/>
-		<edge from-layer="4113" from-port="2" to-layer="4115" to-port="0"/>
-		<edge from-layer="4114" from-port="0" to-layer="4123" to-port="0"/>
-		<edge from-layer="4115" from-port="1" to-layer="4129" to-port="0"/>
-		<edge from-layer="4115" from-port="1" to-layer="4136" to-port="0"/>
-		<edge from-layer="4115" from-port="1" to-layer="4118" to-port="0"/>
-		<edge from-layer="4116" from-port="0" to-layer="4118" to-port="1"/>
-		<edge from-layer="4117" from-port="0" to-layer="4118" to-port="2"/>
-		<edge from-layer="4118" from-port="3" to-layer="4138" to-port="0"/>
-		<edge from-layer="4118" from-port="3" to-layer="4120" to-port="0"/>
-		<edge from-layer="4119" from-port="0" to-layer="4120" to-port="1"/>
-		<edge from-layer="4120" from-port="2" to-layer="4122" to-port="0"/>
-		<edge from-layer="4121" from-port="0" to-layer="4122" to-port="1"/>
-		<edge from-layer="4122" from-port="2" to-layer="4123" to-port="2"/>
-		<edge from-layer="4123" from-port="3" to-layer="4124" to-port="1"/>
-		<edge from-layer="4124" from-port="2" to-layer="4126" to-port="0"/>
-		<edge from-layer="4125" from-port="0" to-layer="4126" to-port="1"/>
-		<edge from-layer="4126" from-port="2" to-layer="4142" to-port="0"/>
-		<edge from-layer="4127" from-port="0" to-layer="4129" to-port="1"/>
-		<edge from-layer="4128" from-port="0" to-layer="4129" to-port="2"/>
-		<edge from-layer="4129" from-port="3" to-layer="4131" to-port="0"/>
-		<edge from-layer="4130" from-port="0" to-layer="4131" to-port="1"/>
-		<edge from-layer="4131" from-port="2" to-layer="4133" to-port="0"/>
-		<edge from-layer="4132" from-port="0" to-layer="4133" to-port="1"/>
-		<edge from-layer="4133" from-port="2" to-layer="4141" to-port="0"/>
-		<edge from-layer="4134" from-port="0" to-layer="4136" to-port="1"/>
-		<edge from-layer="4135" from-port="0" to-layer="4136" to-port="2"/>
-		<edge from-layer="4136" from-port="3" to-layer="4141" to-port="1"/>
-		<edge from-layer="4137" from-port="0" to-layer="4138" to-port="1"/>
-		<edge from-layer="4138" from-port="2" to-layer="4140" to-port="0"/>
-		<edge from-layer="4139" from-port="0" to-layer="4140" to-port="1"/>
-		<edge from-layer="4140" from-port="2" to-layer="4141" to-port="2"/>
-		<edge from-layer="4141" from-port="3" to-layer="4142" to-port="1"/>
-		<edge from-layer="4142" from-port="2" to-layer="4174" to-port="0"/>
-		<edge from-layer="4143" from-port="0" to-layer="4144" to-port="1"/>
-		<edge from-layer="4144" from-port="2" to-layer="4146" to-port="0"/>
-		<edge from-layer="4144" from-port="2" to-layer="4155" to-port="0"/>
-		<edge from-layer="4145" from-port="0" to-layer="4154" to-port="0"/>
-		<edge from-layer="4146" from-port="1" to-layer="4167" to-port="0"/>
-		<edge from-layer="4146" from-port="1" to-layer="4149" to-port="0"/>
-		<edge from-layer="4146" from-port="1" to-layer="4160" to-port="0"/>
-		<edge from-layer="4147" from-port="0" to-layer="4149" to-port="1"/>
-		<edge from-layer="4148" from-port="0" to-layer="4149" to-port="2"/>
-		<edge from-layer="4149" from-port="3" to-layer="4169" to-port="0"/>
-		<edge from-layer="4149" from-port="3" to-layer="4151" to-port="0"/>
-		<edge from-layer="4150" from-port="0" to-layer="4151" to-port="1"/>
-		<edge from-layer="4151" from-port="2" to-layer="4153" to-port="0"/>
-		<edge from-layer="4152" from-port="0" to-layer="4153" to-port="1"/>
-		<edge from-layer="4153" from-port="2" to-layer="4154" to-port="2"/>
-		<edge from-layer="4154" from-port="3" to-layer="4155" to-port="1"/>
-		<edge from-layer="4155" from-port="2" to-layer="4157" to-port="0"/>
-		<edge from-layer="4156" from-port="0" to-layer="4157" to-port="1"/>
-		<edge from-layer="4157" from-port="2" to-layer="4173" to-port="0"/>
-		<edge from-layer="4158" from-port="0" to-layer="4160" to-port="1"/>
-		<edge from-layer="4159" from-port="0" to-layer="4160" to-port="2"/>
-		<edge from-layer="4160" from-port="3" to-layer="4162" to-port="0"/>
-		<edge from-layer="4161" from-port="0" to-layer="4162" to-port="1"/>
-		<edge from-layer="4162" from-port="2" to-layer="4164" to-port="0"/>
-		<edge from-layer="4163" from-port="0" to-layer="4164" to-port="1"/>
-		<edge from-layer="4164" from-port="2" to-layer="4172" to-port="0"/>
-		<edge from-layer="4165" from-port="0" to-layer="4167" to-port="1"/>
-		<edge from-layer="4166" from-port="0" to-layer="4167" to-port="2"/>
-		<edge from-layer="4167" from-port="3" to-layer="4172" to-port="1"/>
-		<edge from-layer="4168" from-port="0" to-layer="4169" to-port="1"/>
-		<edge from-layer="4169" from-port="2" to-layer="4171" to-port="0"/>
-		<edge from-layer="4170" from-port="0" to-layer="4171" to-port="1"/>
-		<edge from-layer="4171" from-port="2" to-layer="4172" to-port="2"/>
-		<edge from-layer="4172" from-port="3" to-layer="4173" to-port="1"/>
-		<edge from-layer="4173" from-port="2" to-layer="4174" to-port="1"/>
-		<edge from-layer="4174" from-port="2" to-layer="4176" to-port="0"/>
-		<edge from-layer="4175" from-port="0" to-layer="4176" to-port="1"/>
-		<edge from-layer="4176" from-port="2" to-layer="4177" to-port="0"/>
-		<edge from-layer="4177" from-port="1" to-layer="4209" to-port="0"/>
-		<edge from-layer="4178" from-port="0" to-layer="4179" to-port="1"/>
-		<edge from-layer="4179" from-port="2" to-layer="4190" to-port="0"/>
-		<edge from-layer="4179" from-port="2" to-layer="4181" to-port="0"/>
-		<edge from-layer="4180" from-port="0" to-layer="4189" to-port="0"/>
-		<edge from-layer="4181" from-port="1" to-layer="4184" to-port="0"/>
-		<edge from-layer="4181" from-port="1" to-layer="4195" to-port="0"/>
-		<edge from-layer="4181" from-port="1" to-layer="4202" to-port="0"/>
-		<edge from-layer="4182" from-port="0" to-layer="4184" to-port="1"/>
-		<edge from-layer="4183" from-port="0" to-layer="4184" to-port="2"/>
-		<edge from-layer="4184" from-port="3" to-layer="4204" to-port="0"/>
-		<edge from-layer="4184" from-port="3" to-layer="4186" to-port="0"/>
-		<edge from-layer="4185" from-port="0" to-layer="4186" to-port="1"/>
-		<edge from-layer="4186" from-port="2" to-layer="4188" to-port="0"/>
-		<edge from-layer="4187" from-port="0" to-layer="4188" to-port="1"/>
-		<edge from-layer="4188" from-port="2" to-layer="4189" to-port="2"/>
-		<edge from-layer="4189" from-port="3" to-layer="4190" to-port="1"/>
-		<edge from-layer="4190" from-port="2" to-layer="4192" to-port="0"/>
-		<edge from-layer="4191" from-port="0" to-layer="4192" to-port="1"/>
-		<edge from-layer="4192" from-port="2" to-layer="4208" to-port="0"/>
-		<edge from-layer="4193" from-port="0" to-layer="4195" to-port="1"/>
-		<edge from-layer="4194" from-port="0" to-layer="4195" to-port="2"/>
-		<edge from-layer="4195" from-port="3" to-layer="4197" to-port="0"/>
-		<edge from-layer="4196" from-port="0" to-layer="4197" to-port="1"/>
-		<edge from-layer="4197" from-port="2" to-layer="4199" to-port="0"/>
-		<edge from-layer="4198" from-port="0" to-layer="4199" to-port="1"/>
-		<edge from-layer="4199" from-port="2" to-layer="4207" to-port="0"/>
-		<edge from-layer="4200" from-port="0" to-layer="4202" to-port="1"/>
-		<edge from-layer="4201" from-port="0" to-layer="4202" to-port="2"/>
-		<edge from-layer="4202" from-port="3" to-layer="4207" to-port="1"/>
-		<edge from-layer="4203" from-port="0" to-layer="4204" to-port="1"/>
-		<edge from-layer="4204" from-port="2" to-layer="4206" to-port="0"/>
-		<edge from-layer="4205" from-port="0" to-layer="4206" to-port="1"/>
-		<edge from-layer="4206" from-port="2" to-layer="4207" to-port="2"/>
-		<edge from-layer="4207" from-port="3" to-layer="4208" to-port="1"/>
-		<edge from-layer="4208" from-port="2" to-layer="4209" to-port="1"/>
-		<edge from-layer="4209" from-port="2" to-layer="4210" to-port="0"/>
-		<edge from-layer="4209" from-port="2" to-layer="4222" to-port="0"/>
-		<edge from-layer="4210" from-port="1" to-layer="4213" to-port="0"/>
-		<edge from-layer="4210" from-port="1" to-layer="4220" to-port="0"/>
-		<edge from-layer="4210" from-port="1" to-layer="4234" to-port="0"/>
-		<edge from-layer="4210" from-port="1" to-layer="4231" to-port="0"/>
-		<edge from-layer="4211" from-port="0" to-layer="4213" to-port="1"/>
-		<edge from-layer="4212" from-port="0" to-layer="4213" to-port="2"/>
-		<edge from-layer="4213" from-port="3" to-layer="4226" to-port="0"/>
-		<edge from-layer="4213" from-port="3" to-layer="4215" to-port="0"/>
-		<edge from-layer="4214" from-port="0" to-layer="4215" to-port="1"/>
-		<edge from-layer="4215" from-port="2" to-layer="4217" to-port="0"/>
-		<edge from-layer="4216" from-port="0" to-layer="4217" to-port="1"/>
-		<edge from-layer="4217" from-port="2" to-layer="4221" to-port="0"/>
-		<edge from-layer="4218" from-port="0" to-layer="4220" to-port="1"/>
-		<edge from-layer="4219" from-port="0" to-layer="4220" to-port="2"/>
-		<edge from-layer="4220" from-port="3" to-layer="4221" to-port="2"/>
-		<edge from-layer="4221" from-port="3" to-layer="4222" to-port="1"/>
-		<edge from-layer="4222" from-port="2" to-layer="4224" to-port="0"/>
-		<edge from-layer="4223" from-port="0" to-layer="4224" to-port="1"/>
-		<edge from-layer="4224" from-port="2" to-layer="4240" to-port="0"/>
-		<edge from-layer="4225" from-port="0" to-layer="4226" to-port="1"/>
-		<edge from-layer="4226" from-port="2" to-layer="4228" to-port="0"/>
-		<edge from-layer="4227" from-port="0" to-layer="4228" to-port="1"/>
-		<edge from-layer="4228" from-port="2" to-layer="4239" to-port="0"/>
-		<edge from-layer="4229" from-port="0" to-layer="4231" to-port="1"/>
-		<edge from-layer="4230" from-port="0" to-layer="4231" to-port="2"/>
-		<edge from-layer="4231" from-port="3" to-layer="4239" to-port="1"/>
-		<edge from-layer="4232" from-port="0" to-layer="4234" to-port="1"/>
-		<edge from-layer="4233" from-port="0" to-layer="4234" to-port="2"/>
-		<edge from-layer="4234" from-port="3" to-layer="4236" to-port="0"/>
-		<edge from-layer="4235" from-port="0" to-layer="4236" to-port="1"/>
-		<edge from-layer="4236" from-port="2" to-layer="4238" to-port="0"/>
-		<edge from-layer="4237" from-port="0" to-layer="4238" to-port="1"/>
-		<edge from-layer="4238" from-port="2" to-layer="4239" to-port="2"/>
-		<edge from-layer="4239" from-port="3" to-layer="4240" to-port="1"/>
-		<edge from-layer="4240" from-port="2" to-layer="4242" to-port="0"/>
-		<edge from-layer="4241" from-port="0" to-layer="4242" to-port="1"/>
-		<edge from-layer="4242" from-port="2" to-layer="4243" to-port="1"/>
-		<edge from-layer="4243" from-port="2" to-layer="4244" to-port="0"/>
-		<edge from-layer="4244" from-port="2" to-layer="4246" to-port="0"/>
-		<edge from-layer="4244" from-port="2" to-layer="4333" to-port="1"/>
-		<edge from-layer="4245" from-port="0" to-layer="4246" to-port="1"/>
-		<edge from-layer="4246" from-port="2" to-layer="4248" to-port="0"/>
-		<edge from-layer="4247" from-port="0" to-layer="4248" to-port="1"/>
-		<edge from-layer="4248" from-port="2" to-layer="4250" to-port="0"/>
-		<edge from-layer="4249" from-port="0" to-layer="4250" to-port="1"/>
-		<edge from-layer="4250" from-port="2" to-layer="4252" to-port="0"/>
-		<edge from-layer="4251" from-port="0" to-layer="4252" to-port="1"/>
-		<edge from-layer="4252" from-port="2" to-layer="4254" to-port="0"/>
-		<edge from-layer="4252" from-port="2" to-layer="4263" to-port="0"/>
-		<edge from-layer="4253" from-port="0" to-layer="4262" to-port="0"/>
-		<edge from-layer="4254" from-port="1" to-layer="4275" to-port="0"/>
-		<edge from-layer="4254" from-port="1" to-layer="4268" to-port="0"/>
-		<edge from-layer="4254" from-port="1" to-layer="4257" to-port="0"/>
-		<edge from-layer="4255" from-port="0" to-layer="4257" to-port="1"/>
-		<edge from-layer="4256" from-port="0" to-layer="4257" to-port="2"/>
-		<edge from-layer="4257" from-port="3" to-layer="4277" to-port="0"/>
-		<edge from-layer="4257" from-port="3" to-layer="4259" to-port="0"/>
-		<edge from-layer="4258" from-port="0" to-layer="4259" to-port="1"/>
-		<edge from-layer="4259" from-port="2" to-layer="4261" to-port="0"/>
-		<edge from-layer="4260" from-port="0" to-layer="4261" to-port="1"/>
-		<edge from-layer="4261" from-port="2" to-layer="4262" to-port="2"/>
-		<edge from-layer="4262" from-port="3" to-layer="4263" to-port="1"/>
-		<edge from-layer="4263" from-port="2" to-layer="4265" to-port="0"/>
-		<edge from-layer="4264" from-port="0" to-layer="4265" to-port="1"/>
-		<edge from-layer="4265" from-port="2" to-layer="4281" to-port="0"/>
-		<edge from-layer="4266" from-port="0" to-layer="4268" to-port="1"/>
-		<edge from-layer="4267" from-port="0" to-layer="4268" to-port="2"/>
-		<edge from-layer="4268" from-port="3" to-layer="4270" to-port="0"/>
-		<edge from-layer="4269" from-port="0" to-layer="4270" to-port="1"/>
-		<edge from-layer="4270" from-port="2" to-layer="4272" to-port="0"/>
-		<edge from-layer="4271" from-port="0" to-layer="4272" to-port="1"/>
-		<edge from-layer="4272" from-port="2" to-layer="4280" to-port="0"/>
-		<edge from-layer="4273" from-port="0" to-layer="4275" to-port="1"/>
-		<edge from-layer="4274" from-port="0" to-layer="4275" to-port="2"/>
-		<edge from-layer="4275" from-port="3" to-layer="4280" to-port="1"/>
-		<edge from-layer="4276" from-port="0" to-layer="4277" to-port="1"/>
-		<edge from-layer="4277" from-port="2" to-layer="4279" to-port="0"/>
-		<edge from-layer="4278" from-port="0" to-layer="4279" to-port="1"/>
-		<edge from-layer="4279" from-port="2" to-layer="4280" to-port="2"/>
-		<edge from-layer="4280" from-port="3" to-layer="4281" to-port="1"/>
-		<edge from-layer="4281" from-port="2" to-layer="4288" to-port="0"/>
-		<edge from-layer="4282" from-port="0" to-layer="4283" to-port="1"/>
-		<edge from-layer="4283" from-port="2" to-layer="4284" to-port="0"/>
-		<edge from-layer="4284" from-port="2" to-layer="4286" to-port="0"/>
-		<edge from-layer="4285" from-port="0" to-layer="4286" to-port="1"/>
-		<edge from-layer="4286" from-port="2" to-layer="4287" to-port="0"/>
-		<edge from-layer="4287" from-port="2" to-layer="4288" to-port="1"/>
-		<edge from-layer="4288" from-port="2" to-layer="4290" to-port="0"/>
-		<edge from-layer="4289" from-port="0" to-layer="4290" to-port="1"/>
-		<edge from-layer="4290" from-port="2" to-layer="4291" to-port="0"/>
-		<edge from-layer="4291" from-port="1" to-layer="4298" to-port="0"/>
-		<edge from-layer="4292" from-port="0" to-layer="4293" to-port="1"/>
-		<edge from-layer="4293" from-port="2" to-layer="4294" to-port="0"/>
-		<edge from-layer="4294" from-port="2" to-layer="4296" to-port="0"/>
-		<edge from-layer="4295" from-port="0" to-layer="4296" to-port="1"/>
-		<edge from-layer="4296" from-port="2" to-layer="4297" to-port="0"/>
-		<edge from-layer="4297" from-port="2" to-layer="4298" to-port="1"/>
-		<edge from-layer="4298" from-port="2" to-layer="4299" to-port="0"/>
-		<edge from-layer="4298" from-port="2" to-layer="4311" to-port="0"/>
-		<edge from-layer="4299" from-port="1" to-layer="4323" to-port="0"/>
-		<edge from-layer="4299" from-port="1" to-layer="4320" to-port="0"/>
-		<edge from-layer="4299" from-port="1" to-layer="4309" to-port="0"/>
-		<edge from-layer="4299" from-port="1" to-layer="4302" to-port="0"/>
-		<edge from-layer="4300" from-port="0" to-layer="4302" to-port="1"/>
-		<edge from-layer="4301" from-port="0" to-layer="4302" to-port="2"/>
-		<edge from-layer="4302" from-port="3" to-layer="4304" to-port="0"/>
-		<edge from-layer="4302" from-port="3" to-layer="4315" to-port="0"/>
-		<edge from-layer="4303" from-port="0" to-layer="4304" to-port="1"/>
-		<edge from-layer="4304" from-port="2" to-layer="4306" to-port="0"/>
-		<edge from-layer="4305" from-port="0" to-layer="4306" to-port="1"/>
-		<edge from-layer="4306" from-port="2" to-layer="4310" to-port="0"/>
-		<edge from-layer="4307" from-port="0" to-layer="4309" to-port="1"/>
-		<edge from-layer="4308" from-port="0" to-layer="4309" to-port="2"/>
-		<edge from-layer="4309" from-port="3" to-layer="4310" to-port="2"/>
-		<edge from-layer="4310" from-port="3" to-layer="4311" to-port="1"/>
-		<edge from-layer="4311" from-port="2" to-layer="4313" to-port="0"/>
-		<edge from-layer="4312" from-port="0" to-layer="4313" to-port="1"/>
-		<edge from-layer="4313" from-port="2" to-layer="4329" to-port="0"/>
-		<edge from-layer="4314" from-port="0" to-layer="4315" to-port="1"/>
-		<edge from-layer="4315" from-port="2" to-layer="4317" to-port="0"/>
-		<edge from-layer="4316" from-port="0" to-layer="4317" to-port="1"/>
-		<edge from-layer="4317" from-port="2" to-layer="4328" to-port="0"/>
-		<edge from-layer="4318" from-port="0" to-layer="4320" to-port="1"/>
-		<edge from-layer="4319" from-port="0" to-layer="4320" to-port="2"/>
-		<edge from-layer="4320" from-port="3" to-layer="4328" to-port="1"/>
-		<edge from-layer="4321" from-port="0" to-layer="4323" to-port="1"/>
-		<edge from-layer="4322" from-port="0" to-layer="4323" to-port="2"/>
-		<edge from-layer="4323" from-port="3" to-layer="4325" to-port="0"/>
-		<edge from-layer="4324" from-port="0" to-layer="4325" to-port="1"/>
-		<edge from-layer="4325" from-port="2" to-layer="4327" to-port="0"/>
-		<edge from-layer="4326" from-port="0" to-layer="4327" to-port="1"/>
-		<edge from-layer="4327" from-port="2" to-layer="4328" to-port="2"/>
-		<edge from-layer="4328" from-port="3" to-layer="4329" to-port="1"/>
-		<edge from-layer="4329" from-port="2" to-layer="4331" to-port="0"/>
-		<edge from-layer="4330" from-port="0" to-layer="4331" to-port="1"/>
-		<edge from-layer="4331" from-port="2" to-layer="4332" to-port="1"/>
-		<edge from-layer="4332" from-port="2" to-layer="4333" to-port="0"/>
-		<edge from-layer="4333" from-port="2" to-layer="4373" to-port="1"/>
-		<edge from-layer="4333" from-port="2" to-layer="4335" to-port="0"/>
-		<edge from-layer="4334" from-port="0" to-layer="4335" to-port="1"/>
-		<edge from-layer="4335" from-port="2" to-layer="4337" to-port="0"/>
-		<edge from-layer="4336" from-port="0" to-layer="4337" to-port="1"/>
-		<edge from-layer="4337" from-port="2" to-layer="4339" to-port="0"/>
-		<edge from-layer="4338" from-port="0" to-layer="4339" to-port="1"/>
-		<edge from-layer="4339" from-port="2" to-layer="4341" to-port="0"/>
-		<edge from-layer="4340" from-port="0" to-layer="4341" to-port="1"/>
-		<edge from-layer="4341" from-port="2" to-layer="4342" to-port="1"/>
-		<edge from-layer="4342" from-port="2" to-layer="4346" to-port="0"/>
-		<edge from-layer="4342" from-port="2" to-layer="4357" to-port="0"/>
-		<edge from-layer="4342" from-port="2" to-layer="4367" to-port="0"/>
-		<edge from-layer="4343" from-port="0" to-layer="4357" to-port="1"/>
-		<edge from-layer="4344" from-port="0" to-layer="4355" to-port="0"/>
-		<edge from-layer="4345" from-port="0" to-layer="4355" to-port="1"/>
-		<edge from-layer="4346" from-port="1" to-layer="4349" to-port="0"/>
-		<edge from-layer="4347" from-port="0" to-layer="4349" to-port="1"/>
-		<edge from-layer="4348" from-port="0" to-layer="4349" to-port="2"/>
-		<edge from-layer="4349" from-port="3" to-layer="4351" to-port="0"/>
-		<edge from-layer="4350" from-port="0" to-layer="4351" to-port="1"/>
-		<edge from-layer="4351" from-port="2" to-layer="4353" to-port="0"/>
-		<edge from-layer="4352" from-port="0" to-layer="4353" to-port="1"/>
-		<edge from-layer="4353" from-port="2" to-layer="4361" to-port="2"/>
-		<edge from-layer="4353" from-port="2" to-layer="4364" to-port="0"/>
-		<edge from-layer="4353" from-port="2" to-layer="4355" to-port="2"/>
-		<edge from-layer="4354" from-port="0" to-layer="4355" to-port="3"/>
-		<edge from-layer="4355" from-port="4" to-layer="4357" to-port="2"/>
-		<edge from-layer="4356" from-port="0" to-layer="4357" to-port="3"/>
-		<edge from-layer="4357" from-port="4" to-layer="4369" to-port="0"/>
-		<edge from-layer="4358" from-port="0" to-layer="4361" to-port="0"/>
-		<edge from-layer="4359" from-port="0" to-layer="4361" to-port="1"/>
-		<edge from-layer="4359" from-port="0" to-layer="4365" to-port="1"/>
-		<edge from-layer="4360" from-port="0" to-layer="4365" to-port="3"/>
-		<edge from-layer="4360" from-port="0" to-layer="4361" to-port="3"/>
-		<edge from-layer="4361" from-port="4" to-layer="4367" to-port="1"/>
-		<edge from-layer="4362" from-port="0" to-layer="4365" to-port="0"/>
-		<edge from-layer="4363" from-port="0" to-layer="4364" to-port="1"/>
-		<edge from-layer="4364" from-port="2" to-layer="4365" to-port="2"/>
-		<edge from-layer="4365" from-port="4" to-layer="4367" to-port="2"/>
-		<edge from-layer="4366" from-port="0" to-layer="4367" to-port="3"/>
-		<edge from-layer="4367" from-port="4" to-layer="4368" to-port="0"/>
-		<edge from-layer="4368" from-port="1" to-layer="4369" to-port="1"/>
-		<edge from-layer="4369" from-port="2" to-layer="4371" to-port="0"/>
-		<edge from-layer="4370" from-port="0" to-layer="4371" to-port="1"/>
-		<edge from-layer="4371" from-port="2" to-layer="4372" to-port="1"/>
-		<edge from-layer="4372" from-port="2" to-layer="4373" to-port="0"/>
-		<edge from-layer="4373" from-port="2" to-layer="4377" to-port="0"/>
-		<edge from-layer="4374" from-port="0" to-layer="4376" to-port="1"/>
-		<edge from-layer="4375" from-port="0" to-layer="4376" to-port="2"/>
-		<edge from-layer="4376" from-port="3" to-layer="4377" to-port="1"/>
-		<edge from-layer="4377" from-port="2" to-layer="4379" to-port="0"/>
-		<edge from-layer="4378" from-port="0" to-layer="4379" to-port="1"/>
-		<edge from-layer="4379" from-port="2" to-layer="4381" to-port="0"/>
-		<edge from-layer="4380" from-port="0" to-layer="4381" to-port="1"/>
-		<edge from-layer="4381" from-port="2" to-layer="4383" to-port="0"/>
-		<edge from-layer="4382" from-port="0" to-layer="4383" to-port="1"/>
-		<edge from-layer="4383" from-port="2" to-layer="4384" to-port="0"/>
-		<edge from-layer="4384" from-port="2" to-layer="4389" to-port="0"/>
-		<edge from-layer="4384" from-port="2" to-layer="4385" to-port="0"/>
-		<edge from-layer="4385" from-port="1" to-layer="4386" to-port="0"/>
-		<edge from-layer="4386" from-port="1" to-layer="4387" to-port="0"/>
-		<edge from-layer="4387" from-port="2" to-layer="4388" to-port="0"/>
-		<edge from-layer="4388" from-port="1" to-layer="4389" to-port="1"/>
-		<edge from-layer="4389" from-port="3" to-layer="4391" to-port="0"/>
-		<edge from-layer="4390" from-port="0" to-layer="4391" to-port="1"/>
-		<edge from-layer="4391" from-port="2" to-layer="4393" to-port="0"/>
-		<edge from-layer="4392" from-port="0" to-layer="4393" to-port="1"/>
-		<edge from-layer="4393" from-port="2" to-layer="4394" to-port="0"/>
-		<edge from-layer="4394" from-port="2" to-layer="4396" to-port="0"/>
-		<edge from-layer="4394" from-port="2" to-layer="4400" to-port="0"/>
-		<edge from-layer="4394" from-port="2" to-layer="4403" to-port="0"/>
-		<edge from-layer="4395" from-port="0" to-layer="4396" to-port="1"/>
-		<edge from-layer="4396" from-port="2" to-layer="4398" to-port="0"/>
-		<edge from-layer="4397" from-port="0" to-layer="4398" to-port="1"/>
-		<edge from-layer="4398" from-port="2" to-layer="4439" to-port="0"/>
-		<edge from-layer="4399" from-port="0" to-layer="4400" to-port="1"/>
-		<edge from-layer="4400" from-port="2" to-layer="4402" to-port="0"/>
-		<edge from-layer="4401" from-port="0" to-layer="4402" to-port="1"/>
-		<edge from-layer="4402" from-port="2" to-layer="4404" to-port="0"/>
-		<edge from-layer="4403" from-port="1" to-layer="4404" to-port="1"/>
-		<edge from-layer="4404" from-port="2" to-layer="4406" to-port="0"/>
-		<edge from-layer="4405" from-port="0" to-layer="4406" to-port="1"/>
-		<edge from-layer="4406" from-port="2" to-layer="4408" to-port="0"/>
-		<edge from-layer="4407" from-port="0" to-layer="4408" to-port="1"/>
-		<edge from-layer="4408" from-port="2" to-layer="4409" to-port="0"/>
-		<edge from-layer="4409" from-port="1" to-layer="4411" to-port="0"/>
-		<edge from-layer="4410" from-port="0" to-layer="4411" to-port="1"/>
-		<edge from-layer="4411" from-port="2" to-layer="4413" to-port="0"/>
-		<edge from-layer="4412" from-port="0" to-layer="4413" to-port="1"/>
-		<edge from-layer="4413" from-port="2" to-layer="4423" to-port="0"/>
-		<edge from-layer="4414" from-port="1" to-layer="4416" to-port="0"/>
-		<edge from-layer="4415" from-port="0" to-layer="4416" to-port="1"/>
-		<edge from-layer="4416" from-port="2" to-layer="4418" to-port="0"/>
-		<edge from-layer="4417" from-port="0" to-layer="4418" to-port="1"/>
-		<edge from-layer="4418" from-port="2" to-layer="4420" to-port="0"/>
-		<edge from-layer="4419" from-port="0" to-layer="4420" to-port="1"/>
-		<edge from-layer="4420" from-port="2" to-layer="4422" to-port="0"/>
-		<edge from-layer="4421" from-port="0" to-layer="4422" to-port="1"/>
-		<edge from-layer="4422" from-port="2" to-layer="4423" to-port="1"/>
-		<edge from-layer="4423" from-port="2" to-layer="4428" to-port="0"/>
-		<edge from-layer="4423" from-port="2" to-layer="4425" to-port="0"/>
-		<edge from-layer="4424" from-port="0" to-layer="4425" to-port="1"/>
-		<edge from-layer="4425" from-port="2" to-layer="4427" to-port="0"/>
-		<edge from-layer="4426" from-port="0" to-layer="4427" to-port="1"/>
-		<edge from-layer="4427" from-port="2" to-layer="4429" to-port="0"/>
-		<edge from-layer="4428" from-port="1" to-layer="4429" to-port="1"/>
-		<edge from-layer="4429" from-port="2" to-layer="4431" to-port="0"/>
-		<edge from-layer="4430" from-port="0" to-layer="4431" to-port="1"/>
-		<edge from-layer="4431" from-port="2" to-layer="4433" to-port="0"/>
-		<edge from-layer="4432" from-port="0" to-layer="4433" to-port="1"/>
-		<edge from-layer="4433" from-port="2" to-layer="4434" to-port="0"/>
-		<edge from-layer="4434" from-port="1" to-layer="4436" to-port="0"/>
-		<edge from-layer="4435" from-port="0" to-layer="4436" to-port="1"/>
-		<edge from-layer="4436" from-port="2" to-layer="4438" to-port="0"/>
-		<edge from-layer="4437" from-port="0" to-layer="4438" to-port="1"/>
-		<edge from-layer="4438" from-port="2" to-layer="4439" to-port="1"/>
-		<edge from-layer="4439" from-port="2" to-layer="4444" to-port="0"/>
-		<edge from-layer="4439" from-port="2" to-layer="4751" to-port="1"/>
-		<edge from-layer="4439" from-port="2" to-layer="4441" to-port="0"/>
-		<edge from-layer="4440" from-port="0" to-layer="4441" to-port="1"/>
-		<edge from-layer="4441" from-port="2" to-layer="4443" to-port="0"/>
-		<edge from-layer="4442" from-port="0" to-layer="4443" to-port="1"/>
-		<edge from-layer="4443" from-port="2" to-layer="4445" to-port="0"/>
-		<edge from-layer="4444" from-port="1" to-layer="4461" to-port="0"/>
-		<edge from-layer="4444" from-port="1" to-layer="4458" to-port="0"/>
-		<edge from-layer="4444" from-port="1" to-layer="4470" to-port="0"/>
-		<edge from-layer="4444" from-port="1" to-layer="4445" to-port="1"/>
-		<edge from-layer="4444" from-port="1" to-layer="4743" to-port="0"/>
-		<edge from-layer="4444" from-port="1" to-layer="4464" to-port="0"/>
-		<edge from-layer="4445" from-port="2" to-layer="4447" to-port="0"/>
-		<edge from-layer="4446" from-port="0" to-layer="4447" to-port="1"/>
-		<edge from-layer="4447" from-port="2" to-layer="4449" to-port="0"/>
-		<edge from-layer="4448" from-port="0" to-layer="4449" to-port="1"/>
-		<edge from-layer="4449" from-port="2" to-layer="4451" to-port="0"/>
-		<edge from-layer="4450" from-port="0" to-layer="4451" to-port="1"/>
-		<edge from-layer="4451" from-port="2" to-layer="4453" to-port="0"/>
-		<edge from-layer="4452" from-port="0" to-layer="4453" to-port="1"/>
-		<edge from-layer="4453" from-port="2" to-layer="4455" to-port="0"/>
-		<edge from-layer="4454" from-port="0" to-layer="4455" to-port="1"/>
-		<edge from-layer="4455" from-port="2" to-layer="4472" to-port="0"/>
-		<edge from-layer="4456" from-port="0" to-layer="4458" to-port="1"/>
-		<edge from-layer="4457" from-port="0" to-layer="4458" to-port="2"/>
-		<edge from-layer="4458" from-port="3" to-layer="4471" to-port="0"/>
-		<edge from-layer="4459" from-port="0" to-layer="4461" to-port="1"/>
-		<edge from-layer="4460" from-port="0" to-layer="4461" to-port="2"/>
-		<edge from-layer="4461" from-port="3" to-layer="4465" to-port="0"/>
-		<edge from-layer="4462" from-port="0" to-layer="4464" to-port="1"/>
-		<edge from-layer="4463" from-port="0" to-layer="4464" to-port="2"/>
-		<edge from-layer="4464" from-port="3" to-layer="4465" to-port="1"/>
-		<edge from-layer="4465" from-port="2" to-layer="4467" to-port="0"/>
-		<edge from-layer="4466" from-port="0" to-layer="4467" to-port="1"/>
-		<edge from-layer="4467" from-port="2" to-layer="4471" to-port="1"/>
-		<edge from-layer="4468" from-port="0" to-layer="4470" to-port="1"/>
-		<edge from-layer="4469" from-port="0" to-layer="4470" to-port="2"/>
-		<edge from-layer="4470" from-port="3" to-layer="4471" to-port="2"/>
-		<edge from-layer="4471" from-port="3" to-layer="4472" to-port="1"/>
-		<edge from-layer="4472" from-port="2" to-layer="4474" to-port="0"/>
-		<edge from-layer="4472" from-port="2" to-layer="4611" to-port="1"/>
-		<edge from-layer="4473" from-port="0" to-layer="4474" to-port="1"/>
-		<edge from-layer="4474" from-port="2" to-layer="4476" to-port="0"/>
-		<edge from-layer="4475" from-port="0" to-layer="4476" to-port="1"/>
-		<edge from-layer="4476" from-port="2" to-layer="4478" to-port="0"/>
-		<edge from-layer="4477" from-port="0" to-layer="4478" to-port="1"/>
-		<edge from-layer="4478" from-port="2" to-layer="4511" to-port="0"/>
-		<edge from-layer="4478" from-port="2" to-layer="4546" to-port="0"/>
-		<edge from-layer="4478" from-port="2" to-layer="4480" to-port="0"/>
-		<edge from-layer="4479" from-port="0" to-layer="4480" to-port="1"/>
-		<edge from-layer="4480" from-port="2" to-layer="4482" to-port="0"/>
-		<edge from-layer="4480" from-port="2" to-layer="4491" to-port="0"/>
-		<edge from-layer="4481" from-port="0" to-layer="4490" to-port="0"/>
-		<edge from-layer="4482" from-port="1" to-layer="4503" to-port="0"/>
-		<edge from-layer="4482" from-port="1" to-layer="4485" to-port="0"/>
-		<edge from-layer="4482" from-port="1" to-layer="4496" to-port="0"/>
-		<edge from-layer="4483" from-port="0" to-layer="4485" to-port="1"/>
-		<edge from-layer="4484" from-port="0" to-layer="4485" to-port="2"/>
-		<edge from-layer="4485" from-port="3" to-layer="4505" to-port="0"/>
-		<edge from-layer="4485" from-port="3" to-layer="4487" to-port="0"/>
-		<edge from-layer="4486" from-port="0" to-layer="4487" to-port="1"/>
-		<edge from-layer="4487" from-port="2" to-layer="4489" to-port="0"/>
-		<edge from-layer="4488" from-port="0" to-layer="4489" to-port="1"/>
-		<edge from-layer="4489" from-port="2" to-layer="4490" to-port="2"/>
-		<edge from-layer="4490" from-port="3" to-layer="4491" to-port="1"/>
-		<edge from-layer="4491" from-port="2" to-layer="4493" to-port="0"/>
-		<edge from-layer="4492" from-port="0" to-layer="4493" to-port="1"/>
-		<edge from-layer="4493" from-port="2" to-layer="4509" to-port="0"/>
-		<edge from-layer="4494" from-port="0" to-layer="4496" to-port="1"/>
-		<edge from-layer="4495" from-port="0" to-layer="4496" to-port="2"/>
-		<edge from-layer="4496" from-port="3" to-layer="4498" to-port="0"/>
-		<edge from-layer="4497" from-port="0" to-layer="4498" to-port="1"/>
-		<edge from-layer="4498" from-port="2" to-layer="4500" to-port="0"/>
-		<edge from-layer="4499" from-port="0" to-layer="4500" to-port="1"/>
-		<edge from-layer="4500" from-port="2" to-layer="4508" to-port="0"/>
-		<edge from-layer="4501" from-port="0" to-layer="4503" to-port="1"/>
-		<edge from-layer="4502" from-port="0" to-layer="4503" to-port="2"/>
-		<edge from-layer="4503" from-port="3" to-layer="4508" to-port="1"/>
-		<edge from-layer="4504" from-port="0" to-layer="4505" to-port="1"/>
-		<edge from-layer="4505" from-port="2" to-layer="4507" to-port="0"/>
-		<edge from-layer="4506" from-port="0" to-layer="4507" to-port="1"/>
-		<edge from-layer="4507" from-port="2" to-layer="4508" to-port="2"/>
-		<edge from-layer="4508" from-port="3" to-layer="4509" to-port="1"/>
-		<edge from-layer="4509" from-port="2" to-layer="4541" to-port="0"/>
-		<edge from-layer="4510" from-port="0" to-layer="4511" to-port="1"/>
-		<edge from-layer="4511" from-port="2" to-layer="4522" to-port="0"/>
-		<edge from-layer="4511" from-port="2" to-layer="4513" to-port="0"/>
-		<edge from-layer="4512" from-port="0" to-layer="4521" to-port="0"/>
-		<edge from-layer="4513" from-port="1" to-layer="4534" to-port="0"/>
-		<edge from-layer="4513" from-port="1" to-layer="4527" to-port="0"/>
-		<edge from-layer="4513" from-port="1" to-layer="4516" to-port="0"/>
-		<edge from-layer="4514" from-port="0" to-layer="4516" to-port="1"/>
-		<edge from-layer="4515" from-port="0" to-layer="4516" to-port="2"/>
-		<edge from-layer="4516" from-port="3" to-layer="4536" to-port="0"/>
-		<edge from-layer="4516" from-port="3" to-layer="4518" to-port="0"/>
-		<edge from-layer="4517" from-port="0" to-layer="4518" to-port="1"/>
-		<edge from-layer="4518" from-port="2" to-layer="4520" to-port="0"/>
-		<edge from-layer="4519" from-port="0" to-layer="4520" to-port="1"/>
-		<edge from-layer="4520" from-port="2" to-layer="4521" to-port="2"/>
-		<edge from-layer="4521" from-port="3" to-layer="4522" to-port="1"/>
-		<edge from-layer="4522" from-port="2" to-layer="4524" to-port="0"/>
-		<edge from-layer="4523" from-port="0" to-layer="4524" to-port="1"/>
-		<edge from-layer="4524" from-port="2" to-layer="4540" to-port="0"/>
-		<edge from-layer="4525" from-port="0" to-layer="4527" to-port="1"/>
-		<edge from-layer="4526" from-port="0" to-layer="4527" to-port="2"/>
-		<edge from-layer="4527" from-port="3" to-layer="4529" to-port="0"/>
-		<edge from-layer="4528" from-port="0" to-layer="4529" to-port="1"/>
-		<edge from-layer="4529" from-port="2" to-layer="4531" to-port="0"/>
-		<edge from-layer="4530" from-port="0" to-layer="4531" to-port="1"/>
-		<edge from-layer="4531" from-port="2" to-layer="4539" to-port="0"/>
-		<edge from-layer="4532" from-port="0" to-layer="4534" to-port="1"/>
-		<edge from-layer="4533" from-port="0" to-layer="4534" to-port="2"/>
-		<edge from-layer="4534" from-port="3" to-layer="4539" to-port="1"/>
-		<edge from-layer="4535" from-port="0" to-layer="4536" to-port="1"/>
-		<edge from-layer="4536" from-port="2" to-layer="4538" to-port="0"/>
-		<edge from-layer="4537" from-port="0" to-layer="4538" to-port="1"/>
-		<edge from-layer="4538" from-port="2" to-layer="4539" to-port="2"/>
-		<edge from-layer="4539" from-port="3" to-layer="4540" to-port="1"/>
-		<edge from-layer="4540" from-port="2" to-layer="4541" to-port="1"/>
-		<edge from-layer="4541" from-port="2" to-layer="4543" to-port="0"/>
-		<edge from-layer="4542" from-port="0" to-layer="4543" to-port="1"/>
-		<edge from-layer="4543" from-port="2" to-layer="4544" to-port="0"/>
-		<edge from-layer="4544" from-port="1" to-layer="4576" to-port="0"/>
-		<edge from-layer="4545" from-port="0" to-layer="4546" to-port="1"/>
-		<edge from-layer="4546" from-port="2" to-layer="4548" to-port="0"/>
-		<edge from-layer="4546" from-port="2" to-layer="4557" to-port="0"/>
-		<edge from-layer="4547" from-port="0" to-layer="4556" to-port="0"/>
-		<edge from-layer="4548" from-port="1" to-layer="4562" to-port="0"/>
-		<edge from-layer="4548" from-port="1" to-layer="4569" to-port="0"/>
-		<edge from-layer="4548" from-port="1" to-layer="4551" to-port="0"/>
-		<edge from-layer="4549" from-port="0" to-layer="4551" to-port="1"/>
-		<edge from-layer="4550" from-port="0" to-layer="4551" to-port="2"/>
-		<edge from-layer="4551" from-port="3" to-layer="4553" to-port="0"/>
-		<edge from-layer="4551" from-port="3" to-layer="4571" to-port="0"/>
-		<edge from-layer="4552" from-port="0" to-layer="4553" to-port="1"/>
-		<edge from-layer="4553" from-port="2" to-layer="4555" to-port="0"/>
-		<edge from-layer="4554" from-port="0" to-layer="4555" to-port="1"/>
-		<edge from-layer="4555" from-port="2" to-layer="4556" to-port="2"/>
-		<edge from-layer="4556" from-port="3" to-layer="4557" to-port="1"/>
-		<edge from-layer="4557" from-port="2" to-layer="4559" to-port="0"/>
-		<edge from-layer="4558" from-port="0" to-layer="4559" to-port="1"/>
-		<edge from-layer="4559" from-port="2" to-layer="4575" to-port="0"/>
-		<edge from-layer="4560" from-port="0" to-layer="4562" to-port="1"/>
-		<edge from-layer="4561" from-port="0" to-layer="4562" to-port="2"/>
-		<edge from-layer="4562" from-port="3" to-layer="4564" to-port="0"/>
-		<edge from-layer="4563" from-port="0" to-layer="4564" to-port="1"/>
-		<edge from-layer="4564" from-port="2" to-layer="4566" to-port="0"/>
-		<edge from-layer="4565" from-port="0" to-layer="4566" to-port="1"/>
-		<edge from-layer="4566" from-port="2" to-layer="4574" to-port="0"/>
-		<edge from-layer="4567" from-port="0" to-layer="4569" to-port="1"/>
-		<edge from-layer="4568" from-port="0" to-layer="4569" to-port="2"/>
-		<edge from-layer="4569" from-port="3" to-layer="4574" to-port="1"/>
-		<edge from-layer="4570" from-port="0" to-layer="4571" to-port="1"/>
-		<edge from-layer="4571" from-port="2" to-layer="4573" to-port="0"/>
-		<edge from-layer="4572" from-port="0" to-layer="4573" to-port="1"/>
-		<edge from-layer="4573" from-port="2" to-layer="4574" to-port="2"/>
-		<edge from-layer="4574" from-port="3" to-layer="4575" to-port="1"/>
-		<edge from-layer="4575" from-port="2" to-layer="4576" to-port="1"/>
-		<edge from-layer="4576" from-port="2" to-layer="4589" to-port="0"/>
-		<edge from-layer="4576" from-port="2" to-layer="4577" to-port="0"/>
-		<edge from-layer="4577" from-port="1" to-layer="4587" to-port="0"/>
-		<edge from-layer="4577" from-port="1" to-layer="4580" to-port="0"/>
-		<edge from-layer="4577" from-port="1" to-layer="4598" to-port="0"/>
-		<edge from-layer="4577" from-port="1" to-layer="4601" to-port="0"/>
-		<edge from-layer="4578" from-port="0" to-layer="4580" to-port="1"/>
-		<edge from-layer="4579" from-port="0" to-layer="4580" to-port="2"/>
-		<edge from-layer="4580" from-port="3" to-layer="4593" to-port="0"/>
-		<edge from-layer="4580" from-port="3" to-layer="4582" to-port="0"/>
-		<edge from-layer="4581" from-port="0" to-layer="4582" to-port="1"/>
-		<edge from-layer="4582" from-port="2" to-layer="4584" to-port="0"/>
-		<edge from-layer="4583" from-port="0" to-layer="4584" to-port="1"/>
-		<edge from-layer="4584" from-port="2" to-layer="4588" to-port="0"/>
-		<edge from-layer="4585" from-port="0" to-layer="4587" to-port="1"/>
-		<edge from-layer="4586" from-port="0" to-layer="4587" to-port="2"/>
-		<edge from-layer="4587" from-port="3" to-layer="4588" to-port="2"/>
-		<edge from-layer="4588" from-port="3" to-layer="4589" to-port="1"/>
-		<edge from-layer="4589" from-port="2" to-layer="4591" to-port="0"/>
-		<edge from-layer="4590" from-port="0" to-layer="4591" to-port="1"/>
-		<edge from-layer="4591" from-port="2" to-layer="4607" to-port="0"/>
-		<edge from-layer="4592" from-port="0" to-layer="4593" to-port="1"/>
-		<edge from-layer="4593" from-port="2" to-layer="4595" to-port="0"/>
-		<edge from-layer="4594" from-port="0" to-layer="4595" to-port="1"/>
-		<edge from-layer="4595" from-port="2" to-layer="4606" to-port="0"/>
-		<edge from-layer="4596" from-port="0" to-layer="4598" to-port="1"/>
-		<edge from-layer="4597" from-port="0" to-layer="4598" to-port="2"/>
-		<edge from-layer="4598" from-port="3" to-layer="4606" to-port="1"/>
-		<edge from-layer="4599" from-port="0" to-layer="4601" to-port="1"/>
-		<edge from-layer="4600" from-port="0" to-layer="4601" to-port="2"/>
-		<edge from-layer="4601" from-port="3" to-layer="4603" to-port="0"/>
-		<edge from-layer="4602" from-port="0" to-layer="4603" to-port="1"/>
-		<edge from-layer="4603" from-port="2" to-layer="4605" to-port="0"/>
-		<edge from-layer="4604" from-port="0" to-layer="4605" to-port="1"/>
-		<edge from-layer="4605" from-port="2" to-layer="4606" to-port="2"/>
-		<edge from-layer="4606" from-port="3" to-layer="4607" to-port="1"/>
-		<edge from-layer="4607" from-port="2" to-layer="4609" to-port="0"/>
-		<edge from-layer="4608" from-port="0" to-layer="4609" to-port="1"/>
-		<edge from-layer="4609" from-port="2" to-layer="4610" to-port="1"/>
-		<edge from-layer="4610" from-port="2" to-layer="4611" to-port="0"/>
-		<edge from-layer="4611" from-port="2" to-layer="4700" to-port="1"/>
-		<edge from-layer="4611" from-port="2" to-layer="4613" to-port="0"/>
-		<edge from-layer="4612" from-port="0" to-layer="4613" to-port="1"/>
-		<edge from-layer="4613" from-port="2" to-layer="4615" to-port="0"/>
-		<edge from-layer="4614" from-port="0" to-layer="4615" to-port="1"/>
-		<edge from-layer="4615" from-port="2" to-layer="4617" to-port="0"/>
-		<edge from-layer="4616" from-port="0" to-layer="4617" to-port="1"/>
-		<edge from-layer="4617" from-port="2" to-layer="4619" to-port="0"/>
-		<edge from-layer="4618" from-port="0" to-layer="4619" to-port="1"/>
-		<edge from-layer="4619" from-port="2" to-layer="4621" to-port="0"/>
-		<edge from-layer="4619" from-port="2" to-layer="4630" to-port="0"/>
-		<edge from-layer="4620" from-port="0" to-layer="4629" to-port="0"/>
-		<edge from-layer="4621" from-port="1" to-layer="4624" to-port="0"/>
-		<edge from-layer="4621" from-port="1" to-layer="4635" to-port="0"/>
-		<edge from-layer="4621" from-port="1" to-layer="4642" to-port="0"/>
-		<edge from-layer="4622" from-port="0" to-layer="4624" to-port="1"/>
-		<edge from-layer="4623" from-port="0" to-layer="4624" to-port="2"/>
-		<edge from-layer="4624" from-port="3" to-layer="4626" to-port="0"/>
-		<edge from-layer="4624" from-port="3" to-layer="4644" to-port="0"/>
-		<edge from-layer="4625" from-port="0" to-layer="4626" to-port="1"/>
-		<edge from-layer="4626" from-port="2" to-layer="4628" to-port="0"/>
-		<edge from-layer="4627" from-port="0" to-layer="4628" to-port="1"/>
-		<edge from-layer="4628" from-port="2" to-layer="4629" to-port="2"/>
-		<edge from-layer="4629" from-port="3" to-layer="4630" to-port="1"/>
-		<edge from-layer="4630" from-port="2" to-layer="4632" to-port="0"/>
-		<edge from-layer="4631" from-port="0" to-layer="4632" to-port="1"/>
-		<edge from-layer="4632" from-port="2" to-layer="4648" to-port="0"/>
-		<edge from-layer="4633" from-port="0" to-layer="4635" to-port="1"/>
-		<edge from-layer="4634" from-port="0" to-layer="4635" to-port="2"/>
-		<edge from-layer="4635" from-port="3" to-layer="4637" to-port="0"/>
-		<edge from-layer="4636" from-port="0" to-layer="4637" to-port="1"/>
-		<edge from-layer="4637" from-port="2" to-layer="4639" to-port="0"/>
-		<edge from-layer="4638" from-port="0" to-layer="4639" to-port="1"/>
-		<edge from-layer="4639" from-port="2" to-layer="4647" to-port="0"/>
-		<edge from-layer="4640" from-port="0" to-layer="4642" to-port="1"/>
-		<edge from-layer="4641" from-port="0" to-layer="4642" to-port="2"/>
-		<edge from-layer="4642" from-port="3" to-layer="4647" to-port="1"/>
-		<edge from-layer="4643" from-port="0" to-layer="4644" to-port="1"/>
-		<edge from-layer="4644" from-port="2" to-layer="4646" to-port="0"/>
-		<edge from-layer="4645" from-port="0" to-layer="4646" to-port="1"/>
-		<edge from-layer="4646" from-port="2" to-layer="4647" to-port="2"/>
-		<edge from-layer="4647" from-port="3" to-layer="4648" to-port="1"/>
-		<edge from-layer="4648" from-port="2" to-layer="4655" to-port="0"/>
-		<edge from-layer="4649" from-port="0" to-layer="4650" to-port="1"/>
-		<edge from-layer="4650" from-port="2" to-layer="4651" to-port="0"/>
-		<edge from-layer="4651" from-port="2" to-layer="4653" to-port="0"/>
-		<edge from-layer="4652" from-port="0" to-layer="4653" to-port="1"/>
-		<edge from-layer="4653" from-port="2" to-layer="4654" to-port="0"/>
-		<edge from-layer="4654" from-port="2" to-layer="4655" to-port="1"/>
-		<edge from-layer="4655" from-port="2" to-layer="4657" to-port="0"/>
-		<edge from-layer="4656" from-port="0" to-layer="4657" to-port="1"/>
-		<edge from-layer="4657" from-port="2" to-layer="4658" to-port="0"/>
-		<edge from-layer="4658" from-port="1" to-layer="4665" to-port="0"/>
-		<edge from-layer="4659" from-port="0" to-layer="4660" to-port="1"/>
-		<edge from-layer="4660" from-port="2" to-layer="4661" to-port="0"/>
-		<edge from-layer="4661" from-port="2" to-layer="4663" to-port="0"/>
-		<edge from-layer="4662" from-port="0" to-layer="4663" to-port="1"/>
-		<edge from-layer="4663" from-port="2" to-layer="4664" to-port="0"/>
-		<edge from-layer="4664" from-port="2" to-layer="4665" to-port="1"/>
-		<edge from-layer="4665" from-port="2" to-layer="4678" to-port="0"/>
-		<edge from-layer="4665" from-port="2" to-layer="4666" to-port="0"/>
-		<edge from-layer="4666" from-port="1" to-layer="4687" to-port="0"/>
-		<edge from-layer="4666" from-port="1" to-layer="4676" to-port="0"/>
-		<edge from-layer="4666" from-port="1" to-layer="4669" to-port="0"/>
-		<edge from-layer="4666" from-port="1" to-layer="4690" to-port="0"/>
-		<edge from-layer="4667" from-port="0" to-layer="4669" to-port="1"/>
-		<edge from-layer="4668" from-port="0" to-layer="4669" to-port="2"/>
-		<edge from-layer="4669" from-port="3" to-layer="4682" to-port="0"/>
-		<edge from-layer="4669" from-port="3" to-layer="4671" to-port="0"/>
-		<edge from-layer="4670" from-port="0" to-layer="4671" to-port="1"/>
-		<edge from-layer="4671" from-port="2" to-layer="4673" to-port="0"/>
-		<edge from-layer="4672" from-port="0" to-layer="4673" to-port="1"/>
-		<edge from-layer="4673" from-port="2" to-layer="4677" to-port="0"/>
-		<edge from-layer="4674" from-port="0" to-layer="4676" to-port="1"/>
-		<edge from-layer="4675" from-port="0" to-layer="4676" to-port="2"/>
-		<edge from-layer="4676" from-port="3" to-layer="4677" to-port="2"/>
-		<edge from-layer="4677" from-port="3" to-layer="4678" to-port="1"/>
-		<edge from-layer="4678" from-port="2" to-layer="4680" to-port="0"/>
-		<edge from-layer="4679" from-port="0" to-layer="4680" to-port="1"/>
-		<edge from-layer="4680" from-port="2" to-layer="4696" to-port="0"/>
-		<edge from-layer="4681" from-port="0" to-layer="4682" to-port="1"/>
-		<edge from-layer="4682" from-port="2" to-layer="4684" to-port="0"/>
-		<edge from-layer="4683" from-port="0" to-layer="4684" to-port="1"/>
-		<edge from-layer="4684" from-port="2" to-layer="4695" to-port="0"/>
-		<edge from-layer="4685" from-port="0" to-layer="4687" to-port="1"/>
-		<edge from-layer="4686" from-port="0" to-layer="4687" to-port="2"/>
-		<edge from-layer="4687" from-port="3" to-layer="4695" to-port="1"/>
-		<edge from-layer="4688" from-port="0" to-layer="4690" to-port="1"/>
-		<edge from-layer="4689" from-port="0" to-layer="4690" to-port="2"/>
-		<edge from-layer="4690" from-port="3" to-layer="4692" to-port="0"/>
-		<edge from-layer="4691" from-port="0" to-layer="4692" to-port="1"/>
-		<edge from-layer="4692" from-port="2" to-layer="4694" to-port="0"/>
-		<edge from-layer="4693" from-port="0" to-layer="4694" to-port="1"/>
-		<edge from-layer="4694" from-port="2" to-layer="4695" to-port="2"/>
-		<edge from-layer="4695" from-port="3" to-layer="4696" to-port="1"/>
-		<edge from-layer="4696" from-port="2" to-layer="4698" to-port="0"/>
-		<edge from-layer="4697" from-port="0" to-layer="4698" to-port="1"/>
-		<edge from-layer="4698" from-port="2" to-layer="4699" to-port="1"/>
-		<edge from-layer="4699" from-port="2" to-layer="4700" to-port="0"/>
-		<edge from-layer="4700" from-port="2" to-layer="4702" to-port="0"/>
-		<edge from-layer="4700" from-port="2" to-layer="4740" to-port="1"/>
-		<edge from-layer="4701" from-port="0" to-layer="4702" to-port="1"/>
-		<edge from-layer="4702" from-port="2" to-layer="4704" to-port="0"/>
-		<edge from-layer="4703" from-port="0" to-layer="4704" to-port="1"/>
-		<edge from-layer="4704" from-port="2" to-layer="4706" to-port="0"/>
-		<edge from-layer="4705" from-port="0" to-layer="4706" to-port="1"/>
-		<edge from-layer="4706" from-port="2" to-layer="4708" to-port="0"/>
-		<edge from-layer="4707" from-port="0" to-layer="4708" to-port="1"/>
-		<edge from-layer="4708" from-port="2" to-layer="4709" to-port="1"/>
-		<edge from-layer="4709" from-port="2" to-layer="4734" to-port="0"/>
-		<edge from-layer="4709" from-port="2" to-layer="4724" to-port="0"/>
-		<edge from-layer="4709" from-port="2" to-layer="4713" to-port="0"/>
-		<edge from-layer="4710" from-port="0" to-layer="4724" to-port="1"/>
-		<edge from-layer="4711" from-port="0" to-layer="4722" to-port="0"/>
-		<edge from-layer="4712" from-port="0" to-layer="4722" to-port="1"/>
-		<edge from-layer="4713" from-port="1" to-layer="4716" to-port="0"/>
-		<edge from-layer="4714" from-port="0" to-layer="4716" to-port="1"/>
-		<edge from-layer="4715" from-port="0" to-layer="4716" to-port="2"/>
-		<edge from-layer="4716" from-port="3" to-layer="4718" to-port="0"/>
-		<edge from-layer="4717" from-port="0" to-layer="4718" to-port="1"/>
-		<edge from-layer="4718" from-port="2" to-layer="4720" to-port="0"/>
-		<edge from-layer="4719" from-port="0" to-layer="4720" to-port="1"/>
-		<edge from-layer="4720" from-port="2" to-layer="4731" to-port="0"/>
-		<edge from-layer="4720" from-port="2" to-layer="4722" to-port="2"/>
-		<edge from-layer="4720" from-port="2" to-layer="4728" to-port="2"/>
-		<edge from-layer="4721" from-port="0" to-layer="4722" to-port="3"/>
-		<edge from-layer="4722" from-port="4" to-layer="4724" to-port="2"/>
-		<edge from-layer="4723" from-port="0" to-layer="4724" to-port="3"/>
-		<edge from-layer="4724" from-port="4" to-layer="4736" to-port="0"/>
-		<edge from-layer="4725" from-port="0" to-layer="4728" to-port="0"/>
-		<edge from-layer="4726" from-port="0" to-layer="4728" to-port="1"/>
-		<edge from-layer="4726" from-port="0" to-layer="4732" to-port="1"/>
-		<edge from-layer="4727" from-port="0" to-layer="4732" to-port="3"/>
-		<edge from-layer="4727" from-port="0" to-layer="4728" to-port="3"/>
-		<edge from-layer="4728" from-port="4" to-layer="4734" to-port="1"/>
-		<edge from-layer="4729" from-port="0" to-layer="4732" to-port="0"/>
-		<edge from-layer="4730" from-port="0" to-layer="4731" to-port="1"/>
-		<edge from-layer="4731" from-port="2" to-layer="4732" to-port="2"/>
-		<edge from-layer="4732" from-port="4" to-layer="4734" to-port="2"/>
-		<edge from-layer="4733" from-port="0" to-layer="4734" to-port="3"/>
-		<edge from-layer="4734" from-port="4" to-layer="4735" to-port="0"/>
-		<edge from-layer="4735" from-port="1" to-layer="4736" to-port="1"/>
-		<edge from-layer="4736" from-port="2" to-layer="4738" to-port="0"/>
-		<edge from-layer="4737" from-port="0" to-layer="4738" to-port="1"/>
-		<edge from-layer="4738" from-port="2" to-layer="4739" to-port="1"/>
-		<edge from-layer="4739" from-port="2" to-layer="4740" to-port="0"/>
-		<edge from-layer="4740" from-port="2" to-layer="4744" to-port="0"/>
-		<edge from-layer="4741" from-port="0" to-layer="4743" to-port="1"/>
-		<edge from-layer="4742" from-port="0" to-layer="4743" to-port="2"/>
-		<edge from-layer="4743" from-port="3" to-layer="4744" to-port="1"/>
-		<edge from-layer="4744" from-port="2" to-layer="4746" to-port="0"/>
-		<edge from-layer="4745" from-port="0" to-layer="4746" to-port="1"/>
-		<edge from-layer="4746" from-port="2" to-layer="4748" to-port="0"/>
-		<edge from-layer="4747" from-port="0" to-layer="4748" to-port="1"/>
-		<edge from-layer="4748" from-port="2" to-layer="4750" to-port="0"/>
-		<edge from-layer="4749" from-port="0" to-layer="4750" to-port="1"/>
-		<edge from-layer="4750" from-port="2" to-layer="4751" to-port="0"/>
-		<edge from-layer="4751" from-port="2" to-layer="4752" to-port="0"/>
-		<edge from-layer="4752" from-port="2" to-layer="4754" to-port="0"/>
-		<edge from-layer="4752" from-port="2" to-layer="4758" to-port="0"/>
-		<edge from-layer="4752" from-port="2" to-layer="4761" to-port="0"/>
-		<edge from-layer="4753" from-port="0" to-layer="4754" to-port="1"/>
-		<edge from-layer="4754" from-port="2" to-layer="4756" to-port="0"/>
-		<edge from-layer="4755" from-port="0" to-layer="4756" to-port="1"/>
-		<edge from-layer="4756" from-port="2" to-layer="4797" to-port="0"/>
-		<edge from-layer="4757" from-port="0" to-layer="4758" to-port="1"/>
-		<edge from-layer="4758" from-port="2" to-layer="4760" to-port="0"/>
-		<edge from-layer="4759" from-port="0" to-layer="4760" to-port="1"/>
-		<edge from-layer="4760" from-port="2" to-layer="4762" to-port="0"/>
-		<edge from-layer="4761" from-port="1" to-layer="4762" to-port="1"/>
-		<edge from-layer="4762" from-port="2" to-layer="4764" to-port="0"/>
-		<edge from-layer="4763" from-port="0" to-layer="4764" to-port="1"/>
-		<edge from-layer="4764" from-port="2" to-layer="4766" to-port="0"/>
-		<edge from-layer="4765" from-port="0" to-layer="4766" to-port="1"/>
-		<edge from-layer="4766" from-port="2" to-layer="4767" to-port="0"/>
-		<edge from-layer="4767" from-port="1" to-layer="4769" to-port="0"/>
-		<edge from-layer="4768" from-port="0" to-layer="4769" to-port="1"/>
-		<edge from-layer="4769" from-port="2" to-layer="4771" to-port="0"/>
-		<edge from-layer="4770" from-port="0" to-layer="4771" to-port="1"/>
-		<edge from-layer="4771" from-port="2" to-layer="4781" to-port="0"/>
-		<edge from-layer="4772" from-port="1" to-layer="4774" to-port="0"/>
-		<edge from-layer="4773" from-port="0" to-layer="4774" to-port="1"/>
-		<edge from-layer="4774" from-port="2" to-layer="4776" to-port="0"/>
-		<edge from-layer="4775" from-port="0" to-layer="4776" to-port="1"/>
-		<edge from-layer="4776" from-port="2" to-layer="4778" to-port="0"/>
-		<edge from-layer="4777" from-port="0" to-layer="4778" to-port="1"/>
-		<edge from-layer="4778" from-port="2" to-layer="4780" to-port="0"/>
-		<edge from-layer="4779" from-port="0" to-layer="4780" to-port="1"/>
-		<edge from-layer="4780" from-port="2" to-layer="4781" to-port="1"/>
-		<edge from-layer="4781" from-port="2" to-layer="4786" to-port="0"/>
-		<edge from-layer="4781" from-port="2" to-layer="4783" to-port="0"/>
-		<edge from-layer="4782" from-port="0" to-layer="4783" to-port="1"/>
-		<edge from-layer="4783" from-port="2" to-layer="4785" to-port="0"/>
-		<edge from-layer="4784" from-port="0" to-layer="4785" to-port="1"/>
-		<edge from-layer="4785" from-port="2" to-layer="4787" to-port="0"/>
-		<edge from-layer="4786" from-port="1" to-layer="4787" to-port="1"/>
-		<edge from-layer="4787" from-port="2" to-layer="4789" to-port="0"/>
-		<edge from-layer="4788" from-port="0" to-layer="4789" to-port="1"/>
-		<edge from-layer="4789" from-port="2" to-layer="4791" to-port="0"/>
-		<edge from-layer="4790" from-port="0" to-layer="4791" to-port="1"/>
-		<edge from-layer="4791" from-port="2" to-layer="4792" to-port="0"/>
-		<edge from-layer="4792" from-port="1" to-layer="4794" to-port="0"/>
-		<edge from-layer="4793" from-port="0" to-layer="4794" to-port="1"/>
-		<edge from-layer="4794" from-port="2" to-layer="4796" to-port="0"/>
-		<edge from-layer="4795" from-port="0" to-layer="4796" to-port="1"/>
-		<edge from-layer="4796" from-port="2" to-layer="4797" to-port="1"/>
-		<edge from-layer="4797" from-port="2" to-layer="5109" to-port="1"/>
-		<edge from-layer="4797" from-port="2" to-layer="4799" to-port="0"/>
-		<edge from-layer="4797" from-port="2" to-layer="4802" to-port="0"/>
-		<edge from-layer="4798" from-port="0" to-layer="4799" to-port="1"/>
-		<edge from-layer="4799" from-port="2" to-layer="4801" to-port="0"/>
-		<edge from-layer="4800" from-port="0" to-layer="4801" to-port="1"/>
-		<edge from-layer="4801" from-port="2" to-layer="4803" to-port="0"/>
-		<edge from-layer="4802" from-port="1" to-layer="4803" to-port="1"/>
-		<edge from-layer="4802" from-port="1" to-layer="4816" to-port="0"/>
-		<edge from-layer="4802" from-port="1" to-layer="4819" to-port="0"/>
-		<edge from-layer="4802" from-port="1" to-layer="4822" to-port="0"/>
-		<edge from-layer="4802" from-port="1" to-layer="5101" to-port="0"/>
-		<edge from-layer="4802" from-port="1" to-layer="4828" to-port="0"/>
-		<edge from-layer="4803" from-port="2" to-layer="4805" to-port="0"/>
-		<edge from-layer="4804" from-port="0" to-layer="4805" to-port="1"/>
-		<edge from-layer="4805" from-port="2" to-layer="4807" to-port="0"/>
-		<edge from-layer="4806" from-port="0" to-layer="4807" to-port="1"/>
-		<edge from-layer="4807" from-port="2" to-layer="4809" to-port="0"/>
-		<edge from-layer="4808" from-port="0" to-layer="4809" to-port="1"/>
-		<edge from-layer="4809" from-port="2" to-layer="4811" to-port="0"/>
-		<edge from-layer="4810" from-port="0" to-layer="4811" to-port="1"/>
-		<edge from-layer="4811" from-port="2" to-layer="4813" to-port="0"/>
-		<edge from-layer="4812" from-port="0" to-layer="4813" to-port="1"/>
-		<edge from-layer="4813" from-port="2" to-layer="4830" to-port="0"/>
-		<edge from-layer="4814" from-port="0" to-layer="4816" to-port="1"/>
-		<edge from-layer="4815" from-port="0" to-layer="4816" to-port="2"/>
-		<edge from-layer="4816" from-port="3" to-layer="4829" to-port="0"/>
-		<edge from-layer="4817" from-port="0" to-layer="4819" to-port="1"/>
-		<edge from-layer="4818" from-port="0" to-layer="4819" to-port="2"/>
-		<edge from-layer="4819" from-port="3" to-layer="4823" to-port="0"/>
-		<edge from-layer="4820" from-port="0" to-layer="4822" to-port="1"/>
-		<edge from-layer="4821" from-port="0" to-layer="4822" to-port="2"/>
-		<edge from-layer="4822" from-port="3" to-layer="4823" to-port="1"/>
-		<edge from-layer="4823" from-port="2" to-layer="4825" to-port="0"/>
-		<edge from-layer="4824" from-port="0" to-layer="4825" to-port="1"/>
-		<edge from-layer="4825" from-port="2" to-layer="4829" to-port="1"/>
-		<edge from-layer="4826" from-port="0" to-layer="4828" to-port="1"/>
-		<edge from-layer="4827" from-port="0" to-layer="4828" to-port="2"/>
-		<edge from-layer="4828" from-port="3" to-layer="4829" to-port="2"/>
-		<edge from-layer="4829" from-port="3" to-layer="4830" to-port="1"/>
-		<edge from-layer="4830" from-port="2" to-layer="4832" to-port="0"/>
-		<edge from-layer="4830" from-port="2" to-layer="4969" to-port="1"/>
-		<edge from-layer="4831" from-port="0" to-layer="4832" to-port="1"/>
-		<edge from-layer="4832" from-port="2" to-layer="4834" to-port="0"/>
-		<edge from-layer="4833" from-port="0" to-layer="4834" to-port="1"/>
-		<edge from-layer="4834" from-port="2" to-layer="4836" to-port="0"/>
-		<edge from-layer="4835" from-port="0" to-layer="4836" to-port="1"/>
-		<edge from-layer="4836" from-port="2" to-layer="4904" to-port="0"/>
-		<edge from-layer="4836" from-port="2" to-layer="4869" to-port="0"/>
-		<edge from-layer="4836" from-port="2" to-layer="4838" to-port="0"/>
-		<edge from-layer="4837" from-port="0" to-layer="4838" to-port="1"/>
-		<edge from-layer="4838" from-port="2" to-layer="4849" to-port="0"/>
-		<edge from-layer="4838" from-port="2" to-layer="4840" to-port="0"/>
-		<edge from-layer="4839" from-port="0" to-layer="4848" to-port="0"/>
-		<edge from-layer="4840" from-port="1" to-layer="4843" to-port="0"/>
-		<edge from-layer="4840" from-port="1" to-layer="4861" to-port="0"/>
-		<edge from-layer="4840" from-port="1" to-layer="4854" to-port="0"/>
-		<edge from-layer="4841" from-port="0" to-layer="4843" to-port="1"/>
-		<edge from-layer="4842" from-port="0" to-layer="4843" to-port="2"/>
-		<edge from-layer="4843" from-port="3" to-layer="4845" to-port="0"/>
-		<edge from-layer="4843" from-port="3" to-layer="4863" to-port="0"/>
-		<edge from-layer="4844" from-port="0" to-layer="4845" to-port="1"/>
-		<edge from-layer="4845" from-port="2" to-layer="4847" to-port="0"/>
-		<edge from-layer="4846" from-port="0" to-layer="4847" to-port="1"/>
-		<edge from-layer="4847" from-port="2" to-layer="4848" to-port="2"/>
-		<edge from-layer="4848" from-port="3" to-layer="4849" to-port="1"/>
-		<edge from-layer="4849" from-port="2" to-layer="4851" to-port="0"/>
-		<edge from-layer="4850" from-port="0" to-layer="4851" to-port="1"/>
-		<edge from-layer="4851" from-port="2" to-layer="4867" to-port="0"/>
-		<edge from-layer="4852" from-port="0" to-layer="4854" to-port="1"/>
-		<edge from-layer="4853" from-port="0" to-layer="4854" to-port="2"/>
-		<edge from-layer="4854" from-port="3" to-layer="4856" to-port="0"/>
-		<edge from-layer="4855" from-port="0" to-layer="4856" to-port="1"/>
-		<edge from-layer="4856" from-port="2" to-layer="4858" to-port="0"/>
-		<edge from-layer="4857" from-port="0" to-layer="4858" to-port="1"/>
-		<edge from-layer="4858" from-port="2" to-layer="4866" to-port="0"/>
-		<edge from-layer="4859" from-port="0" to-layer="4861" to-port="1"/>
-		<edge from-layer="4860" from-port="0" to-layer="4861" to-port="2"/>
-		<edge from-layer="4861" from-port="3" to-layer="4866" to-port="1"/>
-		<edge from-layer="4862" from-port="0" to-layer="4863" to-port="1"/>
-		<edge from-layer="4863" from-port="2" to-layer="4865" to-port="0"/>
-		<edge from-layer="4864" from-port="0" to-layer="4865" to-port="1"/>
-		<edge from-layer="4865" from-port="2" to-layer="4866" to-port="2"/>
-		<edge from-layer="4866" from-port="3" to-layer="4867" to-port="1"/>
-		<edge from-layer="4867" from-port="2" to-layer="4899" to-port="0"/>
-		<edge from-layer="4868" from-port="0" to-layer="4869" to-port="1"/>
-		<edge from-layer="4869" from-port="2" to-layer="4880" to-port="0"/>
-		<edge from-layer="4869" from-port="2" to-layer="4871" to-port="0"/>
-		<edge from-layer="4870" from-port="0" to-layer="4879" to-port="0"/>
-		<edge from-layer="4871" from-port="1" to-layer="4892" to-port="0"/>
-		<edge from-layer="4871" from-port="1" to-layer="4874" to-port="0"/>
-		<edge from-layer="4871" from-port="1" to-layer="4885" to-port="0"/>
-		<edge from-layer="4872" from-port="0" to-layer="4874" to-port="1"/>
-		<edge from-layer="4873" from-port="0" to-layer="4874" to-port="2"/>
-		<edge from-layer="4874" from-port="3" to-layer="4894" to-port="0"/>
-		<edge from-layer="4874" from-port="3" to-layer="4876" to-port="0"/>
-		<edge from-layer="4875" from-port="0" to-layer="4876" to-port="1"/>
-		<edge from-layer="4876" from-port="2" to-layer="4878" to-port="0"/>
-		<edge from-layer="4877" from-port="0" to-layer="4878" to-port="1"/>
-		<edge from-layer="4878" from-port="2" to-layer="4879" to-port="2"/>
-		<edge from-layer="4879" from-port="3" to-layer="4880" to-port="1"/>
-		<edge from-layer="4880" from-port="2" to-layer="4882" to-port="0"/>
-		<edge from-layer="4881" from-port="0" to-layer="4882" to-port="1"/>
-		<edge from-layer="4882" from-port="2" to-layer="4898" to-port="0"/>
-		<edge from-layer="4883" from-port="0" to-layer="4885" to-port="1"/>
-		<edge from-layer="4884" from-port="0" to-layer="4885" to-port="2"/>
-		<edge from-layer="4885" from-port="3" to-layer="4887" to-port="0"/>
-		<edge from-layer="4886" from-port="0" to-layer="4887" to-port="1"/>
-		<edge from-layer="4887" from-port="2" to-layer="4889" to-port="0"/>
-		<edge from-layer="4888" from-port="0" to-layer="4889" to-port="1"/>
-		<edge from-layer="4889" from-port="2" to-layer="4897" to-port="0"/>
-		<edge from-layer="4890" from-port="0" to-layer="4892" to-port="1"/>
-		<edge from-layer="4891" from-port="0" to-layer="4892" to-port="2"/>
-		<edge from-layer="4892" from-port="3" to-layer="4897" to-port="1"/>
-		<edge from-layer="4893" from-port="0" to-layer="4894" to-port="1"/>
-		<edge from-layer="4894" from-port="2" to-layer="4896" to-port="0"/>
-		<edge from-layer="4895" from-port="0" to-layer="4896" to-port="1"/>
-		<edge from-layer="4896" from-port="2" to-layer="4897" to-port="2"/>
-		<edge from-layer="4897" from-port="3" to-layer="4898" to-port="1"/>
-		<edge from-layer="4898" from-port="2" to-layer="4899" to-port="1"/>
-		<edge from-layer="4899" from-port="2" to-layer="4901" to-port="0"/>
-		<edge from-layer="4900" from-port="0" to-layer="4901" to-port="1"/>
-		<edge from-layer="4901" from-port="2" to-layer="4902" to-port="0"/>
-		<edge from-layer="4902" from-port="1" to-layer="4934" to-port="0"/>
-		<edge from-layer="4903" from-port="0" to-layer="4904" to-port="1"/>
-		<edge from-layer="4904" from-port="2" to-layer="4906" to-port="0"/>
-		<edge from-layer="4904" from-port="2" to-layer="4915" to-port="0"/>
-		<edge from-layer="4905" from-port="0" to-layer="4914" to-port="0"/>
-		<edge from-layer="4906" from-port="1" to-layer="4909" to-port="0"/>
-		<edge from-layer="4906" from-port="1" to-layer="4927" to-port="0"/>
-		<edge from-layer="4906" from-port="1" to-layer="4920" to-port="0"/>
-		<edge from-layer="4907" from-port="0" to-layer="4909" to-port="1"/>
-		<edge from-layer="4908" from-port="0" to-layer="4909" to-port="2"/>
-		<edge from-layer="4909" from-port="3" to-layer="4929" to-port="0"/>
-		<edge from-layer="4909" from-port="3" to-layer="4911" to-port="0"/>
-		<edge from-layer="4910" from-port="0" to-layer="4911" to-port="1"/>
-		<edge from-layer="4911" from-port="2" to-layer="4913" to-port="0"/>
-		<edge from-layer="4912" from-port="0" to-layer="4913" to-port="1"/>
-		<edge from-layer="4913" from-port="2" to-layer="4914" to-port="2"/>
-		<edge from-layer="4914" from-port="3" to-layer="4915" to-port="1"/>
-		<edge from-layer="4915" from-port="2" to-layer="4917" to-port="0"/>
-		<edge from-layer="4916" from-port="0" to-layer="4917" to-port="1"/>
-		<edge from-layer="4917" from-port="2" to-layer="4933" to-port="0"/>
-		<edge from-layer="4918" from-port="0" to-layer="4920" to-port="1"/>
-		<edge from-layer="4919" from-port="0" to-layer="4920" to-port="2"/>
-		<edge from-layer="4920" from-port="3" to-layer="4922" to-port="0"/>
-		<edge from-layer="4921" from-port="0" to-layer="4922" to-port="1"/>
-		<edge from-layer="4922" from-port="2" to-layer="4924" to-port="0"/>
-		<edge from-layer="4923" from-port="0" to-layer="4924" to-port="1"/>
-		<edge from-layer="4924" from-port="2" to-layer="4932" to-port="0"/>
-		<edge from-layer="4925" from-port="0" to-layer="4927" to-port="1"/>
-		<edge from-layer="4926" from-port="0" to-layer="4927" to-port="2"/>
-		<edge from-layer="4927" from-port="3" to-layer="4932" to-port="1"/>
-		<edge from-layer="4928" from-port="0" to-layer="4929" to-port="1"/>
-		<edge from-layer="4929" from-port="2" to-layer="4931" to-port="0"/>
-		<edge from-layer="4930" from-port="0" to-layer="4931" to-port="1"/>
-		<edge from-layer="4931" from-port="2" to-layer="4932" to-port="2"/>
-		<edge from-layer="4932" from-port="3" to-layer="4933" to-port="1"/>
-		<edge from-layer="4933" from-port="2" to-layer="4934" to-port="1"/>
-		<edge from-layer="4934" from-port="2" to-layer="4947" to-port="0"/>
-		<edge from-layer="4934" from-port="2" to-layer="4935" to-port="0"/>
-		<edge from-layer="4935" from-port="1" to-layer="4945" to-port="0"/>
-		<edge from-layer="4935" from-port="1" to-layer="4959" to-port="0"/>
-		<edge from-layer="4935" from-port="1" to-layer="4938" to-port="0"/>
-		<edge from-layer="4935" from-port="1" to-layer="4956" to-port="0"/>
-		<edge from-layer="4936" from-port="0" to-layer="4938" to-port="1"/>
-		<edge from-layer="4937" from-port="0" to-layer="4938" to-port="2"/>
-		<edge from-layer="4938" from-port="3" to-layer="4940" to-port="0"/>
-		<edge from-layer="4938" from-port="3" to-layer="4951" to-port="0"/>
-		<edge from-layer="4939" from-port="0" to-layer="4940" to-port="1"/>
-		<edge from-layer="4940" from-port="2" to-layer="4942" to-port="0"/>
-		<edge from-layer="4941" from-port="0" to-layer="4942" to-port="1"/>
-		<edge from-layer="4942" from-port="2" to-layer="4946" to-port="0"/>
-		<edge from-layer="4943" from-port="0" to-layer="4945" to-port="1"/>
-		<edge from-layer="4944" from-port="0" to-layer="4945" to-port="2"/>
-		<edge from-layer="4945" from-port="3" to-layer="4946" to-port="2"/>
-		<edge from-layer="4946" from-port="3" to-layer="4947" to-port="1"/>
-		<edge from-layer="4947" from-port="2" to-layer="4949" to-port="0"/>
-		<edge from-layer="4948" from-port="0" to-layer="4949" to-port="1"/>
-		<edge from-layer="4949" from-port="2" to-layer="4965" to-port="0"/>
-		<edge from-layer="4950" from-port="0" to-layer="4951" to-port="1"/>
-		<edge from-layer="4951" from-port="2" to-layer="4953" to-port="0"/>
-		<edge from-layer="4952" from-port="0" to-layer="4953" to-port="1"/>
-		<edge from-layer="4953" from-port="2" to-layer="4964" to-port="0"/>
-		<edge from-layer="4954" from-port="0" to-layer="4956" to-port="1"/>
-		<edge from-layer="4955" from-port="0" to-layer="4956" to-port="2"/>
-		<edge from-layer="4956" from-port="3" to-layer="4964" to-port="1"/>
-		<edge from-layer="4957" from-port="0" to-layer="4959" to-port="1"/>
-		<edge from-layer="4958" from-port="0" to-layer="4959" to-port="2"/>
-		<edge from-layer="4959" from-port="3" to-layer="4961" to-port="0"/>
-		<edge from-layer="4960" from-port="0" to-layer="4961" to-port="1"/>
-		<edge from-layer="4961" from-port="2" to-layer="4963" to-port="0"/>
-		<edge from-layer="4962" from-port="0" to-layer="4963" to-port="1"/>
-		<edge from-layer="4963" from-port="2" to-layer="4964" to-port="2"/>
-		<edge from-layer="4964" from-port="3" to-layer="4965" to-port="1"/>
-		<edge from-layer="4965" from-port="2" to-layer="4967" to-port="0"/>
-		<edge from-layer="4966" from-port="0" to-layer="4967" to-port="1"/>
-		<edge from-layer="4967" from-port="2" to-layer="4968" to-port="1"/>
-		<edge from-layer="4968" from-port="2" to-layer="4969" to-port="0"/>
-		<edge from-layer="4969" from-port="2" to-layer="4971" to-port="0"/>
-		<edge from-layer="4969" from-port="2" to-layer="5058" to-port="1"/>
-		<edge from-layer="4970" from-port="0" to-layer="4971" to-port="1"/>
-		<edge from-layer="4971" from-port="2" to-layer="4973" to-port="0"/>
-		<edge from-layer="4972" from-port="0" to-layer="4973" to-port="1"/>
-		<edge from-layer="4973" from-port="2" to-layer="4975" to-port="0"/>
-		<edge from-layer="4974" from-port="0" to-layer="4975" to-port="1"/>
-		<edge from-layer="4975" from-port="2" to-layer="4977" to-port="0"/>
-		<edge from-layer="4976" from-port="0" to-layer="4977" to-port="1"/>
-		<edge from-layer="4977" from-port="2" to-layer="4979" to-port="0"/>
-		<edge from-layer="4977" from-port="2" to-layer="4988" to-port="0"/>
-		<edge from-layer="4978" from-port="0" to-layer="4987" to-port="0"/>
-		<edge from-layer="4979" from-port="1" to-layer="4982" to-port="0"/>
-		<edge from-layer="4979" from-port="1" to-layer="4993" to-port="0"/>
-		<edge from-layer="4979" from-port="1" to-layer="5000" to-port="0"/>
-		<edge from-layer="4980" from-port="0" to-layer="4982" to-port="1"/>
-		<edge from-layer="4981" from-port="0" to-layer="4982" to-port="2"/>
-		<edge from-layer="4982" from-port="3" to-layer="5002" to-port="0"/>
-		<edge from-layer="4982" from-port="3" to-layer="4984" to-port="0"/>
-		<edge from-layer="4983" from-port="0" to-layer="4984" to-port="1"/>
-		<edge from-layer="4984" from-port="2" to-layer="4986" to-port="0"/>
-		<edge from-layer="4985" from-port="0" to-layer="4986" to-port="1"/>
-		<edge from-layer="4986" from-port="2" to-layer="4987" to-port="2"/>
-		<edge from-layer="4987" from-port="3" to-layer="4988" to-port="1"/>
-		<edge from-layer="4988" from-port="2" to-layer="4990" to-port="0"/>
-		<edge from-layer="4989" from-port="0" to-layer="4990" to-port="1"/>
-		<edge from-layer="4990" from-port="2" to-layer="5006" to-port="0"/>
-		<edge from-layer="4991" from-port="0" to-layer="4993" to-port="1"/>
-		<edge from-layer="4992" from-port="0" to-layer="4993" to-port="2"/>
-		<edge from-layer="4993" from-port="3" to-layer="4995" to-port="0"/>
-		<edge from-layer="4994" from-port="0" to-layer="4995" to-port="1"/>
-		<edge from-layer="4995" from-port="2" to-layer="4997" to-port="0"/>
-		<edge from-layer="4996" from-port="0" to-layer="4997" to-port="1"/>
-		<edge from-layer="4997" from-port="2" to-layer="5005" to-port="0"/>
-		<edge from-layer="4998" from-port="0" to-layer="5000" to-port="1"/>
-		<edge from-layer="4999" from-port="0" to-layer="5000" to-port="2"/>
-		<edge from-layer="5000" from-port="3" to-layer="5005" to-port="1"/>
-		<edge from-layer="5001" from-port="0" to-layer="5002" to-port="1"/>
-		<edge from-layer="5002" from-port="2" to-layer="5004" to-port="0"/>
-		<edge from-layer="5003" from-port="0" to-layer="5004" to-port="1"/>
-		<edge from-layer="5004" from-port="2" to-layer="5005" to-port="2"/>
-		<edge from-layer="5005" from-port="3" to-layer="5006" to-port="1"/>
-		<edge from-layer="5006" from-port="2" to-layer="5013" to-port="0"/>
-		<edge from-layer="5007" from-port="0" to-layer="5008" to-port="1"/>
-		<edge from-layer="5008" from-port="2" to-layer="5009" to-port="0"/>
-		<edge from-layer="5009" from-port="2" to-layer="5011" to-port="0"/>
-		<edge from-layer="5010" from-port="0" to-layer="5011" to-port="1"/>
-		<edge from-layer="5011" from-port="2" to-layer="5012" to-port="0"/>
-		<edge from-layer="5012" from-port="2" to-layer="5013" to-port="1"/>
-		<edge from-layer="5013" from-port="2" to-layer="5015" to-port="0"/>
-		<edge from-layer="5014" from-port="0" to-layer="5015" to-port="1"/>
-		<edge from-layer="5015" from-port="2" to-layer="5016" to-port="0"/>
-		<edge from-layer="5016" from-port="1" to-layer="5023" to-port="0"/>
-		<edge from-layer="5017" from-port="0" to-layer="5018" to-port="1"/>
-		<edge from-layer="5018" from-port="2" to-layer="5019" to-port="0"/>
-		<edge from-layer="5019" from-port="2" to-layer="5021" to-port="0"/>
-		<edge from-layer="5020" from-port="0" to-layer="5021" to-port="1"/>
-		<edge from-layer="5021" from-port="2" to-layer="5022" to-port="0"/>
-		<edge from-layer="5022" from-port="2" to-layer="5023" to-port="1"/>
-		<edge from-layer="5023" from-port="2" to-layer="5024" to-port="0"/>
-		<edge from-layer="5023" from-port="2" to-layer="5036" to-port="0"/>
-		<edge from-layer="5024" from-port="1" to-layer="5027" to-port="0"/>
-		<edge from-layer="5024" from-port="1" to-layer="5034" to-port="0"/>
-		<edge from-layer="5024" from-port="1" to-layer="5045" to-port="0"/>
-		<edge from-layer="5024" from-port="1" to-layer="5048" to-port="0"/>
-		<edge from-layer="5025" from-port="0" to-layer="5027" to-port="1"/>
-		<edge from-layer="5026" from-port="0" to-layer="5027" to-port="2"/>
-		<edge from-layer="5027" from-port="3" to-layer="5029" to-port="0"/>
-		<edge from-layer="5027" from-port="3" to-layer="5040" to-port="0"/>
-		<edge from-layer="5028" from-port="0" to-layer="5029" to-port="1"/>
-		<edge from-layer="5029" from-port="2" to-layer="5031" to-port="0"/>
-		<edge from-layer="5030" from-port="0" to-layer="5031" to-port="1"/>
-		<edge from-layer="5031" from-port="2" to-layer="5035" to-port="0"/>
-		<edge from-layer="5032" from-port="0" to-layer="5034" to-port="1"/>
-		<edge from-layer="5033" from-port="0" to-layer="5034" to-port="2"/>
-		<edge from-layer="5034" from-port="3" to-layer="5035" to-port="2"/>
-		<edge from-layer="5035" from-port="3" to-layer="5036" to-port="1"/>
-		<edge from-layer="5036" from-port="2" to-layer="5038" to-port="0"/>
-		<edge from-layer="5037" from-port="0" to-layer="5038" to-port="1"/>
-		<edge from-layer="5038" from-port="2" to-layer="5054" to-port="0"/>
-		<edge from-layer="5039" from-port="0" to-layer="5040" to-port="1"/>
-		<edge from-layer="5040" from-port="2" to-layer="5042" to-port="0"/>
-		<edge from-layer="5041" from-port="0" to-layer="5042" to-port="1"/>
-		<edge from-layer="5042" from-port="2" to-layer="5053" to-port="0"/>
-		<edge from-layer="5043" from-port="0" to-layer="5045" to-port="1"/>
-		<edge from-layer="5044" from-port="0" to-layer="5045" to-port="2"/>
-		<edge from-layer="5045" from-port="3" to-layer="5053" to-port="1"/>
-		<edge from-layer="5046" from-port="0" to-layer="5048" to-port="1"/>
-		<edge from-layer="5047" from-port="0" to-layer="5048" to-port="2"/>
-		<edge from-layer="5048" from-port="3" to-layer="5050" to-port="0"/>
-		<edge from-layer="5049" from-port="0" to-layer="5050" to-port="1"/>
-		<edge from-layer="5050" from-port="2" to-layer="5052" to-port="0"/>
-		<edge from-layer="5051" from-port="0" to-layer="5052" to-port="1"/>
-		<edge from-layer="5052" from-port="2" to-layer="5053" to-port="2"/>
-		<edge from-layer="5053" from-port="3" to-layer="5054" to-port="1"/>
-		<edge from-layer="5054" from-port="2" to-layer="5056" to-port="0"/>
-		<edge from-layer="5055" from-port="0" to-layer="5056" to-port="1"/>
-		<edge from-layer="5056" from-port="2" to-layer="5057" to-port="1"/>
-		<edge from-layer="5057" from-port="2" to-layer="5058" to-port="0"/>
-		<edge from-layer="5058" from-port="2" to-layer="5098" to-port="1"/>
-		<edge from-layer="5058" from-port="2" to-layer="5060" to-port="0"/>
-		<edge from-layer="5059" from-port="0" to-layer="5060" to-port="1"/>
-		<edge from-layer="5060" from-port="2" to-layer="5062" to-port="0"/>
-		<edge from-layer="5061" from-port="0" to-layer="5062" to-port="1"/>
-		<edge from-layer="5062" from-port="2" to-layer="5064" to-port="0"/>
-		<edge from-layer="5063" from-port="0" to-layer="5064" to-port="1"/>
-		<edge from-layer="5064" from-port="2" to-layer="5066" to-port="0"/>
-		<edge from-layer="5065" from-port="0" to-layer="5066" to-port="1"/>
-		<edge from-layer="5066" from-port="2" to-layer="5067" to-port="1"/>
-		<edge from-layer="5067" from-port="2" to-layer="5082" to-port="0"/>
-		<edge from-layer="5067" from-port="2" to-layer="5071" to-port="0"/>
-		<edge from-layer="5067" from-port="2" to-layer="5092" to-port="0"/>
-		<edge from-layer="5068" from-port="0" to-layer="5082" to-port="1"/>
-		<edge from-layer="5069" from-port="0" to-layer="5080" to-port="0"/>
-		<edge from-layer="5070" from-port="0" to-layer="5080" to-port="1"/>
-		<edge from-layer="5071" from-port="1" to-layer="5074" to-port="0"/>
-		<edge from-layer="5072" from-port="0" to-layer="5074" to-port="1"/>
-		<edge from-layer="5073" from-port="0" to-layer="5074" to-port="2"/>
-		<edge from-layer="5074" from-port="3" to-layer="5076" to-port="0"/>
-		<edge from-layer="5075" from-port="0" to-layer="5076" to-port="1"/>
-		<edge from-layer="5076" from-port="2" to-layer="5078" to-port="0"/>
-		<edge from-layer="5077" from-port="0" to-layer="5078" to-port="1"/>
-		<edge from-layer="5078" from-port="2" to-layer="5089" to-port="0"/>
-		<edge from-layer="5078" from-port="2" to-layer="5080" to-port="2"/>
-		<edge from-layer="5078" from-port="2" to-layer="5086" to-port="2"/>
-		<edge from-layer="5079" from-port="0" to-layer="5080" to-port="3"/>
-		<edge from-layer="5080" from-port="4" to-layer="5082" to-port="2"/>
-		<edge from-layer="5081" from-port="0" to-layer="5082" to-port="3"/>
-		<edge from-layer="5082" from-port="4" to-layer="5094" to-port="0"/>
-		<edge from-layer="5083" from-port="0" to-layer="5086" to-port="0"/>
-		<edge from-layer="5084" from-port="0" to-layer="5090" to-port="1"/>
-		<edge from-layer="5084" from-port="0" to-layer="5086" to-port="1"/>
-		<edge from-layer="5085" from-port="0" to-layer="5090" to-port="3"/>
-		<edge from-layer="5085" from-port="0" to-layer="5086" to-port="3"/>
-		<edge from-layer="5086" from-port="4" to-layer="5092" to-port="1"/>
-		<edge from-layer="5087" from-port="0" to-layer="5090" to-port="0"/>
-		<edge from-layer="5088" from-port="0" to-layer="5089" to-port="1"/>
-		<edge from-layer="5089" from-port="2" to-layer="5090" to-port="2"/>
-		<edge from-layer="5090" from-port="4" to-layer="5092" to-port="2"/>
-		<edge from-layer="5091" from-port="0" to-layer="5092" to-port="3"/>
-		<edge from-layer="5092" from-port="4" to-layer="5093" to-port="0"/>
-		<edge from-layer="5093" from-port="1" to-layer="5094" to-port="1"/>
-		<edge from-layer="5094" from-port="2" to-layer="5096" to-port="0"/>
-		<edge from-layer="5095" from-port="0" to-layer="5096" to-port="1"/>
-		<edge from-layer="5096" from-port="2" to-layer="5097" to-port="1"/>
-		<edge from-layer="5097" from-port="2" to-layer="5098" to-port="0"/>
-		<edge from-layer="5098" from-port="2" to-layer="5102" to-port="0"/>
-		<edge from-layer="5099" from-port="0" to-layer="5101" to-port="1"/>
-		<edge from-layer="5100" from-port="0" to-layer="5101" to-port="2"/>
-		<edge from-layer="5101" from-port="3" to-layer="5102" to-port="1"/>
-		<edge from-layer="5102" from-port="2" to-layer="5104" to-port="0"/>
-		<edge from-layer="5103" from-port="0" to-layer="5104" to-port="1"/>
-		<edge from-layer="5104" from-port="2" to-layer="5106" to-port="0"/>
-		<edge from-layer="5105" from-port="0" to-layer="5106" to-port="1"/>
-		<edge from-layer="5106" from-port="2" to-layer="5108" to-port="0"/>
-		<edge from-layer="5107" from-port="0" to-layer="5108" to-port="1"/>
-		<edge from-layer="5108" from-port="2" to-layer="5109" to-port="0"/>
-		<edge from-layer="5109" from-port="2" to-layer="5110" to-port="0"/>
-		<edge from-layer="5110" from-port="2" to-layer="5116" to-port="0"/>
-		<edge from-layer="5110" from-port="2" to-layer="5119" to-port="0"/>
-		<edge from-layer="5110" from-port="2" to-layer="5112" to-port="0"/>
-		<edge from-layer="5111" from-port="0" to-layer="5112" to-port="1"/>
-		<edge from-layer="5112" from-port="2" to-layer="5114" to-port="0"/>
-		<edge from-layer="5113" from-port="0" to-layer="5114" to-port="1"/>
-		<edge from-layer="5114" from-port="2" to-layer="5155" to-port="0"/>
-		<edge from-layer="5115" from-port="0" to-layer="5116" to-port="1"/>
-		<edge from-layer="5116" from-port="2" to-layer="5118" to-port="0"/>
-		<edge from-layer="5117" from-port="0" to-layer="5118" to-port="1"/>
-		<edge from-layer="5118" from-port="2" to-layer="5120" to-port="0"/>
-		<edge from-layer="5119" from-port="1" to-layer="5120" to-port="1"/>
-		<edge from-layer="5120" from-port="2" to-layer="5122" to-port="0"/>
-		<edge from-layer="5121" from-port="0" to-layer="5122" to-port="1"/>
-		<edge from-layer="5122" from-port="2" to-layer="5124" to-port="0"/>
-		<edge from-layer="5123" from-port="0" to-layer="5124" to-port="1"/>
-		<edge from-layer="5124" from-port="2" to-layer="5125" to-port="0"/>
-		<edge from-layer="5125" from-port="1" to-layer="5127" to-port="0"/>
-		<edge from-layer="5126" from-port="0" to-layer="5127" to-port="1"/>
-		<edge from-layer="5127" from-port="2" to-layer="5129" to-port="0"/>
-		<edge from-layer="5128" from-port="0" to-layer="5129" to-port="1"/>
-		<edge from-layer="5129" from-port="2" to-layer="5139" to-port="0"/>
-		<edge from-layer="5130" from-port="1" to-layer="5132" to-port="0"/>
-		<edge from-layer="5131" from-port="0" to-layer="5132" to-port="1"/>
-		<edge from-layer="5132" from-port="2" to-layer="5134" to-port="0"/>
-		<edge from-layer="5133" from-port="0" to-layer="5134" to-port="1"/>
-		<edge from-layer="5134" from-port="2" to-layer="5136" to-port="0"/>
-		<edge from-layer="5135" from-port="0" to-layer="5136" to-port="1"/>
-		<edge from-layer="5136" from-port="2" to-layer="5138" to-port="0"/>
-		<edge from-layer="5137" from-port="0" to-layer="5138" to-port="1"/>
-		<edge from-layer="5138" from-port="2" to-layer="5139" to-port="1"/>
-		<edge from-layer="5139" from-port="2" to-layer="5141" to-port="0"/>
-		<edge from-layer="5139" from-port="2" to-layer="5144" to-port="0"/>
-		<edge from-layer="5140" from-port="0" to-layer="5141" to-port="1"/>
-		<edge from-layer="5141" from-port="2" to-layer="5143" to-port="0"/>
-		<edge from-layer="5142" from-port="0" to-layer="5143" to-port="1"/>
-		<edge from-layer="5143" from-port="2" to-layer="5145" to-port="0"/>
-		<edge from-layer="5144" from-port="1" to-layer="5145" to-port="1"/>
-		<edge from-layer="5145" from-port="2" to-layer="5147" to-port="0"/>
-		<edge from-layer="5146" from-port="0" to-layer="5147" to-port="1"/>
-		<edge from-layer="5147" from-port="2" to-layer="5149" to-port="0"/>
-		<edge from-layer="5148" from-port="0" to-layer="5149" to-port="1"/>
-		<edge from-layer="5149" from-port="2" to-layer="5150" to-port="0"/>
-		<edge from-layer="5150" from-port="1" to-layer="5152" to-port="0"/>
-		<edge from-layer="5151" from-port="0" to-layer="5152" to-port="1"/>
-		<edge from-layer="5152" from-port="2" to-layer="5154" to-port="0"/>
-		<edge from-layer="5153" from-port="0" to-layer="5154" to-port="1"/>
-		<edge from-layer="5154" from-port="2" to-layer="5155" to-port="1"/>
-		<edge from-layer="5155" from-port="2" to-layer="5467" to-port="1"/>
-		<edge from-layer="5155" from-port="2" to-layer="5157" to-port="0"/>
-		<edge from-layer="5155" from-port="2" to-layer="5160" to-port="0"/>
-		<edge from-layer="5156" from-port="0" to-layer="5157" to-port="1"/>
-		<edge from-layer="5157" from-port="2" to-layer="5159" to-port="0"/>
-		<edge from-layer="5158" from-port="0" to-layer="5159" to-port="1"/>
-		<edge from-layer="5159" from-port="2" to-layer="5161" to-port="0"/>
-		<edge from-layer="5160" from-port="1" to-layer="5459" to-port="0"/>
-		<edge from-layer="5160" from-port="1" to-layer="5177" to-port="0"/>
-		<edge from-layer="5160" from-port="1" to-layer="5186" to-port="0"/>
-		<edge from-layer="5160" from-port="1" to-layer="5180" to-port="0"/>
-		<edge from-layer="5160" from-port="1" to-layer="5161" to-port="1"/>
-		<edge from-layer="5160" from-port="1" to-layer="5174" to-port="0"/>
-		<edge from-layer="5161" from-port="2" to-layer="5163" to-port="0"/>
-		<edge from-layer="5162" from-port="0" to-layer="5163" to-port="1"/>
-		<edge from-layer="5163" from-port="2" to-layer="5165" to-port="0"/>
-		<edge from-layer="5164" from-port="0" to-layer="5165" to-port="1"/>
-		<edge from-layer="5165" from-port="2" to-layer="5167" to-port="0"/>
-		<edge from-layer="5166" from-port="0" to-layer="5167" to-port="1"/>
-		<edge from-layer="5167" from-port="2" to-layer="5169" to-port="0"/>
-		<edge from-layer="5168" from-port="0" to-layer="5169" to-port="1"/>
-		<edge from-layer="5169" from-port="2" to-layer="5171" to-port="0"/>
-		<edge from-layer="5170" from-port="0" to-layer="5171" to-port="1"/>
-		<edge from-layer="5171" from-port="2" to-layer="5188" to-port="0"/>
-		<edge from-layer="5172" from-port="0" to-layer="5174" to-port="1"/>
-		<edge from-layer="5173" from-port="0" to-layer="5174" to-port="2"/>
-		<edge from-layer="5174" from-port="3" to-layer="5187" to-port="0"/>
-		<edge from-layer="5175" from-port="0" to-layer="5177" to-port="1"/>
-		<edge from-layer="5176" from-port="0" to-layer="5177" to-port="2"/>
-		<edge from-layer="5177" from-port="3" to-layer="5181" to-port="0"/>
-		<edge from-layer="5178" from-port="0" to-layer="5180" to-port="1"/>
-		<edge from-layer="5179" from-port="0" to-layer="5180" to-port="2"/>
-		<edge from-layer="5180" from-port="3" to-layer="5181" to-port="1"/>
-		<edge from-layer="5181" from-port="2" to-layer="5183" to-port="0"/>
-		<edge from-layer="5182" from-port="0" to-layer="5183" to-port="1"/>
-		<edge from-layer="5183" from-port="2" to-layer="5187" to-port="1"/>
-		<edge from-layer="5184" from-port="0" to-layer="5186" to-port="1"/>
-		<edge from-layer="5185" from-port="0" to-layer="5186" to-port="2"/>
-		<edge from-layer="5186" from-port="3" to-layer="5187" to-port="2"/>
-		<edge from-layer="5187" from-port="3" to-layer="5188" to-port="1"/>
-		<edge from-layer="5188" from-port="2" to-layer="5190" to-port="0"/>
-		<edge from-layer="5188" from-port="2" to-layer="5327" to-port="1"/>
-		<edge from-layer="5189" from-port="0" to-layer="5190" to-port="1"/>
-		<edge from-layer="5190" from-port="2" to-layer="5192" to-port="0"/>
-		<edge from-layer="5191" from-port="0" to-layer="5192" to-port="1"/>
-		<edge from-layer="5192" from-port="2" to-layer="5194" to-port="0"/>
-		<edge from-layer="5193" from-port="0" to-layer="5194" to-port="1"/>
-		<edge from-layer="5194" from-port="2" to-layer="5262" to-port="0"/>
-		<edge from-layer="5194" from-port="2" to-layer="5196" to-port="0"/>
-		<edge from-layer="5194" from-port="2" to-layer="5227" to-port="0"/>
-		<edge from-layer="5195" from-port="0" to-layer="5196" to-port="1"/>
-		<edge from-layer="5196" from-port="2" to-layer="5207" to-port="0"/>
-		<edge from-layer="5196" from-port="2" to-layer="5198" to-port="0"/>
-		<edge from-layer="5197" from-port="0" to-layer="5206" to-port="0"/>
-		<edge from-layer="5198" from-port="1" to-layer="5219" to-port="0"/>
-		<edge from-layer="5198" from-port="1" to-layer="5201" to-port="0"/>
-		<edge from-layer="5198" from-port="1" to-layer="5212" to-port="0"/>
-		<edge from-layer="5199" from-port="0" to-layer="5201" to-port="1"/>
-		<edge from-layer="5200" from-port="0" to-layer="5201" to-port="2"/>
-		<edge from-layer="5201" from-port="3" to-layer="5203" to-port="0"/>
-		<edge from-layer="5201" from-port="3" to-layer="5221" to-port="0"/>
-		<edge from-layer="5202" from-port="0" to-layer="5203" to-port="1"/>
-		<edge from-layer="5203" from-port="2" to-layer="5205" to-port="0"/>
-		<edge from-layer="5204" from-port="0" to-layer="5205" to-port="1"/>
-		<edge from-layer="5205" from-port="2" to-layer="5206" to-port="2"/>
-		<edge from-layer="5206" from-port="3" to-layer="5207" to-port="1"/>
-		<edge from-layer="5207" from-port="2" to-layer="5209" to-port="0"/>
-		<edge from-layer="5208" from-port="0" to-layer="5209" to-port="1"/>
-		<edge from-layer="5209" from-port="2" to-layer="5225" to-port="0"/>
-		<edge from-layer="5210" from-port="0" to-layer="5212" to-port="1"/>
-		<edge from-layer="5211" from-port="0" to-layer="5212" to-port="2"/>
-		<edge from-layer="5212" from-port="3" to-layer="5214" to-port="0"/>
-		<edge from-layer="5213" from-port="0" to-layer="5214" to-port="1"/>
-		<edge from-layer="5214" from-port="2" to-layer="5216" to-port="0"/>
-		<edge from-layer="5215" from-port="0" to-layer="5216" to-port="1"/>
-		<edge from-layer="5216" from-port="2" to-layer="5224" to-port="0"/>
-		<edge from-layer="5217" from-port="0" to-layer="5219" to-port="1"/>
-		<edge from-layer="5218" from-port="0" to-layer="5219" to-port="2"/>
-		<edge from-layer="5219" from-port="3" to-layer="5224" to-port="1"/>
-		<edge from-layer="5220" from-port="0" to-layer="5221" to-port="1"/>
-		<edge from-layer="5221" from-port="2" to-layer="5223" to-port="0"/>
-		<edge from-layer="5222" from-port="0" to-layer="5223" to-port="1"/>
-		<edge from-layer="5223" from-port="2" to-layer="5224" to-port="2"/>
-		<edge from-layer="5224" from-port="3" to-layer="5225" to-port="1"/>
-		<edge from-layer="5225" from-port="2" to-layer="5257" to-port="0"/>
-		<edge from-layer="5226" from-port="0" to-layer="5227" to-port="1"/>
-		<edge from-layer="5227" from-port="2" to-layer="5229" to-port="0"/>
-		<edge from-layer="5227" from-port="2" to-layer="5238" to-port="0"/>
-		<edge from-layer="5228" from-port="0" to-layer="5237" to-port="0"/>
-		<edge from-layer="5229" from-port="1" to-layer="5250" to-port="0"/>
-		<edge from-layer="5229" from-port="1" to-layer="5243" to-port="0"/>
-		<edge from-layer="5229" from-port="1" to-layer="5232" to-port="0"/>
-		<edge from-layer="5230" from-port="0" to-layer="5232" to-port="1"/>
-		<edge from-layer="5231" from-port="0" to-layer="5232" to-port="2"/>
-		<edge from-layer="5232" from-port="3" to-layer="5234" to-port="0"/>
-		<edge from-layer="5232" from-port="3" to-layer="5252" to-port="0"/>
-		<edge from-layer="5233" from-port="0" to-layer="5234" to-port="1"/>
-		<edge from-layer="5234" from-port="2" to-layer="5236" to-port="0"/>
-		<edge from-layer="5235" from-port="0" to-layer="5236" to-port="1"/>
-		<edge from-layer="5236" from-port="2" to-layer="5237" to-port="2"/>
-		<edge from-layer="5237" from-port="3" to-layer="5238" to-port="1"/>
-		<edge from-layer="5238" from-port="2" to-layer="5240" to-port="0"/>
-		<edge from-layer="5239" from-port="0" to-layer="5240" to-port="1"/>
-		<edge from-layer="5240" from-port="2" to-layer="5256" to-port="0"/>
-		<edge from-layer="5241" from-port="0" to-layer="5243" to-port="1"/>
-		<edge from-layer="5242" from-port="0" to-layer="5243" to-port="2"/>
-		<edge from-layer="5243" from-port="3" to-layer="5245" to-port="0"/>
-		<edge from-layer="5244" from-port="0" to-layer="5245" to-port="1"/>
-		<edge from-layer="5245" from-port="2" to-layer="5247" to-port="0"/>
-		<edge from-layer="5246" from-port="0" to-layer="5247" to-port="1"/>
-		<edge from-layer="5247" from-port="2" to-layer="5255" to-port="0"/>
-		<edge from-layer="5248" from-port="0" to-layer="5250" to-port="1"/>
-		<edge from-layer="5249" from-port="0" to-layer="5250" to-port="2"/>
-		<edge from-layer="5250" from-port="3" to-layer="5255" to-port="1"/>
-		<edge from-layer="5251" from-port="0" to-layer="5252" to-port="1"/>
-		<edge from-layer="5252" from-port="2" to-layer="5254" to-port="0"/>
-		<edge from-layer="5253" from-port="0" to-layer="5254" to-port="1"/>
-		<edge from-layer="5254" from-port="2" to-layer="5255" to-port="2"/>
-		<edge from-layer="5255" from-port="3" to-layer="5256" to-port="1"/>
-		<edge from-layer="5256" from-port="2" to-layer="5257" to-port="1"/>
-		<edge from-layer="5257" from-port="2" to-layer="5259" to-port="0"/>
-		<edge from-layer="5258" from-port="0" to-layer="5259" to-port="1"/>
-		<edge from-layer="5259" from-port="2" to-layer="5260" to-port="0"/>
-		<edge from-layer="5260" from-port="1" to-layer="5292" to-port="0"/>
-		<edge from-layer="5261" from-port="0" to-layer="5262" to-port="1"/>
-		<edge from-layer="5262" from-port="2" to-layer="5273" to-port="0"/>
-		<edge from-layer="5262" from-port="2" to-layer="5264" to-port="0"/>
-		<edge from-layer="5263" from-port="0" to-layer="5272" to-port="0"/>
-		<edge from-layer="5264" from-port="1" to-layer="5278" to-port="0"/>
-		<edge from-layer="5264" from-port="1" to-layer="5285" to-port="0"/>
-		<edge from-layer="5264" from-port="1" to-layer="5267" to-port="0"/>
-		<edge from-layer="5265" from-port="0" to-layer="5267" to-port="1"/>
-		<edge from-layer="5266" from-port="0" to-layer="5267" to-port="2"/>
-		<edge from-layer="5267" from-port="3" to-layer="5287" to-port="0"/>
-		<edge from-layer="5267" from-port="3" to-layer="5269" to-port="0"/>
-		<edge from-layer="5268" from-port="0" to-layer="5269" to-port="1"/>
-		<edge from-layer="5269" from-port="2" to-layer="5271" to-port="0"/>
-		<edge from-layer="5270" from-port="0" to-layer="5271" to-port="1"/>
-		<edge from-layer="5271" from-port="2" to-layer="5272" to-port="2"/>
-		<edge from-layer="5272" from-port="3" to-layer="5273" to-port="1"/>
-		<edge from-layer="5273" from-port="2" to-layer="5275" to-port="0"/>
-		<edge from-layer="5274" from-port="0" to-layer="5275" to-port="1"/>
-		<edge from-layer="5275" from-port="2" to-layer="5291" to-port="0"/>
-		<edge from-layer="5276" from-port="0" to-layer="5278" to-port="1"/>
-		<edge from-layer="5277" from-port="0" to-layer="5278" to-port="2"/>
-		<edge from-layer="5278" from-port="3" to-layer="5280" to-port="0"/>
-		<edge from-layer="5279" from-port="0" to-layer="5280" to-port="1"/>
-		<edge from-layer="5280" from-port="2" to-layer="5282" to-port="0"/>
-		<edge from-layer="5281" from-port="0" to-layer="5282" to-port="1"/>
-		<edge from-layer="5282" from-port="2" to-layer="5290" to-port="0"/>
-		<edge from-layer="5283" from-port="0" to-layer="5285" to-port="1"/>
-		<edge from-layer="5284" from-port="0" to-layer="5285" to-port="2"/>
-		<edge from-layer="5285" from-port="3" to-layer="5290" to-port="1"/>
-		<edge from-layer="5286" from-port="0" to-layer="5287" to-port="1"/>
-		<edge from-layer="5287" from-port="2" to-layer="5289" to-port="0"/>
-		<edge from-layer="5288" from-port="0" to-layer="5289" to-port="1"/>
-		<edge from-layer="5289" from-port="2" to-layer="5290" to-port="2"/>
-		<edge from-layer="5290" from-port="3" to-layer="5291" to-port="1"/>
-		<edge from-layer="5291" from-port="2" to-layer="5292" to-port="1"/>
-		<edge from-layer="5292" from-port="2" to-layer="5305" to-port="0"/>
-		<edge from-layer="5292" from-port="2" to-layer="5293" to-port="0"/>
-		<edge from-layer="5293" from-port="1" to-layer="5303" to-port="0"/>
-		<edge from-layer="5293" from-port="1" to-layer="5317" to-port="0"/>
-		<edge from-layer="5293" from-port="1" to-layer="5314" to-port="0"/>
-		<edge from-layer="5293" from-port="1" to-layer="5296" to-port="0"/>
-		<edge from-layer="5294" from-port="0" to-layer="5296" to-port="1"/>
-		<edge from-layer="5295" from-port="0" to-layer="5296" to-port="2"/>
-		<edge from-layer="5296" from-port="3" to-layer="5309" to-port="0"/>
-		<edge from-layer="5296" from-port="3" to-layer="5298" to-port="0"/>
-		<edge from-layer="5297" from-port="0" to-layer="5298" to-port="1"/>
-		<edge from-layer="5298" from-port="2" to-layer="5300" to-port="0"/>
-		<edge from-layer="5299" from-port="0" to-layer="5300" to-port="1"/>
-		<edge from-layer="5300" from-port="2" to-layer="5304" to-port="0"/>
-		<edge from-layer="5301" from-port="0" to-layer="5303" to-port="1"/>
-		<edge from-layer="5302" from-port="0" to-layer="5303" to-port="2"/>
-		<edge from-layer="5303" from-port="3" to-layer="5304" to-port="2"/>
-		<edge from-layer="5304" from-port="3" to-layer="5305" to-port="1"/>
-		<edge from-layer="5305" from-port="2" to-layer="5307" to-port="0"/>
-		<edge from-layer="5306" from-port="0" to-layer="5307" to-port="1"/>
-		<edge from-layer="5307" from-port="2" to-layer="5323" to-port="0"/>
-		<edge from-layer="5308" from-port="0" to-layer="5309" to-port="1"/>
-		<edge from-layer="5309" from-port="2" to-layer="5311" to-port="0"/>
-		<edge from-layer="5310" from-port="0" to-layer="5311" to-port="1"/>
-		<edge from-layer="5311" from-port="2" to-layer="5322" to-port="0"/>
-		<edge from-layer="5312" from-port="0" to-layer="5314" to-port="1"/>
-		<edge from-layer="5313" from-port="0" to-layer="5314" to-port="2"/>
-		<edge from-layer="5314" from-port="3" to-layer="5322" to-port="1"/>
-		<edge from-layer="5315" from-port="0" to-layer="5317" to-port="1"/>
-		<edge from-layer="5316" from-port="0" to-layer="5317" to-port="2"/>
-		<edge from-layer="5317" from-port="3" to-layer="5319" to-port="0"/>
-		<edge from-layer="5318" from-port="0" to-layer="5319" to-port="1"/>
-		<edge from-layer="5319" from-port="2" to-layer="5321" to-port="0"/>
-		<edge from-layer="5320" from-port="0" to-layer="5321" to-port="1"/>
-		<edge from-layer="5321" from-port="2" to-layer="5322" to-port="2"/>
-		<edge from-layer="5322" from-port="3" to-layer="5323" to-port="1"/>
-		<edge from-layer="5323" from-port="2" to-layer="5325" to-port="0"/>
-		<edge from-layer="5324" from-port="0" to-layer="5325" to-port="1"/>
-		<edge from-layer="5325" from-port="2" to-layer="5326" to-port="1"/>
-		<edge from-layer="5326" from-port="2" to-layer="5327" to-port="0"/>
-		<edge from-layer="5327" from-port="2" to-layer="5329" to-port="0"/>
-		<edge from-layer="5327" from-port="2" to-layer="5416" to-port="1"/>
-		<edge from-layer="5328" from-port="0" to-layer="5329" to-port="1"/>
-		<edge from-layer="5329" from-port="2" to-layer="5331" to-port="0"/>
-		<edge from-layer="5330" from-port="0" to-layer="5331" to-port="1"/>
-		<edge from-layer="5331" from-port="2" to-layer="5333" to-port="0"/>
-		<edge from-layer="5332" from-port="0" to-layer="5333" to-port="1"/>
-		<edge from-layer="5333" from-port="2" to-layer="5335" to-port="0"/>
-		<edge from-layer="5334" from-port="0" to-layer="5335" to-port="1"/>
-		<edge from-layer="5335" from-port="2" to-layer="5337" to-port="0"/>
-		<edge from-layer="5335" from-port="2" to-layer="5346" to-port="0"/>
-		<edge from-layer="5336" from-port="0" to-layer="5345" to-port="0"/>
-		<edge from-layer="5337" from-port="1" to-layer="5351" to-port="0"/>
-		<edge from-layer="5337" from-port="1" to-layer="5358" to-port="0"/>
-		<edge from-layer="5337" from-port="1" to-layer="5340" to-port="0"/>
-		<edge from-layer="5338" from-port="0" to-layer="5340" to-port="1"/>
-		<edge from-layer="5339" from-port="0" to-layer="5340" to-port="2"/>
-		<edge from-layer="5340" from-port="3" to-layer="5360" to-port="0"/>
-		<edge from-layer="5340" from-port="3" to-layer="5342" to-port="0"/>
-		<edge from-layer="5341" from-port="0" to-layer="5342" to-port="1"/>
-		<edge from-layer="5342" from-port="2" to-layer="5344" to-port="0"/>
-		<edge from-layer="5343" from-port="0" to-layer="5344" to-port="1"/>
-		<edge from-layer="5344" from-port="2" to-layer="5345" to-port="2"/>
-		<edge from-layer="5345" from-port="3" to-layer="5346" to-port="1"/>
-		<edge from-layer="5346" from-port="2" to-layer="5348" to-port="0"/>
-		<edge from-layer="5347" from-port="0" to-layer="5348" to-port="1"/>
-		<edge from-layer="5348" from-port="2" to-layer="5364" to-port="0"/>
-		<edge from-layer="5349" from-port="0" to-layer="5351" to-port="1"/>
-		<edge from-layer="5350" from-port="0" to-layer="5351" to-port="2"/>
-		<edge from-layer="5351" from-port="3" to-layer="5353" to-port="0"/>
-		<edge from-layer="5352" from-port="0" to-layer="5353" to-port="1"/>
-		<edge from-layer="5353" from-port="2" to-layer="5355" to-port="0"/>
-		<edge from-layer="5354" from-port="0" to-layer="5355" to-port="1"/>
-		<edge from-layer="5355" from-port="2" to-layer="5363" to-port="0"/>
-		<edge from-layer="5356" from-port="0" to-layer="5358" to-port="1"/>
-		<edge from-layer="5357" from-port="0" to-layer="5358" to-port="2"/>
-		<edge from-layer="5358" from-port="3" to-layer="5363" to-port="1"/>
-		<edge from-layer="5359" from-port="0" to-layer="5360" to-port="1"/>
-		<edge from-layer="5360" from-port="2" to-layer="5362" to-port="0"/>
-		<edge from-layer="5361" from-port="0" to-layer="5362" to-port="1"/>
-		<edge from-layer="5362" from-port="2" to-layer="5363" to-port="2"/>
-		<edge from-layer="5363" from-port="3" to-layer="5364" to-port="1"/>
-		<edge from-layer="5364" from-port="2" to-layer="5371" to-port="0"/>
-		<edge from-layer="5365" from-port="0" to-layer="5366" to-port="1"/>
-		<edge from-layer="5366" from-port="2" to-layer="5367" to-port="0"/>
-		<edge from-layer="5367" from-port="2" to-layer="5369" to-port="0"/>
-		<edge from-layer="5368" from-port="0" to-layer="5369" to-port="1"/>
-		<edge from-layer="5369" from-port="2" to-layer="5370" to-port="0"/>
-		<edge from-layer="5370" from-port="2" to-layer="5371" to-port="1"/>
-		<edge from-layer="5371" from-port="2" to-layer="5373" to-port="0"/>
-		<edge from-layer="5372" from-port="0" to-layer="5373" to-port="1"/>
-		<edge from-layer="5373" from-port="2" to-layer="5374" to-port="0"/>
-		<edge from-layer="5374" from-port="1" to-layer="5381" to-port="0"/>
-		<edge from-layer="5375" from-port="0" to-layer="5376" to-port="1"/>
-		<edge from-layer="5376" from-port="2" to-layer="5377" to-port="0"/>
-		<edge from-layer="5377" from-port="2" to-layer="5379" to-port="0"/>
-		<edge from-layer="5378" from-port="0" to-layer="5379" to-port="1"/>
-		<edge from-layer="5379" from-port="2" to-layer="5380" to-port="0"/>
-		<edge from-layer="5380" from-port="2" to-layer="5381" to-port="1"/>
-		<edge from-layer="5381" from-port="2" to-layer="5394" to-port="0"/>
-		<edge from-layer="5381" from-port="2" to-layer="5382" to-port="0"/>
-		<edge from-layer="5382" from-port="1" to-layer="5406" to-port="0"/>
-		<edge from-layer="5382" from-port="1" to-layer="5385" to-port="0"/>
-		<edge from-layer="5382" from-port="1" to-layer="5403" to-port="0"/>
-		<edge from-layer="5382" from-port="1" to-layer="5392" to-port="0"/>
-		<edge from-layer="5383" from-port="0" to-layer="5385" to-port="1"/>
-		<edge from-layer="5384" from-port="0" to-layer="5385" to-port="2"/>
-		<edge from-layer="5385" from-port="3" to-layer="5398" to-port="0"/>
-		<edge from-layer="5385" from-port="3" to-layer="5387" to-port="0"/>
-		<edge from-layer="5386" from-port="0" to-layer="5387" to-port="1"/>
-		<edge from-layer="5387" from-port="2" to-layer="5389" to-port="0"/>
-		<edge from-layer="5388" from-port="0" to-layer="5389" to-port="1"/>
-		<edge from-layer="5389" from-port="2" to-layer="5393" to-port="0"/>
-		<edge from-layer="5390" from-port="0" to-layer="5392" to-port="1"/>
-		<edge from-layer="5391" from-port="0" to-layer="5392" to-port="2"/>
-		<edge from-layer="5392" from-port="3" to-layer="5393" to-port="2"/>
-		<edge from-layer="5393" from-port="3" to-layer="5394" to-port="1"/>
-		<edge from-layer="5394" from-port="2" to-layer="5396" to-port="0"/>
-		<edge from-layer="5395" from-port="0" to-layer="5396" to-port="1"/>
-		<edge from-layer="5396" from-port="2" to-layer="5412" to-port="0"/>
-		<edge from-layer="5397" from-port="0" to-layer="5398" to-port="1"/>
-		<edge from-layer="5398" from-port="2" to-layer="5400" to-port="0"/>
-		<edge from-layer="5399" from-port="0" to-layer="5400" to-port="1"/>
-		<edge from-layer="5400" from-port="2" to-layer="5411" to-port="0"/>
-		<edge from-layer="5401" from-port="0" to-layer="5403" to-port="1"/>
-		<edge from-layer="5402" from-port="0" to-layer="5403" to-port="2"/>
-		<edge from-layer="5403" from-port="3" to-layer="5411" to-port="1"/>
-		<edge from-layer="5404" from-port="0" to-layer="5406" to-port="1"/>
-		<edge from-layer="5405" from-port="0" to-layer="5406" to-port="2"/>
-		<edge from-layer="5406" from-port="3" to-layer="5408" to-port="0"/>
-		<edge from-layer="5407" from-port="0" to-layer="5408" to-port="1"/>
-		<edge from-layer="5408" from-port="2" to-layer="5410" to-port="0"/>
-		<edge from-layer="5409" from-port="0" to-layer="5410" to-port="1"/>
-		<edge from-layer="5410" from-port="2" to-layer="5411" to-port="2"/>
-		<edge from-layer="5411" from-port="3" to-layer="5412" to-port="1"/>
-		<edge from-layer="5412" from-port="2" to-layer="5414" to-port="0"/>
-		<edge from-layer="5413" from-port="0" to-layer="5414" to-port="1"/>
-		<edge from-layer="5414" from-port="2" to-layer="5415" to-port="1"/>
-		<edge from-layer="5415" from-port="2" to-layer="5416" to-port="0"/>
-		<edge from-layer="5416" from-port="2" to-layer="5418" to-port="0"/>
-		<edge from-layer="5416" from-port="2" to-layer="5456" to-port="1"/>
-		<edge from-layer="5417" from-port="0" to-layer="5418" to-port="1"/>
-		<edge from-layer="5418" from-port="2" to-layer="5420" to-port="0"/>
-		<edge from-layer="5419" from-port="0" to-layer="5420" to-port="1"/>
-		<edge from-layer="5420" from-port="2" to-layer="5422" to-port="0"/>
-		<edge from-layer="5421" from-port="0" to-layer="5422" to-port="1"/>
-		<edge from-layer="5422" from-port="2" to-layer="5424" to-port="0"/>
-		<edge from-layer="5423" from-port="0" to-layer="5424" to-port="1"/>
-		<edge from-layer="5424" from-port="2" to-layer="5425" to-port="1"/>
-		<edge from-layer="5425" from-port="2" to-layer="5429" to-port="0"/>
-		<edge from-layer="5425" from-port="2" to-layer="5450" to-port="0"/>
-		<edge from-layer="5425" from-port="2" to-layer="5440" to-port="0"/>
-		<edge from-layer="5426" from-port="0" to-layer="5440" to-port="1"/>
-		<edge from-layer="5427" from-port="0" to-layer="5438" to-port="0"/>
-		<edge from-layer="5428" from-port="0" to-layer="5438" to-port="1"/>
-		<edge from-layer="5429" from-port="1" to-layer="5432" to-port="0"/>
-		<edge from-layer="5430" from-port="0" to-layer="5432" to-port="1"/>
-		<edge from-layer="5431" from-port="0" to-layer="5432" to-port="2"/>
-		<edge from-layer="5432" from-port="3" to-layer="5434" to-port="0"/>
-		<edge from-layer="5433" from-port="0" to-layer="5434" to-port="1"/>
-		<edge from-layer="5434" from-port="2" to-layer="5436" to-port="0"/>
-		<edge from-layer="5435" from-port="0" to-layer="5436" to-port="1"/>
-		<edge from-layer="5436" from-port="2" to-layer="5444" to-port="2"/>
-		<edge from-layer="5436" from-port="2" to-layer="5447" to-port="0"/>
-		<edge from-layer="5436" from-port="2" to-layer="5438" to-port="2"/>
-		<edge from-layer="5437" from-port="0" to-layer="5438" to-port="3"/>
-		<edge from-layer="5438" from-port="4" to-layer="5440" to-port="2"/>
-		<edge from-layer="5439" from-port="0" to-layer="5440" to-port="3"/>
-		<edge from-layer="5440" from-port="4" to-layer="5452" to-port="0"/>
-		<edge from-layer="5441" from-port="0" to-layer="5444" to-port="0"/>
-		<edge from-layer="5442" from-port="0" to-layer="5448" to-port="1"/>
-		<edge from-layer="5442" from-port="0" to-layer="5444" to-port="1"/>
-		<edge from-layer="5443" from-port="0" to-layer="5448" to-port="3"/>
-		<edge from-layer="5443" from-port="0" to-layer="5444" to-port="3"/>
-		<edge from-layer="5444" from-port="4" to-layer="5450" to-port="1"/>
-		<edge from-layer="5445" from-port="0" to-layer="5448" to-port="0"/>
-		<edge from-layer="5446" from-port="0" to-layer="5447" to-port="1"/>
-		<edge from-layer="5447" from-port="2" to-layer="5448" to-port="2"/>
-		<edge from-layer="5448" from-port="4" to-layer="5450" to-port="2"/>
-		<edge from-layer="5449" from-port="0" to-layer="5450" to-port="3"/>
-		<edge from-layer="5450" from-port="4" to-layer="5451" to-port="0"/>
-		<edge from-layer="5451" from-port="1" to-layer="5452" to-port="1"/>
-		<edge from-layer="5452" from-port="2" to-layer="5454" to-port="0"/>
-		<edge from-layer="5453" from-port="0" to-layer="5454" to-port="1"/>
-		<edge from-layer="5454" from-port="2" to-layer="5455" to-port="1"/>
-		<edge from-layer="5455" from-port="2" to-layer="5456" to-port="0"/>
-		<edge from-layer="5456" from-port="2" to-layer="5460" to-port="0"/>
-		<edge from-layer="5457" from-port="0" to-layer="5459" to-port="1"/>
-		<edge from-layer="5458" from-port="0" to-layer="5459" to-port="2"/>
-		<edge from-layer="5459" from-port="3" to-layer="5460" to-port="1"/>
-		<edge from-layer="5460" from-port="2" to-layer="5462" to-port="0"/>
-		<edge from-layer="5461" from-port="0" to-layer="5462" to-port="1"/>
-		<edge from-layer="5462" from-port="2" to-layer="5464" to-port="0"/>
-		<edge from-layer="5463" from-port="0" to-layer="5464" to-port="1"/>
-		<edge from-layer="5464" from-port="2" to-layer="5466" to-port="0"/>
-		<edge from-layer="5465" from-port="0" to-layer="5466" to-port="1"/>
-		<edge from-layer="5466" from-port="2" to-layer="5467" to-port="0"/>
-		<edge from-layer="5467" from-port="2" to-layer="5472" to-port="0"/>
-		<edge from-layer="5467" from-port="2" to-layer="5469" to-port="0"/>
-		<edge from-layer="5468" from-port="0" to-layer="5469" to-port="1"/>
-		<edge from-layer="5469" from-port="2" to-layer="5471" to-port="0"/>
-		<edge from-layer="5470" from-port="0" to-layer="5471" to-port="1"/>
-		<edge from-layer="5471" from-port="2" to-layer="5473" to-port="0"/>
-		<edge from-layer="5472" from-port="1" to-layer="5473" to-port="1"/>
-		<edge from-layer="5473" from-port="2" to-layer="5475" to-port="0"/>
-		<edge from-layer="5474" from-port="0" to-layer="5475" to-port="1"/>
-		<edge from-layer="5475" from-port="2" to-layer="5477" to-port="0"/>
-		<edge from-layer="5476" from-port="0" to-layer="5477" to-port="1"/>
-		<edge from-layer="5477" from-port="2" to-layer="5478" to-port="0"/>
-		<edge from-layer="5478" from-port="1" to-layer="5480" to-port="0"/>
-		<edge from-layer="5479" from-port="0" to-layer="5480" to-port="1"/>
-		<edge from-layer="5480" from-port="2" to-layer="5482" to-port="0"/>
-		<edge from-layer="5481" from-port="0" to-layer="5482" to-port="1"/>
-		<edge from-layer="5482" from-port="2" to-layer="5483" to-port="0"/>
+		<edge from-layer="0" from-port="0" to-layer="2303" to-port="0" />
+		<edge from-layer="0" from-port="0" to-layer="1278" to-port="0" />
+		<edge from-layer="0" from-port="0" to-layer="3267" to-port="0" />
+		<edge from-layer="0" from-port="0" to-layer="1846" to-port="0" />
+		<edge from-layer="0" from-port="0" to-layer="1495" to-port="0" />
+		<edge from-layer="0" from-port="0" to-layer="2779" to-port="0" />
+		<edge from-layer="0" from-port="0" to-layer="2317" to-port="0" />
+		<edge from-layer="0" from-port="0" to-layer="1264" to-port="0" />
+		<edge from-layer="0" from-port="0" to-layer="2541" to-port="0" />
+		<edge from-layer="0" from-port="0" to-layer="2555" to-port="0" />
+		<edge from-layer="0" from-port="0" to-layer="1509" to-port="0" />
+		<edge from-layer="0" from-port="0" to-layer="2793" to-port="0" />
+		<edge from-layer="0" from-port="0" to-layer="3029" to-port="0" />
+		<edge from-layer="0" from-port="0" to-layer="3043" to-port="0" />
+		<edge from-layer="0" from-port="0" to-layer="3281" to-port="0" />
+		<edge from-layer="0" from-port="0" to-layer="1832" to-port="0" />
+		<edge from-layer="0" from-port="0" to-layer="3505" to-port="0" />
+		<edge from-layer="0" from-port="0" to-layer="3519" to-port="0" />
+		<edge from-layer="0" from-port="0" to-layer="3755" to-port="0" />
+		<edge from-layer="0" from-port="0" to-layer="3769" to-port="0" />
+		<edge from-layer="0" from-port="0" to-layer="3993" to-port="0" />
+		<edge from-layer="0" from-port="0" to-layer="4007" to-port="0" />
+		<edge from-layer="0" from-port="0" to-layer="4231" to-port="0" />
+		<edge from-layer="0" from-port="0" to-layer="4245" to-port="0" />
+		<edge from-layer="0" from-port="0" to-layer="547" to-port="0" />
+		<edge from-layer="0" from-port="0" to-layer="561" to-port="0" />
+		<edge from-layer="0" from-port="0" to-layer="790" to-port="0" />
+		<edge from-layer="0" from-port="0" to-layer="804" to-port="0" />
+		<edge from-layer="0" from-port="0" to-layer="330" to-port="0" />
+		<edge from-layer="0" from-port="0" to-layer="316" to-port="0" />
+		<edge from-layer="0" from-port="0" to-layer="1021" to-port="0" />
+		<edge from-layer="0" from-port="0" to-layer="1035" to-port="0" />
+		<edge from-layer="1" from-port="0" to-layer="157" to-port="0" />
+		<edge from-layer="2" from-port="0" to-layer="133" to-port="0" />
+		<edge from-layer="3" from-port="0" to-layer="4" to-port="0" />
+		<edge from-layer="4" from-port="1" to-layer="4306" to-port="0" />
+		<edge from-layer="5" from-port="0" to-layer="6" to-port="0" />
+		<edge from-layer="6" from-port="1" to-layer="4275" to-port="0" />
+		<edge from-layer="7" from-port="0" to-layer="8" to-port="0" />
+		<edge from-layer="8" from-port="1" to-layer="4262" to-port="0" />
+		<edge from-layer="9" from-port="0" to-layer="10" to-port="0" />
+		<edge from-layer="10" from-port="1" to-layer="4210" to-port="0" />
+		<edge from-layer="11" from-port="0" to-layer="12" to-port="0" />
+		<edge from-layer="12" from-port="1" to-layer="4068" to-port="0" />
+		<edge from-layer="13" from-port="0" to-layer="14" to-port="0" />
+		<edge from-layer="14" from-port="1" to-layer="4037" to-port="0" />
+		<edge from-layer="15" from-port="0" to-layer="16" to-port="0" />
+		<edge from-layer="16" from-port="1" to-layer="4024" to-port="0" />
+		<edge from-layer="17" from-port="0" to-layer="18" to-port="0" />
+		<edge from-layer="18" from-port="1" to-layer="3972" to-port="0" />
+		<edge from-layer="19" from-port="0" to-layer="20" to-port="0" />
+		<edge from-layer="20" from-port="1" to-layer="3830" to-port="0" />
+		<edge from-layer="21" from-port="0" to-layer="22" to-port="0" />
+		<edge from-layer="22" from-port="1" to-layer="3799" to-port="0" />
+		<edge from-layer="23" from-port="0" to-layer="24" to-port="0" />
+		<edge from-layer="24" from-port="1" to-layer="3786" to-port="0" />
+		<edge from-layer="25" from-port="0" to-layer="26" to-port="0" />
+		<edge from-layer="26" from-port="1" to-layer="3734" to-port="0" />
+		<edge from-layer="27" from-port="0" to-layer="28" to-port="0" />
+		<edge from-layer="28" from-port="1" to-layer="3580" to-port="0" />
+		<edge from-layer="29" from-port="0" to-layer="30" to-port="0" />
+		<edge from-layer="30" from-port="1" to-layer="3549" to-port="0" />
+		<edge from-layer="31" from-port="0" to-layer="32" to-port="0" />
+		<edge from-layer="32" from-port="1" to-layer="3536" to-port="0" />
+		<edge from-layer="33" from-port="0" to-layer="34" to-port="0" />
+		<edge from-layer="34" from-port="1" to-layer="3484" to-port="0" />
+		<edge from-layer="35" from-port="0" to-layer="36" to-port="0" />
+		<edge from-layer="36" from-port="1" to-layer="3342" to-port="0" />
+		<edge from-layer="37" from-port="0" to-layer="38" to-port="0" />
+		<edge from-layer="38" from-port="1" to-layer="3311" to-port="0" />
+		<edge from-layer="39" from-port="0" to-layer="40" to-port="0" />
+		<edge from-layer="40" from-port="1" to-layer="3298" to-port="0" />
+		<edge from-layer="41" from-port="0" to-layer="42" to-port="0" />
+		<edge from-layer="42" from-port="1" to-layer="3246" to-port="0" />
+		<edge from-layer="43" from-port="0" to-layer="44" to-port="0" />
+		<edge from-layer="44" from-port="1" to-layer="3104" to-port="0" />
+		<edge from-layer="45" from-port="0" to-layer="46" to-port="0" />
+		<edge from-layer="46" from-port="1" to-layer="3073" to-port="0" />
+		<edge from-layer="47" from-port="0" to-layer="48" to-port="0" />
+		<edge from-layer="48" from-port="1" to-layer="3060" to-port="0" />
+		<edge from-layer="49" from-port="0" to-layer="50" to-port="0" />
+		<edge from-layer="50" from-port="1" to-layer="3008" to-port="0" />
+		<edge from-layer="51" from-port="0" to-layer="52" to-port="0" />
+		<edge from-layer="52" from-port="1" to-layer="2854" to-port="0" />
+		<edge from-layer="53" from-port="0" to-layer="54" to-port="0" />
+		<edge from-layer="54" from-port="1" to-layer="2823" to-port="0" />
+		<edge from-layer="55" from-port="0" to-layer="56" to-port="0" />
+		<edge from-layer="56" from-port="1" to-layer="2810" to-port="0" />
+		<edge from-layer="57" from-port="0" to-layer="58" to-port="0" />
+		<edge from-layer="58" from-port="1" to-layer="2758" to-port="0" />
+		<edge from-layer="59" from-port="0" to-layer="60" to-port="0" />
+		<edge from-layer="60" from-port="1" to-layer="2616" to-port="0" />
+		<edge from-layer="61" from-port="0" to-layer="62" to-port="0" />
+		<edge from-layer="62" from-port="1" to-layer="2585" to-port="0" />
+		<edge from-layer="63" from-port="0" to-layer="64" to-port="0" />
+		<edge from-layer="64" from-port="1" to-layer="2572" to-port="0" />
+		<edge from-layer="65" from-port="0" to-layer="66" to-port="0" />
+		<edge from-layer="66" from-port="1" to-layer="2520" to-port="0" />
+		<edge from-layer="67" from-port="0" to-layer="68" to-port="0" />
+		<edge from-layer="68" from-port="1" to-layer="2378" to-port="0" />
+		<edge from-layer="69" from-port="0" to-layer="70" to-port="0" />
+		<edge from-layer="70" from-port="1" to-layer="2347" to-port="0" />
+		<edge from-layer="71" from-port="0" to-layer="72" to-port="0" />
+		<edge from-layer="72" from-port="1" to-layer="2334" to-port="0" />
+		<edge from-layer="73" from-port="0" to-layer="74" to-port="0" />
+		<edge from-layer="74" from-port="1" to-layer="2282" to-port="0" />
+		<edge from-layer="75" from-port="0" to-layer="76" to-port="0" />
+		<edge from-layer="76" from-port="1" to-layer="1907" to-port="0" />
+		<edge from-layer="77" from-port="0" to-layer="78" to-port="0" />
+		<edge from-layer="78" from-port="1" to-layer="1876" to-port="0" />
+		<edge from-layer="79" from-port="0" to-layer="80" to-port="0" />
+		<edge from-layer="80" from-port="1" to-layer="1863" to-port="0" />
+		<edge from-layer="81" from-port="0" to-layer="82" to-port="0" />
+		<edge from-layer="82" from-port="1" to-layer="1811" to-port="0" />
+		<edge from-layer="83" from-port="0" to-layer="84" to-port="0" />
+		<edge from-layer="84" from-port="1" to-layer="1570" to-port="0" />
+		<edge from-layer="85" from-port="0" to-layer="86" to-port="0" />
+		<edge from-layer="86" from-port="1" to-layer="1539" to-port="0" />
+		<edge from-layer="87" from-port="0" to-layer="88" to-port="0" />
+		<edge from-layer="88" from-port="1" to-layer="1526" to-port="0" />
+		<edge from-layer="89" from-port="0" to-layer="90" to-port="0" />
+		<edge from-layer="90" from-port="1" to-layer="1474" to-port="0" />
+		<edge from-layer="91" from-port="0" to-layer="92" to-port="0" />
+		<edge from-layer="92" from-port="1" to-layer="1339" to-port="0" />
+		<edge from-layer="93" from-port="0" to-layer="94" to-port="0" />
+		<edge from-layer="94" from-port="1" to-layer="1308" to-port="0" />
+		<edge from-layer="95" from-port="0" to-layer="96" to-port="0" />
+		<edge from-layer="96" from-port="1" to-layer="1295" to-port="0" />
+		<edge from-layer="97" from-port="0" to-layer="98" to-port="0" />
+		<edge from-layer="98" from-port="1" to-layer="1243" to-port="0" />
+		<edge from-layer="99" from-port="0" to-layer="100" to-port="0" />
+		<edge from-layer="100" from-port="1" to-layer="1096" to-port="0" />
+		<edge from-layer="101" from-port="0" to-layer="102" to-port="0" />
+		<edge from-layer="102" from-port="1" to-layer="1065" to-port="0" />
+		<edge from-layer="103" from-port="0" to-layer="104" to-port="0" />
+		<edge from-layer="104" from-port="1" to-layer="1052" to-port="0" />
+		<edge from-layer="105" from-port="0" to-layer="106" to-port="0" />
+		<edge from-layer="106" from-port="1" to-layer="1000" to-port="0" />
+		<edge from-layer="107" from-port="0" to-layer="108" to-port="0" />
+		<edge from-layer="108" from-port="1" to-layer="865" to-port="0" />
+		<edge from-layer="109" from-port="0" to-layer="110" to-port="0" />
+		<edge from-layer="110" from-port="1" to-layer="834" to-port="0" />
+		<edge from-layer="111" from-port="0" to-layer="112" to-port="0" />
+		<edge from-layer="112" from-port="1" to-layer="821" to-port="0" />
+		<edge from-layer="113" from-port="0" to-layer="114" to-port="0" />
+		<edge from-layer="114" from-port="1" to-layer="769" to-port="0" />
+		<edge from-layer="115" from-port="0" to-layer="116" to-port="0" />
+		<edge from-layer="116" from-port="1" to-layer="622" to-port="0" />
+		<edge from-layer="117" from-port="0" to-layer="118" to-port="0" />
+		<edge from-layer="118" from-port="1" to-layer="591" to-port="0" />
+		<edge from-layer="119" from-port="0" to-layer="120" to-port="0" />
+		<edge from-layer="120" from-port="1" to-layer="578" to-port="0" />
+		<edge from-layer="121" from-port="0" to-layer="122" to-port="0" />
+		<edge from-layer="122" from-port="1" to-layer="526" to-port="0" />
+		<edge from-layer="123" from-port="0" to-layer="124" to-port="0" />
+		<edge from-layer="124" from-port="1" to-layer="391" to-port="0" />
+		<edge from-layer="125" from-port="0" to-layer="126" to-port="0" />
+		<edge from-layer="126" from-port="1" to-layer="360" to-port="0" />
+		<edge from-layer="127" from-port="0" to-layer="128" to-port="0" />
+		<edge from-layer="128" from-port="1" to-layer="347" to-port="0" />
+		<edge from-layer="129" from-port="0" to-layer="130" to-port="0" />
+		<edge from-layer="130" from-port="1" to-layer="295" to-port="0" />
+		<edge from-layer="131" from-port="0" to-layer="132" to-port="0" />
+		<edge from-layer="132" from-port="1" to-layer="133" to-port="1" />
+		<edge from-layer="133" from-port="2" to-layer="136" to-port="0" />
+		<edge from-layer="134" from-port="0" to-layer="135" to-port="0" />
+		<edge from-layer="135" from-port="1" to-layer="136" to-port="1" />
+		<edge from-layer="136" from-port="2" to-layer="4081" to-port="1" />
+		<edge from-layer="136" from-port="2" to-layer="222" to-port="0" />
+		<edge from-layer="136" from-port="2" to-layer="141" to-port="0" />
+		<edge from-layer="136" from-port="2" to-layer="138" to-port="0" />
+		<edge from-layer="137" from-port="0" to-layer="138" to-port="1" />
+		<edge from-layer="138" from-port="2" to-layer="140" to-port="0" />
+		<edge from-layer="139" from-port="0" to-layer="140" to-port="1" />
+		<edge from-layer="140" from-port="2" to-layer="142" to-port="0" />
+		<edge from-layer="141" from-port="1" to-layer="142" to-port="1" />
+		<edge from-layer="142" from-port="2" to-layer="145" to-port="0" />
+		<edge from-layer="143" from-port="0" to-layer="144" to-port="0" />
+		<edge from-layer="144" from-port="1" to-layer="145" to-port="1" />
+		<edge from-layer="145" from-port="2" to-layer="148" to-port="0" />
+		<edge from-layer="146" from-port="0" to-layer="147" to-port="0" />
+		<edge from-layer="147" from-port="1" to-layer="148" to-port="1" />
+		<edge from-layer="148" from-port="2" to-layer="149" to-port="0" />
+		<edge from-layer="149" from-port="1" to-layer="152" to-port="0" />
+		<edge from-layer="150" from-port="0" to-layer="151" to-port="0" />
+		<edge from-layer="151" from-port="1" to-layer="152" to-port="1" />
+		<edge from-layer="152" from-port="2" to-layer="155" to-port="0" />
+		<edge from-layer="153" from-port="0" to-layer="154" to-port="0" />
+		<edge from-layer="154" from-port="1" to-layer="155" to-port="1" />
+		<edge from-layer="155" from-port="2" to-layer="202" to-port="0" />
+		<edge from-layer="156" from-port="0" to-layer="157" to-port="1" />
+		<edge from-layer="157" from-port="2" to-layer="159" to-port="0" />
+		<edge from-layer="158" from-port="0" to-layer="159" to-port="1" />
+		<edge from-layer="159" from-port="2" to-layer="161" to-port="0" />
+		<edge from-layer="160" from-port="0" to-layer="161" to-port="1" />
+		<edge from-layer="161" from-port="2" to-layer="162" to-port="0" />
+		<edge from-layer="162" from-port="1" to-layer="165" to-port="0" />
+		<edge from-layer="163" from-port="0" to-layer="164" to-port="0" />
+		<edge from-layer="164" from-port="1" to-layer="165" to-port="1" />
+		<edge from-layer="165" from-port="2" to-layer="166" to-port="0" />
+		<edge from-layer="165" from-port="2" to-layer="167" to-port="0" />
+		<edge from-layer="166" from-port="1" to-layer="168" to-port="0" />
+		<edge from-layer="167" from-port="1" to-layer="168" to-port="1" />
+		<edge from-layer="168" from-port="2" to-layer="172" to-port="0" />
+		<edge from-layer="168" from-port="2" to-layer="176" to-port="0" />
+		<edge from-layer="169" from-port="0" to-layer="172" to-port="1" />
+		<edge from-layer="170" from-port="0" to-layer="172" to-port="2" />
+		<edge from-layer="171" from-port="0" to-layer="172" to-port="3" />
+		<edge from-layer="172" from-port="4" to-layer="177" to-port="0" />
+		<edge from-layer="173" from-port="0" to-layer="176" to-port="1" />
+		<edge from-layer="174" from-port="0" to-layer="176" to-port="2" />
+		<edge from-layer="175" from-port="0" to-layer="176" to-port="3" />
+		<edge from-layer="176" from-port="4" to-layer="177" to-port="1" />
+		<edge from-layer="177" from-port="2" to-layer="180" to-port="0" />
+		<edge from-layer="178" from-port="0" to-layer="179" to-port="0" />
+		<edge from-layer="179" from-port="1" to-layer="180" to-port="1" />
+		<edge from-layer="180" from-port="2" to-layer="183" to-port="0" />
+		<edge from-layer="181" from-port="0" to-layer="182" to-port="0" />
+		<edge from-layer="182" from-port="1" to-layer="183" to-port="1" />
+		<edge from-layer="183" from-port="2" to-layer="184" to-port="0" />
+		<edge from-layer="184" from-port="1" to-layer="187" to-port="0" />
+		<edge from-layer="185" from-port="0" to-layer="186" to-port="0" />
+		<edge from-layer="186" from-port="1" to-layer="187" to-port="1" />
+		<edge from-layer="187" from-port="2" to-layer="190" to-port="0" />
+		<edge from-layer="188" from-port="0" to-layer="189" to-port="0" />
+		<edge from-layer="189" from-port="1" to-layer="190" to-port="1" />
+		<edge from-layer="190" from-port="2" to-layer="191" to-port="0" />
+		<edge from-layer="191" from-port="1" to-layer="194" to-port="0" />
+		<edge from-layer="191" from-port="1" to-layer="2907" to-port="0" />
+		<edge from-layer="191" from-port="1" to-layer="425" to-port="0" />
+		<edge from-layer="191" from-port="1" to-layer="4109" to-port="0" />
+		<edge from-layer="191" from-port="1" to-layer="3871" to-port="0" />
+		<edge from-layer="191" from-port="1" to-layer="3633" to-port="0" />
+		<edge from-layer="191" from-port="1" to-layer="3383" to-port="0" />
+		<edge from-layer="191" from-port="1" to-layer="3145" to-port="0" />
+		<edge from-layer="191" from-port="1" to-layer="668" to-port="0" />
+		<edge from-layer="191" from-port="1" to-layer="899" to-port="0" />
+		<edge from-layer="191" from-port="1" to-layer="1142" to-port="0" />
+		<edge from-layer="191" from-port="1" to-layer="1373" to-port="0" />
+		<edge from-layer="191" from-port="1" to-layer="1610" to-port="0" />
+		<edge from-layer="191" from-port="1" to-layer="1660" to-port="0" />
+		<edge from-layer="191" from-port="1" to-layer="1710" to-port="0" />
+		<edge from-layer="191" from-port="1" to-layer="1941" to-port="0" />
+		<edge from-layer="191" from-port="1" to-layer="1998" to-port="0" />
+		<edge from-layer="191" from-port="1" to-layer="2055" to-port="0" />
+		<edge from-layer="191" from-port="1" to-layer="2112" to-port="0" />
+		<edge from-layer="191" from-port="1" to-layer="2181" to-port="0" />
+		<edge from-layer="191" from-port="1" to-layer="2419" to-port="0" />
+		<edge from-layer="191" from-port="1" to-layer="2657" to-port="0" />
+		<edge from-layer="192" from-port="0" to-layer="193" to-port="0" />
+		<edge from-layer="193" from-port="1" to-layer="194" to-port="1" />
+		<edge from-layer="194" from-port="2" to-layer="197" to-port="0" />
+		<edge from-layer="195" from-port="0" to-layer="196" to-port="0" />
+		<edge from-layer="196" from-port="1" to-layer="197" to-port="1" />
+		<edge from-layer="197" from-port="2" to-layer="199" to-port="0" />
+		<edge from-layer="198" from-port="0" to-layer="199" to-port="1" />
+		<edge from-layer="199" from-port="2" to-layer="201" to-port="0" />
+		<edge from-layer="200" from-port="0" to-layer="201" to-port="1" />
+		<edge from-layer="201" from-port="2" to-layer="202" to-port="1" />
+		<edge from-layer="202" from-port="2" to-layer="204" to-port="0" />
+		<edge from-layer="202" from-port="2" to-layer="207" to-port="0" />
+		<edge from-layer="203" from-port="0" to-layer="204" to-port="1" />
+		<edge from-layer="204" from-port="2" to-layer="206" to-port="0" />
+		<edge from-layer="205" from-port="0" to-layer="206" to-port="1" />
+		<edge from-layer="206" from-port="2" to-layer="208" to-port="0" />
+		<edge from-layer="207" from-port="1" to-layer="208" to-port="1" />
+		<edge from-layer="208" from-port="2" to-layer="211" to-port="0" />
+		<edge from-layer="209" from-port="0" to-layer="210" to-port="0" />
+		<edge from-layer="210" from-port="1" to-layer="211" to-port="1" />
+		<edge from-layer="211" from-port="2" to-layer="214" to-port="0" />
+		<edge from-layer="212" from-port="0" to-layer="213" to-port="0" />
+		<edge from-layer="213" from-port="1" to-layer="214" to-port="1" />
+		<edge from-layer="214" from-port="2" to-layer="215" to-port="0" />
+		<edge from-layer="215" from-port="1" to-layer="218" to-port="0" />
+		<edge from-layer="216" from-port="0" to-layer="217" to-port="0" />
+		<edge from-layer="217" from-port="1" to-layer="218" to-port="1" />
+		<edge from-layer="218" from-port="2" to-layer="221" to-port="0" />
+		<edge from-layer="219" from-port="0" to-layer="220" to-port="0" />
+		<edge from-layer="220" from-port="1" to-layer="221" to-port="1" />
+		<edge from-layer="221" from-port="2" to-layer="222" to-port="1" />
+		<edge from-layer="222" from-port="2" to-layer="224" to-port="0" />
+		<edge from-layer="222" from-port="2" to-layer="227" to-port="0" />
+		<edge from-layer="222" from-port="2" to-layer="403" to-port="1" />
+		<edge from-layer="223" from-port="0" to-layer="224" to-port="1" />
+		<edge from-layer="224" from-port="2" to-layer="226" to-port="0" />
+		<edge from-layer="225" from-port="0" to-layer="226" to-port="1" />
+		<edge from-layer="226" from-port="2" to-layer="228" to-port="0" />
+		<edge from-layer="227" from-port="1" to-layer="228" to-port="1" />
+		<edge from-layer="228" from-port="2" to-layer="231" to-port="0" />
+		<edge from-layer="229" from-port="0" to-layer="230" to-port="0" />
+		<edge from-layer="230" from-port="1" to-layer="231" to-port="1" />
+		<edge from-layer="231" from-port="2" to-layer="234" to-port="0" />
+		<edge from-layer="232" from-port="0" to-layer="233" to-port="0" />
+		<edge from-layer="233" from-port="1" to-layer="234" to-port="1" />
+		<edge from-layer="234" from-port="2" to-layer="237" to-port="0" />
+		<edge from-layer="235" from-port="0" to-layer="236" to-port="0" />
+		<edge from-layer="236" from-port="1" to-layer="237" to-port="1" />
+		<edge from-layer="237" from-port="2" to-layer="240" to-port="0" />
+		<edge from-layer="238" from-port="0" to-layer="239" to-port="0" />
+		<edge from-layer="239" from-port="1" to-layer="240" to-port="1" />
+		<edge from-layer="240" from-port="2" to-layer="242" to-port="0" />
+		<edge from-layer="241" from-port="0" to-layer="242" to-port="1" />
+		<edge from-layer="242" from-port="2" to-layer="244" to-port="0" />
+		<edge from-layer="243" from-port="0" to-layer="244" to-port="1" />
+		<edge from-layer="244" from-port="2" to-layer="246" to-port="0" />
+		<edge from-layer="244" from-port="2" to-layer="296" to-port="1" />
+		<edge from-layer="245" from-port="0" to-layer="246" to-port="1" />
+		<edge from-layer="246" from-port="2" to-layer="249" to-port="0" />
+		<edge from-layer="247" from-port="0" to-layer="248" to-port="0" />
+		<edge from-layer="248" from-port="1" to-layer="249" to-port="1" />
+		<edge from-layer="249" from-port="2" to-layer="252" to-port="0" />
+		<edge from-layer="250" from-port="0" to-layer="251" to-port="0" />
+		<edge from-layer="251" from-port="1" to-layer="252" to-port="1" />
+		<edge from-layer="252" from-port="2" to-layer="264" to-port="0" />
+		<edge from-layer="252" from-port="2" to-layer="278" to-port="0" />
+		<edge from-layer="252" from-port="2" to-layer="255" to-port="0" />
+		<edge from-layer="253" from-port="0" to-layer="254" to-port="0" />
+		<edge from-layer="254" from-port="1" to-layer="255" to-port="1" />
+		<edge from-layer="255" from-port="2" to-layer="257" to-port="0" />
+		<edge from-layer="256" from-port="0" to-layer="257" to-port="1" />
+		<edge from-layer="257" from-port="2" to-layer="259" to-port="0" />
+		<edge from-layer="258" from-port="0" to-layer="259" to-port="1" />
+		<edge from-layer="259" from-port="2" to-layer="261" to-port="0" />
+		<edge from-layer="260" from-port="0" to-layer="261" to-port="1" />
+		<edge from-layer="261" from-port="2" to-layer="274" to-port="0" />
+		<edge from-layer="262" from-port="0" to-layer="263" to-port="0" />
+		<edge from-layer="263" from-port="1" to-layer="264" to-port="1" />
+		<edge from-layer="264" from-port="2" to-layer="266" to-port="0" />
+		<edge from-layer="265" from-port="0" to-layer="266" to-port="1" />
+		<edge from-layer="266" from-port="2" to-layer="268" to-port="0" />
+		<edge from-layer="267" from-port="0" to-layer="268" to-port="1" />
+		<edge from-layer="268" from-port="2" to-layer="270" to-port="0" />
+		<edge from-layer="269" from-port="0" to-layer="270" to-port="1" />
+		<edge from-layer="270" from-port="2" to-layer="273" to-port="0" />
+		<edge from-layer="271" from-port="0" to-layer="272" to-port="0" />
+		<edge from-layer="272" from-port="1" to-layer="273" to-port="1" />
+		<edge from-layer="273" from-port="2" to-layer="274" to-port="1" />
+		<edge from-layer="274" from-port="2" to-layer="275" to-port="0" />
+		<edge from-layer="275" from-port="1" to-layer="285" to-port="0" />
+		<edge from-layer="276" from-port="0" to-layer="277" to-port="0" />
+		<edge from-layer="277" from-port="1" to-layer="278" to-port="1" />
+		<edge from-layer="278" from-port="2" to-layer="280" to-port="0" />
+		<edge from-layer="279" from-port="0" to-layer="280" to-port="1" />
+		<edge from-layer="280" from-port="2" to-layer="282" to-port="0" />
+		<edge from-layer="281" from-port="0" to-layer="282" to-port="1" />
+		<edge from-layer="282" from-port="2" to-layer="284" to-port="0" />
+		<edge from-layer="283" from-port="0" to-layer="284" to-port="1" />
+		<edge from-layer="284" from-port="2" to-layer="285" to-port="1" />
+		<edge from-layer="285" from-port="2" to-layer="287" to-port="0" />
+		<edge from-layer="286" from-port="0" to-layer="287" to-port="1" />
+		<edge from-layer="287" from-port="2" to-layer="289" to-port="0" />
+		<edge from-layer="288" from-port="0" to-layer="289" to-port="1" />
+		<edge from-layer="289" from-port="2" to-layer="291" to-port="0" />
+		<edge from-layer="290" from-port="0" to-layer="291" to-port="1" />
+		<edge from-layer="291" from-port="2" to-layer="294" to-port="0" />
+		<edge from-layer="292" from-port="0" to-layer="293" to-port="0" />
+		<edge from-layer="293" from-port="1" to-layer="294" to-port="1" />
+		<edge from-layer="294" from-port="2" to-layer="295" to-port="1" />
+		<edge from-layer="295" from-port="2" to-layer="296" to-port="0" />
+		<edge from-layer="296" from-port="2" to-layer="348" to-port="1" />
+		<edge from-layer="296" from-port="2" to-layer="298" to-port="0" />
+		<edge from-layer="297" from-port="0" to-layer="298" to-port="1" />
+		<edge from-layer="298" from-port="2" to-layer="301" to-port="0" />
+		<edge from-layer="299" from-port="0" to-layer="300" to-port="0" />
+		<edge from-layer="300" from-port="1" to-layer="301" to-port="1" />
+		<edge from-layer="301" from-port="2" to-layer="304" to-port="0" />
+		<edge from-layer="302" from-port="0" to-layer="303" to-port="0" />
+		<edge from-layer="303" from-port="1" to-layer="304" to-port="1" />
+		<edge from-layer="304" from-port="2" to-layer="307" to-port="0" />
+		<edge from-layer="305" from-port="0" to-layer="306" to-port="0" />
+		<edge from-layer="306" from-port="1" to-layer="307" to-port="1" />
+		<edge from-layer="307" from-port="2" to-layer="309" to-port="0" />
+		<edge from-layer="308" from-port="0" to-layer="309" to-port="1" />
+		<edge from-layer="309" from-port="2" to-layer="311" to-port="0" />
+		<edge from-layer="310" from-port="0" to-layer="311" to-port="1" />
+		<edge from-layer="311" from-port="2" to-layer="313" to-port="0" />
+		<edge from-layer="312" from-port="0" to-layer="313" to-port="1" />
+		<edge from-layer="313" from-port="2" to-layer="326" to-port="0" />
+		<edge from-layer="314" from-port="0" to-layer="315" to-port="0" />
+		<edge from-layer="315" from-port="1" to-layer="316" to-port="1" />
+		<edge from-layer="316" from-port="2" to-layer="318" to-port="0" />
+		<edge from-layer="317" from-port="0" to-layer="318" to-port="1" />
+		<edge from-layer="318" from-port="2" to-layer="320" to-port="0" />
+		<edge from-layer="319" from-port="0" to-layer="320" to-port="1" />
+		<edge from-layer="320" from-port="2" to-layer="322" to-port="0" />
+		<edge from-layer="321" from-port="0" to-layer="322" to-port="1" />
+		<edge from-layer="322" from-port="2" to-layer="325" to-port="0" />
+		<edge from-layer="323" from-port="0" to-layer="324" to-port="0" />
+		<edge from-layer="324" from-port="1" to-layer="325" to-port="1" />
+		<edge from-layer="325" from-port="2" to-layer="326" to-port="1" />
+		<edge from-layer="326" from-port="2" to-layer="327" to-port="0" />
+		<edge from-layer="327" from-port="1" to-layer="337" to-port="0" />
+		<edge from-layer="328" from-port="0" to-layer="329" to-port="0" />
+		<edge from-layer="329" from-port="1" to-layer="330" to-port="1" />
+		<edge from-layer="330" from-port="2" to-layer="332" to-port="0" />
+		<edge from-layer="331" from-port="0" to-layer="332" to-port="1" />
+		<edge from-layer="332" from-port="2" to-layer="334" to-port="0" />
+		<edge from-layer="333" from-port="0" to-layer="334" to-port="1" />
+		<edge from-layer="334" from-port="2" to-layer="336" to-port="0" />
+		<edge from-layer="335" from-port="0" to-layer="336" to-port="1" />
+		<edge from-layer="336" from-port="2" to-layer="337" to-port="1" />
+		<edge from-layer="337" from-port="2" to-layer="339" to-port="0" />
+		<edge from-layer="338" from-port="0" to-layer="339" to-port="1" />
+		<edge from-layer="339" from-port="2" to-layer="341" to-port="0" />
+		<edge from-layer="340" from-port="0" to-layer="341" to-port="1" />
+		<edge from-layer="341" from-port="2" to-layer="343" to-port="0" />
+		<edge from-layer="342" from-port="0" to-layer="343" to-port="1" />
+		<edge from-layer="343" from-port="2" to-layer="346" to-port="0" />
+		<edge from-layer="344" from-port="0" to-layer="345" to-port="0" />
+		<edge from-layer="345" from-port="1" to-layer="346" to-port="1" />
+		<edge from-layer="346" from-port="2" to-layer="347" to-port="1" />
+		<edge from-layer="347" from-port="2" to-layer="348" to-port="0" />
+		<edge from-layer="348" from-port="2" to-layer="350" to-port="0" />
+		<edge from-layer="348" from-port="2" to-layer="392" to-port="1" />
+		<edge from-layer="349" from-port="0" to-layer="350" to-port="1" />
+		<edge from-layer="350" from-port="2" to-layer="353" to-port="0" />
+		<edge from-layer="351" from-port="0" to-layer="352" to-port="0" />
+		<edge from-layer="352" from-port="1" to-layer="353" to-port="1" />
+		<edge from-layer="353" from-port="2" to-layer="356" to-port="0" />
+		<edge from-layer="354" from-port="0" to-layer="355" to-port="0" />
+		<edge from-layer="355" from-port="1" to-layer="356" to-port="1" />
+		<edge from-layer="356" from-port="2" to-layer="359" to-port="0" />
+		<edge from-layer="357" from-port="0" to-layer="358" to-port="0" />
+		<edge from-layer="358" from-port="1" to-layer="359" to-port="1" />
+		<edge from-layer="359" from-port="2" to-layer="360" to-port="1" />
+		<edge from-layer="360" from-port="2" to-layer="364" to-port="0" />
+		<edge from-layer="360" from-port="2" to-layer="385" to-port="0" />
+		<edge from-layer="360" from-port="2" to-layer="375" to-port="0" />
+		<edge from-layer="361" from-port="0" to-layer="375" to-port="1" />
+		<edge from-layer="362" from-port="0" to-layer="373" to-port="0" />
+		<edge from-layer="363" from-port="0" to-layer="373" to-port="1" />
+		<edge from-layer="364" from-port="1" to-layer="367" to-port="0" />
+		<edge from-layer="365" from-port="0" to-layer="367" to-port="1" />
+		<edge from-layer="366" from-port="0" to-layer="367" to-port="2" />
+		<edge from-layer="367" from-port="3" to-layer="369" to-port="0" />
+		<edge from-layer="368" from-port="0" to-layer="369" to-port="1" />
+		<edge from-layer="369" from-port="2" to-layer="371" to-port="0" />
+		<edge from-layer="370" from-port="0" to-layer="371" to-port="1" />
+		<edge from-layer="371" from-port="2" to-layer="379" to-port="2" />
+		<edge from-layer="371" from-port="2" to-layer="382" to-port="0" />
+		<edge from-layer="371" from-port="2" to-layer="373" to-port="2" />
+		<edge from-layer="372" from-port="0" to-layer="373" to-port="3" />
+		<edge from-layer="373" from-port="4" to-layer="375" to-port="2" />
+		<edge from-layer="374" from-port="0" to-layer="375" to-port="3" />
+		<edge from-layer="375" from-port="4" to-layer="387" to-port="0" />
+		<edge from-layer="376" from-port="0" to-layer="379" to-port="0" />
+		<edge from-layer="377" from-port="0" to-layer="383" to-port="1" />
+		<edge from-layer="377" from-port="0" to-layer="379" to-port="1" />
+		<edge from-layer="378" from-port="0" to-layer="383" to-port="3" />
+		<edge from-layer="378" from-port="0" to-layer="379" to-port="3" />
+		<edge from-layer="379" from-port="4" to-layer="385" to-port="1" />
+		<edge from-layer="380" from-port="0" to-layer="383" to-port="0" />
+		<edge from-layer="381" from-port="0" to-layer="382" to-port="1" />
+		<edge from-layer="382" from-port="2" to-layer="383" to-port="2" />
+		<edge from-layer="383" from-port="4" to-layer="385" to-port="2" />
+		<edge from-layer="384" from-port="0" to-layer="385" to-port="3" />
+		<edge from-layer="385" from-port="4" to-layer="386" to-port="0" />
+		<edge from-layer="386" from-port="1" to-layer="387" to-port="1" />
+		<edge from-layer="387" from-port="2" to-layer="390" to-port="0" />
+		<edge from-layer="388" from-port="0" to-layer="389" to-port="0" />
+		<edge from-layer="389" from-port="1" to-layer="390" to-port="1" />
+		<edge from-layer="390" from-port="2" to-layer="391" to-port="1" />
+		<edge from-layer="391" from-port="2" to-layer="392" to-port="0" />
+		<edge from-layer="392" from-port="2" to-layer="394" to-port="0" />
+		<edge from-layer="393" from-port="0" to-layer="394" to-port="1" />
+		<edge from-layer="394" from-port="2" to-layer="396" to-port="0" />
+		<edge from-layer="395" from-port="0" to-layer="396" to-port="1" />
+		<edge from-layer="396" from-port="2" to-layer="399" to-port="0" />
+		<edge from-layer="397" from-port="0" to-layer="398" to-port="0" />
+		<edge from-layer="398" from-port="1" to-layer="399" to-port="1" />
+		<edge from-layer="399" from-port="2" to-layer="402" to-port="0" />
+		<edge from-layer="400" from-port="0" to-layer="401" to-port="0" />
+		<edge from-layer="401" from-port="1" to-layer="402" to-port="1" />
+		<edge from-layer="402" from-port="2" to-layer="403" to-port="0" />
+		<edge from-layer="403" from-port="2" to-layer="405" to-port="0" />
+		<edge from-layer="403" from-port="2" to-layer="408" to-port="0" />
+		<edge from-layer="403" from-port="2" to-layer="3843" to-port="1" />
+		<edge from-layer="403" from-port="2" to-layer="453" to-port="0" />
+		<edge from-layer="404" from-port="0" to-layer="405" to-port="1" />
+		<edge from-layer="405" from-port="2" to-layer="407" to-port="0" />
+		<edge from-layer="406" from-port="0" to-layer="407" to-port="1" />
+		<edge from-layer="407" from-port="2" to-layer="409" to-port="0" />
+		<edge from-layer="408" from-port="1" to-layer="409" to-port="1" />
+		<edge from-layer="409" from-port="2" to-layer="412" to-port="0" />
+		<edge from-layer="410" from-port="0" to-layer="411" to-port="0" />
+		<edge from-layer="411" from-port="1" to-layer="412" to-port="1" />
+		<edge from-layer="412" from-port="2" to-layer="415" to-port="0" />
+		<edge from-layer="413" from-port="0" to-layer="414" to-port="0" />
+		<edge from-layer="414" from-port="1" to-layer="415" to-port="1" />
+		<edge from-layer="415" from-port="2" to-layer="416" to-port="0" />
+		<edge from-layer="416" from-port="1" to-layer="419" to-port="0" />
+		<edge from-layer="417" from-port="0" to-layer="418" to-port="0" />
+		<edge from-layer="418" from-port="1" to-layer="419" to-port="1" />
+		<edge from-layer="419" from-port="2" to-layer="422" to-port="0" />
+		<edge from-layer="420" from-port="0" to-layer="421" to-port="0" />
+		<edge from-layer="421" from-port="1" to-layer="422" to-port="1" />
+		<edge from-layer="422" from-port="2" to-layer="433" to-port="0" />
+		<edge from-layer="423" from-port="0" to-layer="424" to-port="0" />
+		<edge from-layer="424" from-port="1" to-layer="425" to-port="1" />
+		<edge from-layer="425" from-port="2" to-layer="428" to-port="0" />
+		<edge from-layer="426" from-port="0" to-layer="427" to-port="0" />
+		<edge from-layer="427" from-port="1" to-layer="428" to-port="1" />
+		<edge from-layer="428" from-port="2" to-layer="430" to-port="0" />
+		<edge from-layer="429" from-port="0" to-layer="430" to-port="1" />
+		<edge from-layer="430" from-port="2" to-layer="432" to-port="0" />
+		<edge from-layer="431" from-port="0" to-layer="432" to-port="1" />
+		<edge from-layer="432" from-port="2" to-layer="433" to-port="1" />
+		<edge from-layer="433" from-port="2" to-layer="438" to-port="0" />
+		<edge from-layer="433" from-port="2" to-layer="435" to-port="0" />
+		<edge from-layer="434" from-port="0" to-layer="435" to-port="1" />
+		<edge from-layer="435" from-port="2" to-layer="437" to-port="0" />
+		<edge from-layer="436" from-port="0" to-layer="437" to-port="1" />
+		<edge from-layer="437" from-port="2" to-layer="439" to-port="0" />
+		<edge from-layer="438" from-port="1" to-layer="439" to-port="1" />
+		<edge from-layer="439" from-port="2" to-layer="442" to-port="0" />
+		<edge from-layer="440" from-port="0" to-layer="441" to-port="0" />
+		<edge from-layer="441" from-port="1" to-layer="442" to-port="1" />
+		<edge from-layer="442" from-port="2" to-layer="445" to-port="0" />
+		<edge from-layer="443" from-port="0" to-layer="444" to-port="0" />
+		<edge from-layer="444" from-port="1" to-layer="445" to-port="1" />
+		<edge from-layer="445" from-port="2" to-layer="446" to-port="0" />
+		<edge from-layer="446" from-port="1" to-layer="449" to-port="0" />
+		<edge from-layer="447" from-port="0" to-layer="448" to-port="0" />
+		<edge from-layer="448" from-port="1" to-layer="449" to-port="1" />
+		<edge from-layer="449" from-port="2" to-layer="452" to-port="0" />
+		<edge from-layer="450" from-port="0" to-layer="451" to-port="0" />
+		<edge from-layer="451" from-port="1" to-layer="452" to-port="1" />
+		<edge from-layer="452" from-port="2" to-layer="453" to-port="1" />
+		<edge from-layer="453" from-port="2" to-layer="634" to-port="1" />
+		<edge from-layer="453" from-port="2" to-layer="458" to-port="0" />
+		<edge from-layer="453" from-port="2" to-layer="455" to-port="0" />
+		<edge from-layer="454" from-port="0" to-layer="455" to-port="1" />
+		<edge from-layer="455" from-port="2" to-layer="457" to-port="0" />
+		<edge from-layer="456" from-port="0" to-layer="457" to-port="1" />
+		<edge from-layer="457" from-port="2" to-layer="459" to-port="0" />
+		<edge from-layer="458" from-port="1" to-layer="459" to-port="1" />
+		<edge from-layer="459" from-port="2" to-layer="462" to-port="0" />
+		<edge from-layer="460" from-port="0" to-layer="461" to-port="0" />
+		<edge from-layer="461" from-port="1" to-layer="462" to-port="1" />
+		<edge from-layer="462" from-port="2" to-layer="465" to-port="0" />
+		<edge from-layer="463" from-port="0" to-layer="464" to-port="0" />
+		<edge from-layer="464" from-port="1" to-layer="465" to-port="1" />
+		<edge from-layer="465" from-port="2" to-layer="468" to-port="0" />
+		<edge from-layer="466" from-port="0" to-layer="467" to-port="0" />
+		<edge from-layer="467" from-port="1" to-layer="468" to-port="1" />
+		<edge from-layer="468" from-port="2" to-layer="471" to-port="0" />
+		<edge from-layer="469" from-port="0" to-layer="470" to-port="0" />
+		<edge from-layer="470" from-port="1" to-layer="471" to-port="1" />
+		<edge from-layer="471" from-port="2" to-layer="473" to-port="0" />
+		<edge from-layer="472" from-port="0" to-layer="473" to-port="1" />
+		<edge from-layer="473" from-port="2" to-layer="475" to-port="0" />
+		<edge from-layer="474" from-port="0" to-layer="475" to-port="1" />
+		<edge from-layer="475" from-port="2" to-layer="477" to-port="0" />
+		<edge from-layer="475" from-port="2" to-layer="527" to-port="1" />
+		<edge from-layer="476" from-port="0" to-layer="477" to-port="1" />
+		<edge from-layer="477" from-port="2" to-layer="480" to-port="0" />
+		<edge from-layer="478" from-port="0" to-layer="479" to-port="0" />
+		<edge from-layer="479" from-port="1" to-layer="480" to-port="1" />
+		<edge from-layer="480" from-port="2" to-layer="483" to-port="0" />
+		<edge from-layer="481" from-port="0" to-layer="482" to-port="0" />
+		<edge from-layer="482" from-port="1" to-layer="483" to-port="1" />
+		<edge from-layer="483" from-port="2" to-layer="509" to-port="0" />
+		<edge from-layer="483" from-port="2" to-layer="486" to-port="0" />
+		<edge from-layer="483" from-port="2" to-layer="495" to-port="0" />
+		<edge from-layer="484" from-port="0" to-layer="485" to-port="0" />
+		<edge from-layer="485" from-port="1" to-layer="486" to-port="1" />
+		<edge from-layer="486" from-port="2" to-layer="488" to-port="0" />
+		<edge from-layer="487" from-port="0" to-layer="488" to-port="1" />
+		<edge from-layer="488" from-port="2" to-layer="490" to-port="0" />
+		<edge from-layer="489" from-port="0" to-layer="490" to-port="1" />
+		<edge from-layer="490" from-port="2" to-layer="492" to-port="0" />
+		<edge from-layer="491" from-port="0" to-layer="492" to-port="1" />
+		<edge from-layer="492" from-port="2" to-layer="505" to-port="0" />
+		<edge from-layer="493" from-port="0" to-layer="494" to-port="0" />
+		<edge from-layer="494" from-port="1" to-layer="495" to-port="1" />
+		<edge from-layer="495" from-port="2" to-layer="497" to-port="0" />
+		<edge from-layer="496" from-port="0" to-layer="497" to-port="1" />
+		<edge from-layer="497" from-port="2" to-layer="499" to-port="0" />
+		<edge from-layer="498" from-port="0" to-layer="499" to-port="1" />
+		<edge from-layer="499" from-port="2" to-layer="501" to-port="0" />
+		<edge from-layer="500" from-port="0" to-layer="501" to-port="1" />
+		<edge from-layer="501" from-port="2" to-layer="504" to-port="0" />
+		<edge from-layer="502" from-port="0" to-layer="503" to-port="0" />
+		<edge from-layer="503" from-port="1" to-layer="504" to-port="1" />
+		<edge from-layer="504" from-port="2" to-layer="505" to-port="1" />
+		<edge from-layer="505" from-port="2" to-layer="506" to-port="0" />
+		<edge from-layer="506" from-port="1" to-layer="516" to-port="0" />
+		<edge from-layer="507" from-port="0" to-layer="508" to-port="0" />
+		<edge from-layer="508" from-port="1" to-layer="509" to-port="1" />
+		<edge from-layer="509" from-port="2" to-layer="511" to-port="0" />
+		<edge from-layer="510" from-port="0" to-layer="511" to-port="1" />
+		<edge from-layer="511" from-port="2" to-layer="513" to-port="0" />
+		<edge from-layer="512" from-port="0" to-layer="513" to-port="1" />
+		<edge from-layer="513" from-port="2" to-layer="515" to-port="0" />
+		<edge from-layer="514" from-port="0" to-layer="515" to-port="1" />
+		<edge from-layer="515" from-port="2" to-layer="516" to-port="1" />
+		<edge from-layer="516" from-port="2" to-layer="518" to-port="0" />
+		<edge from-layer="517" from-port="0" to-layer="518" to-port="1" />
+		<edge from-layer="518" from-port="2" to-layer="520" to-port="0" />
+		<edge from-layer="519" from-port="0" to-layer="520" to-port="1" />
+		<edge from-layer="520" from-port="2" to-layer="522" to-port="0" />
+		<edge from-layer="521" from-port="0" to-layer="522" to-port="1" />
+		<edge from-layer="522" from-port="2" to-layer="525" to-port="0" />
+		<edge from-layer="523" from-port="0" to-layer="524" to-port="0" />
+		<edge from-layer="524" from-port="1" to-layer="525" to-port="1" />
+		<edge from-layer="525" from-port="2" to-layer="526" to-port="1" />
+		<edge from-layer="526" from-port="2" to-layer="527" to-port="0" />
+		<edge from-layer="527" from-port="2" to-layer="529" to-port="0" />
+		<edge from-layer="527" from-port="2" to-layer="579" to-port="1" />
+		<edge from-layer="528" from-port="0" to-layer="529" to-port="1" />
+		<edge from-layer="529" from-port="2" to-layer="532" to-port="0" />
+		<edge from-layer="530" from-port="0" to-layer="531" to-port="0" />
+		<edge from-layer="531" from-port="1" to-layer="532" to-port="1" />
+		<edge from-layer="532" from-port="2" to-layer="535" to-port="0" />
+		<edge from-layer="533" from-port="0" to-layer="534" to-port="0" />
+		<edge from-layer="534" from-port="1" to-layer="535" to-port="1" />
+		<edge from-layer="535" from-port="2" to-layer="538" to-port="0" />
+		<edge from-layer="536" from-port="0" to-layer="537" to-port="0" />
+		<edge from-layer="537" from-port="1" to-layer="538" to-port="1" />
+		<edge from-layer="538" from-port="2" to-layer="540" to-port="0" />
+		<edge from-layer="539" from-port="0" to-layer="540" to-port="1" />
+		<edge from-layer="540" from-port="2" to-layer="542" to-port="0" />
+		<edge from-layer="541" from-port="0" to-layer="542" to-port="1" />
+		<edge from-layer="542" from-port="2" to-layer="544" to-port="0" />
+		<edge from-layer="543" from-port="0" to-layer="544" to-port="1" />
+		<edge from-layer="544" from-port="2" to-layer="557" to-port="0" />
+		<edge from-layer="545" from-port="0" to-layer="546" to-port="0" />
+		<edge from-layer="546" from-port="1" to-layer="547" to-port="1" />
+		<edge from-layer="547" from-port="2" to-layer="549" to-port="0" />
+		<edge from-layer="548" from-port="0" to-layer="549" to-port="1" />
+		<edge from-layer="549" from-port="2" to-layer="551" to-port="0" />
+		<edge from-layer="550" from-port="0" to-layer="551" to-port="1" />
+		<edge from-layer="551" from-port="2" to-layer="553" to-port="0" />
+		<edge from-layer="552" from-port="0" to-layer="553" to-port="1" />
+		<edge from-layer="553" from-port="2" to-layer="556" to-port="0" />
+		<edge from-layer="554" from-port="0" to-layer="555" to-port="0" />
+		<edge from-layer="555" from-port="1" to-layer="556" to-port="1" />
+		<edge from-layer="556" from-port="2" to-layer="557" to-port="1" />
+		<edge from-layer="557" from-port="2" to-layer="558" to-port="0" />
+		<edge from-layer="558" from-port="1" to-layer="568" to-port="0" />
+		<edge from-layer="559" from-port="0" to-layer="560" to-port="0" />
+		<edge from-layer="560" from-port="1" to-layer="561" to-port="1" />
+		<edge from-layer="561" from-port="2" to-layer="563" to-port="0" />
+		<edge from-layer="562" from-port="0" to-layer="563" to-port="1" />
+		<edge from-layer="563" from-port="2" to-layer="565" to-port="0" />
+		<edge from-layer="564" from-port="0" to-layer="565" to-port="1" />
+		<edge from-layer="565" from-port="2" to-layer="567" to-port="0" />
+		<edge from-layer="566" from-port="0" to-layer="567" to-port="1" />
+		<edge from-layer="567" from-port="2" to-layer="568" to-port="1" />
+		<edge from-layer="568" from-port="2" to-layer="570" to-port="0" />
+		<edge from-layer="569" from-port="0" to-layer="570" to-port="1" />
+		<edge from-layer="570" from-port="2" to-layer="572" to-port="0" />
+		<edge from-layer="571" from-port="0" to-layer="572" to-port="1" />
+		<edge from-layer="572" from-port="2" to-layer="574" to-port="0" />
+		<edge from-layer="573" from-port="0" to-layer="574" to-port="1" />
+		<edge from-layer="574" from-port="2" to-layer="577" to-port="0" />
+		<edge from-layer="575" from-port="0" to-layer="576" to-port="0" />
+		<edge from-layer="576" from-port="1" to-layer="577" to-port="1" />
+		<edge from-layer="577" from-port="2" to-layer="578" to-port="1" />
+		<edge from-layer="578" from-port="2" to-layer="579" to-port="0" />
+		<edge from-layer="579" from-port="2" to-layer="581" to-port="0" />
+		<edge from-layer="579" from-port="2" to-layer="623" to-port="1" />
+		<edge from-layer="580" from-port="0" to-layer="581" to-port="1" />
+		<edge from-layer="581" from-port="2" to-layer="584" to-port="0" />
+		<edge from-layer="582" from-port="0" to-layer="583" to-port="0" />
+		<edge from-layer="583" from-port="1" to-layer="584" to-port="1" />
+		<edge from-layer="584" from-port="2" to-layer="587" to-port="0" />
+		<edge from-layer="585" from-port="0" to-layer="586" to-port="0" />
+		<edge from-layer="586" from-port="1" to-layer="587" to-port="1" />
+		<edge from-layer="587" from-port="2" to-layer="590" to-port="0" />
+		<edge from-layer="588" from-port="0" to-layer="589" to-port="0" />
+		<edge from-layer="589" from-port="1" to-layer="590" to-port="1" />
+		<edge from-layer="590" from-port="2" to-layer="591" to-port="1" />
+		<edge from-layer="591" from-port="2" to-layer="616" to-port="0" />
+		<edge from-layer="591" from-port="2" to-layer="595" to-port="0" />
+		<edge from-layer="591" from-port="2" to-layer="606" to-port="0" />
+		<edge from-layer="592" from-port="0" to-layer="606" to-port="1" />
+		<edge from-layer="593" from-port="0" to-layer="604" to-port="0" />
+		<edge from-layer="594" from-port="0" to-layer="604" to-port="1" />
+		<edge from-layer="595" from-port="1" to-layer="598" to-port="0" />
+		<edge from-layer="596" from-port="0" to-layer="598" to-port="1" />
+		<edge from-layer="597" from-port="0" to-layer="598" to-port="2" />
+		<edge from-layer="598" from-port="3" to-layer="600" to-port="0" />
+		<edge from-layer="599" from-port="0" to-layer="600" to-port="1" />
+		<edge from-layer="600" from-port="2" to-layer="602" to-port="0" />
+		<edge from-layer="601" from-port="0" to-layer="602" to-port="1" />
+		<edge from-layer="602" from-port="2" to-layer="604" to-port="2" />
+		<edge from-layer="602" from-port="2" to-layer="613" to-port="0" />
+		<edge from-layer="602" from-port="2" to-layer="610" to-port="2" />
+		<edge from-layer="603" from-port="0" to-layer="604" to-port="3" />
+		<edge from-layer="604" from-port="4" to-layer="606" to-port="2" />
+		<edge from-layer="605" from-port="0" to-layer="606" to-port="3" />
+		<edge from-layer="606" from-port="4" to-layer="618" to-port="0" />
+		<edge from-layer="607" from-port="0" to-layer="610" to-port="0" />
+		<edge from-layer="608" from-port="0" to-layer="614" to-port="1" />
+		<edge from-layer="608" from-port="0" to-layer="610" to-port="1" />
+		<edge from-layer="609" from-port="0" to-layer="610" to-port="3" />
+		<edge from-layer="609" from-port="0" to-layer="614" to-port="3" />
+		<edge from-layer="610" from-port="4" to-layer="616" to-port="1" />
+		<edge from-layer="611" from-port="0" to-layer="614" to-port="0" />
+		<edge from-layer="612" from-port="0" to-layer="613" to-port="1" />
+		<edge from-layer="613" from-port="2" to-layer="614" to-port="2" />
+		<edge from-layer="614" from-port="4" to-layer="616" to-port="2" />
+		<edge from-layer="615" from-port="0" to-layer="616" to-port="3" />
+		<edge from-layer="616" from-port="4" to-layer="617" to-port="0" />
+		<edge from-layer="617" from-port="1" to-layer="618" to-port="1" />
+		<edge from-layer="618" from-port="2" to-layer="621" to-port="0" />
+		<edge from-layer="619" from-port="0" to-layer="620" to-port="0" />
+		<edge from-layer="620" from-port="1" to-layer="621" to-port="1" />
+		<edge from-layer="621" from-port="2" to-layer="622" to-port="1" />
+		<edge from-layer="622" from-port="2" to-layer="623" to-port="0" />
+		<edge from-layer="623" from-port="2" to-layer="625" to-port="0" />
+		<edge from-layer="624" from-port="0" to-layer="625" to-port="1" />
+		<edge from-layer="625" from-port="2" to-layer="627" to-port="0" />
+		<edge from-layer="626" from-port="0" to-layer="627" to-port="1" />
+		<edge from-layer="627" from-port="2" to-layer="630" to-port="0" />
+		<edge from-layer="628" from-port="0" to-layer="629" to-port="0" />
+		<edge from-layer="629" from-port="1" to-layer="630" to-port="1" />
+		<edge from-layer="630" from-port="2" to-layer="633" to-port="0" />
+		<edge from-layer="631" from-port="0" to-layer="632" to-port="0" />
+		<edge from-layer="632" from-port="1" to-layer="633" to-port="1" />
+		<edge from-layer="633" from-port="2" to-layer="634" to-port="0" />
+		<edge from-layer="634" from-port="2" to-layer="3605" to-port="1" />
+		<edge from-layer="634" from-port="2" to-layer="637" to-port="0" />
+		<edge from-layer="635" from-port="0" to-layer="636" to-port="0" />
+		<edge from-layer="636" from-port="1" to-layer="637" to-port="1" />
+		<edge from-layer="637" from-port="2" to-layer="640" to-port="0" />
+		<edge from-layer="638" from-port="0" to-layer="639" to-port="0" />
+		<edge from-layer="639" from-port="1" to-layer="640" to-port="1" />
+		<edge from-layer="640" from-port="2" to-layer="3355" to-port="1" />
+		<edge from-layer="640" from-port="2" to-layer="648" to-port="0" />
+		<edge from-layer="640" from-port="2" to-layer="643" to-port="0" />
+		<edge from-layer="640" from-port="2" to-layer="651" to-port="0" />
+		<edge from-layer="641" from-port="0" to-layer="642" to-port="0" />
+		<edge from-layer="642" from-port="1" to-layer="643" to-port="1" />
+		<edge from-layer="643" from-port="2" to-layer="646" to-port="0" />
+		<edge from-layer="644" from-port="0" to-layer="645" to-port="0" />
+		<edge from-layer="645" from-port="1" to-layer="646" to-port="1" />
+		<edge from-layer="646" from-port="2" to-layer="696" to-port="0" />
+		<edge from-layer="647" from-port="0" to-layer="648" to-port="1" />
+		<edge from-layer="648" from-port="2" to-layer="650" to-port="0" />
+		<edge from-layer="649" from-port="0" to-layer="650" to-port="1" />
+		<edge from-layer="650" from-port="2" to-layer="652" to-port="0" />
+		<edge from-layer="651" from-port="1" to-layer="652" to-port="1" />
+		<edge from-layer="652" from-port="2" to-layer="655" to-port="0" />
+		<edge from-layer="653" from-port="0" to-layer="654" to-port="0" />
+		<edge from-layer="654" from-port="1" to-layer="655" to-port="1" />
+		<edge from-layer="655" from-port="2" to-layer="658" to-port="0" />
+		<edge from-layer="656" from-port="0" to-layer="657" to-port="0" />
+		<edge from-layer="657" from-port="1" to-layer="658" to-port="1" />
+		<edge from-layer="658" from-port="2" to-layer="659" to-port="0" />
+		<edge from-layer="659" from-port="1" to-layer="662" to-port="0" />
+		<edge from-layer="660" from-port="0" to-layer="661" to-port="0" />
+		<edge from-layer="661" from-port="1" to-layer="662" to-port="1" />
+		<edge from-layer="662" from-port="2" to-layer="665" to-port="0" />
+		<edge from-layer="663" from-port="0" to-layer="664" to-port="0" />
+		<edge from-layer="664" from-port="1" to-layer="665" to-port="1" />
+		<edge from-layer="665" from-port="2" to-layer="676" to-port="0" />
+		<edge from-layer="666" from-port="0" to-layer="667" to-port="0" />
+		<edge from-layer="667" from-port="1" to-layer="668" to-port="1" />
+		<edge from-layer="668" from-port="2" to-layer="671" to-port="0" />
+		<edge from-layer="669" from-port="0" to-layer="670" to-port="0" />
+		<edge from-layer="670" from-port="1" to-layer="671" to-port="1" />
+		<edge from-layer="671" from-port="2" to-layer="673" to-port="0" />
+		<edge from-layer="672" from-port="0" to-layer="673" to-port="1" />
+		<edge from-layer="673" from-port="2" to-layer="675" to-port="0" />
+		<edge from-layer="674" from-port="0" to-layer="675" to-port="1" />
+		<edge from-layer="675" from-port="2" to-layer="676" to-port="1" />
+		<edge from-layer="676" from-port="2" to-layer="681" to-port="0" />
+		<edge from-layer="676" from-port="2" to-layer="678" to-port="0" />
+		<edge from-layer="677" from-port="0" to-layer="678" to-port="1" />
+		<edge from-layer="678" from-port="2" to-layer="680" to-port="0" />
+		<edge from-layer="679" from-port="0" to-layer="680" to-port="1" />
+		<edge from-layer="680" from-port="2" to-layer="682" to-port="0" />
+		<edge from-layer="681" from-port="1" to-layer="682" to-port="1" />
+		<edge from-layer="682" from-port="2" to-layer="685" to-port="0" />
+		<edge from-layer="683" from-port="0" to-layer="684" to-port="0" />
+		<edge from-layer="684" from-port="1" to-layer="685" to-port="1" />
+		<edge from-layer="685" from-port="2" to-layer="688" to-port="0" />
+		<edge from-layer="686" from-port="0" to-layer="687" to-port="0" />
+		<edge from-layer="687" from-port="1" to-layer="688" to-port="1" />
+		<edge from-layer="688" from-port="2" to-layer="689" to-port="0" />
+		<edge from-layer="689" from-port="1" to-layer="692" to-port="0" />
+		<edge from-layer="690" from-port="0" to-layer="691" to-port="0" />
+		<edge from-layer="691" from-port="1" to-layer="692" to-port="1" />
+		<edge from-layer="692" from-port="2" to-layer="695" to-port="0" />
+		<edge from-layer="693" from-port="0" to-layer="694" to-port="0" />
+		<edge from-layer="694" from-port="1" to-layer="695" to-port="1" />
+		<edge from-layer="695" from-port="2" to-layer="696" to-port="1" />
+		<edge from-layer="696" from-port="2" to-layer="698" to-port="0" />
+		<edge from-layer="696" from-port="2" to-layer="877" to-port="1" />
+		<edge from-layer="696" from-port="2" to-layer="701" to-port="0" />
+		<edge from-layer="697" from-port="0" to-layer="698" to-port="1" />
+		<edge from-layer="698" from-port="2" to-layer="700" to-port="0" />
+		<edge from-layer="699" from-port="0" to-layer="700" to-port="1" />
+		<edge from-layer="700" from-port="2" to-layer="702" to-port="0" />
+		<edge from-layer="701" from-port="1" to-layer="702" to-port="1" />
+		<edge from-layer="702" from-port="2" to-layer="705" to-port="0" />
+		<edge from-layer="703" from-port="0" to-layer="704" to-port="0" />
+		<edge from-layer="704" from-port="1" to-layer="705" to-port="1" />
+		<edge from-layer="705" from-port="2" to-layer="708" to-port="0" />
+		<edge from-layer="706" from-port="0" to-layer="707" to-port="0" />
+		<edge from-layer="707" from-port="1" to-layer="708" to-port="1" />
+		<edge from-layer="708" from-port="2" to-layer="711" to-port="0" />
+		<edge from-layer="709" from-port="0" to-layer="710" to-port="0" />
+		<edge from-layer="710" from-port="1" to-layer="711" to-port="1" />
+		<edge from-layer="711" from-port="2" to-layer="714" to-port="0" />
+		<edge from-layer="712" from-port="0" to-layer="713" to-port="0" />
+		<edge from-layer="713" from-port="1" to-layer="714" to-port="1" />
+		<edge from-layer="714" from-port="2" to-layer="716" to-port="0" />
+		<edge from-layer="715" from-port="0" to-layer="716" to-port="1" />
+		<edge from-layer="716" from-port="2" to-layer="718" to-port="0" />
+		<edge from-layer="717" from-port="0" to-layer="718" to-port="1" />
+		<edge from-layer="718" from-port="2" to-layer="720" to-port="0" />
+		<edge from-layer="718" from-port="2" to-layer="770" to-port="1" />
+		<edge from-layer="719" from-port="0" to-layer="720" to-port="1" />
+		<edge from-layer="720" from-port="2" to-layer="723" to-port="0" />
+		<edge from-layer="721" from-port="0" to-layer="722" to-port="0" />
+		<edge from-layer="722" from-port="1" to-layer="723" to-port="1" />
+		<edge from-layer="723" from-port="2" to-layer="726" to-port="0" />
+		<edge from-layer="724" from-port="0" to-layer="725" to-port="0" />
+		<edge from-layer="725" from-port="1" to-layer="726" to-port="1" />
+		<edge from-layer="726" from-port="2" to-layer="738" to-port="0" />
+		<edge from-layer="726" from-port="2" to-layer="729" to-port="0" />
+		<edge from-layer="726" from-port="2" to-layer="752" to-port="0" />
+		<edge from-layer="727" from-port="0" to-layer="728" to-port="0" />
+		<edge from-layer="728" from-port="1" to-layer="729" to-port="1" />
+		<edge from-layer="729" from-port="2" to-layer="731" to-port="0" />
+		<edge from-layer="730" from-port="0" to-layer="731" to-port="1" />
+		<edge from-layer="731" from-port="2" to-layer="733" to-port="0" />
+		<edge from-layer="732" from-port="0" to-layer="733" to-port="1" />
+		<edge from-layer="733" from-port="2" to-layer="735" to-port="0" />
+		<edge from-layer="734" from-port="0" to-layer="735" to-port="1" />
+		<edge from-layer="735" from-port="2" to-layer="748" to-port="0" />
+		<edge from-layer="736" from-port="0" to-layer="737" to-port="0" />
+		<edge from-layer="737" from-port="1" to-layer="738" to-port="1" />
+		<edge from-layer="738" from-port="2" to-layer="740" to-port="0" />
+		<edge from-layer="739" from-port="0" to-layer="740" to-port="1" />
+		<edge from-layer="740" from-port="2" to-layer="742" to-port="0" />
+		<edge from-layer="741" from-port="0" to-layer="742" to-port="1" />
+		<edge from-layer="742" from-port="2" to-layer="744" to-port="0" />
+		<edge from-layer="743" from-port="0" to-layer="744" to-port="1" />
+		<edge from-layer="744" from-port="2" to-layer="747" to-port="0" />
+		<edge from-layer="745" from-port="0" to-layer="746" to-port="0" />
+		<edge from-layer="746" from-port="1" to-layer="747" to-port="1" />
+		<edge from-layer="747" from-port="2" to-layer="748" to-port="1" />
+		<edge from-layer="748" from-port="2" to-layer="749" to-port="0" />
+		<edge from-layer="749" from-port="1" to-layer="759" to-port="0" />
+		<edge from-layer="750" from-port="0" to-layer="751" to-port="0" />
+		<edge from-layer="751" from-port="1" to-layer="752" to-port="1" />
+		<edge from-layer="752" from-port="2" to-layer="754" to-port="0" />
+		<edge from-layer="753" from-port="0" to-layer="754" to-port="1" />
+		<edge from-layer="754" from-port="2" to-layer="756" to-port="0" />
+		<edge from-layer="755" from-port="0" to-layer="756" to-port="1" />
+		<edge from-layer="756" from-port="2" to-layer="758" to-port="0" />
+		<edge from-layer="757" from-port="0" to-layer="758" to-port="1" />
+		<edge from-layer="758" from-port="2" to-layer="759" to-port="1" />
+		<edge from-layer="759" from-port="2" to-layer="761" to-port="0" />
+		<edge from-layer="760" from-port="0" to-layer="761" to-port="1" />
+		<edge from-layer="761" from-port="2" to-layer="763" to-port="0" />
+		<edge from-layer="762" from-port="0" to-layer="763" to-port="1" />
+		<edge from-layer="763" from-port="2" to-layer="765" to-port="0" />
+		<edge from-layer="764" from-port="0" to-layer="765" to-port="1" />
+		<edge from-layer="765" from-port="2" to-layer="768" to-port="0" />
+		<edge from-layer="766" from-port="0" to-layer="767" to-port="0" />
+		<edge from-layer="767" from-port="1" to-layer="768" to-port="1" />
+		<edge from-layer="768" from-port="2" to-layer="769" to-port="1" />
+		<edge from-layer="769" from-port="2" to-layer="770" to-port="0" />
+		<edge from-layer="770" from-port="2" to-layer="772" to-port="0" />
+		<edge from-layer="770" from-port="2" to-layer="822" to-port="1" />
+		<edge from-layer="771" from-port="0" to-layer="772" to-port="1" />
+		<edge from-layer="772" from-port="2" to-layer="775" to-port="0" />
+		<edge from-layer="773" from-port="0" to-layer="774" to-port="0" />
+		<edge from-layer="774" from-port="1" to-layer="775" to-port="1" />
+		<edge from-layer="775" from-port="2" to-layer="778" to-port="0" />
+		<edge from-layer="776" from-port="0" to-layer="777" to-port="0" />
+		<edge from-layer="777" from-port="1" to-layer="778" to-port="1" />
+		<edge from-layer="778" from-port="2" to-layer="781" to-port="0" />
+		<edge from-layer="779" from-port="0" to-layer="780" to-port="0" />
+		<edge from-layer="780" from-port="1" to-layer="781" to-port="1" />
+		<edge from-layer="781" from-port="2" to-layer="783" to-port="0" />
+		<edge from-layer="782" from-port="0" to-layer="783" to-port="1" />
+		<edge from-layer="783" from-port="2" to-layer="785" to-port="0" />
+		<edge from-layer="784" from-port="0" to-layer="785" to-port="1" />
+		<edge from-layer="785" from-port="2" to-layer="787" to-port="0" />
+		<edge from-layer="786" from-port="0" to-layer="787" to-port="1" />
+		<edge from-layer="787" from-port="2" to-layer="800" to-port="0" />
+		<edge from-layer="788" from-port="0" to-layer="789" to-port="0" />
+		<edge from-layer="789" from-port="1" to-layer="790" to-port="1" />
+		<edge from-layer="790" from-port="2" to-layer="792" to-port="0" />
+		<edge from-layer="791" from-port="0" to-layer="792" to-port="1" />
+		<edge from-layer="792" from-port="2" to-layer="794" to-port="0" />
+		<edge from-layer="793" from-port="0" to-layer="794" to-port="1" />
+		<edge from-layer="794" from-port="2" to-layer="796" to-port="0" />
+		<edge from-layer="795" from-port="0" to-layer="796" to-port="1" />
+		<edge from-layer="796" from-port="2" to-layer="799" to-port="0" />
+		<edge from-layer="797" from-port="0" to-layer="798" to-port="0" />
+		<edge from-layer="798" from-port="1" to-layer="799" to-port="1" />
+		<edge from-layer="799" from-port="2" to-layer="800" to-port="1" />
+		<edge from-layer="800" from-port="2" to-layer="801" to-port="0" />
+		<edge from-layer="801" from-port="1" to-layer="811" to-port="0" />
+		<edge from-layer="802" from-port="0" to-layer="803" to-port="0" />
+		<edge from-layer="803" from-port="1" to-layer="804" to-port="1" />
+		<edge from-layer="804" from-port="2" to-layer="806" to-port="0" />
+		<edge from-layer="805" from-port="0" to-layer="806" to-port="1" />
+		<edge from-layer="806" from-port="2" to-layer="808" to-port="0" />
+		<edge from-layer="807" from-port="0" to-layer="808" to-port="1" />
+		<edge from-layer="808" from-port="2" to-layer="810" to-port="0" />
+		<edge from-layer="809" from-port="0" to-layer="810" to-port="1" />
+		<edge from-layer="810" from-port="2" to-layer="811" to-port="1" />
+		<edge from-layer="811" from-port="2" to-layer="813" to-port="0" />
+		<edge from-layer="812" from-port="0" to-layer="813" to-port="1" />
+		<edge from-layer="813" from-port="2" to-layer="815" to-port="0" />
+		<edge from-layer="814" from-port="0" to-layer="815" to-port="1" />
+		<edge from-layer="815" from-port="2" to-layer="817" to-port="0" />
+		<edge from-layer="816" from-port="0" to-layer="817" to-port="1" />
+		<edge from-layer="817" from-port="2" to-layer="820" to-port="0" />
+		<edge from-layer="818" from-port="0" to-layer="819" to-port="0" />
+		<edge from-layer="819" from-port="1" to-layer="820" to-port="1" />
+		<edge from-layer="820" from-port="2" to-layer="821" to-port="1" />
+		<edge from-layer="821" from-port="2" to-layer="822" to-port="0" />
+		<edge from-layer="822" from-port="2" to-layer="824" to-port="0" />
+		<edge from-layer="822" from-port="2" to-layer="866" to-port="1" />
+		<edge from-layer="823" from-port="0" to-layer="824" to-port="1" />
+		<edge from-layer="824" from-port="2" to-layer="827" to-port="0" />
+		<edge from-layer="825" from-port="0" to-layer="826" to-port="0" />
+		<edge from-layer="826" from-port="1" to-layer="827" to-port="1" />
+		<edge from-layer="827" from-port="2" to-layer="830" to-port="0" />
+		<edge from-layer="828" from-port="0" to-layer="829" to-port="0" />
+		<edge from-layer="829" from-port="1" to-layer="830" to-port="1" />
+		<edge from-layer="830" from-port="2" to-layer="833" to-port="0" />
+		<edge from-layer="831" from-port="0" to-layer="832" to-port="0" />
+		<edge from-layer="832" from-port="1" to-layer="833" to-port="1" />
+		<edge from-layer="833" from-port="2" to-layer="834" to-port="1" />
+		<edge from-layer="834" from-port="2" to-layer="838" to-port="0" />
+		<edge from-layer="834" from-port="2" to-layer="849" to-port="0" />
+		<edge from-layer="834" from-port="2" to-layer="859" to-port="0" />
+		<edge from-layer="835" from-port="0" to-layer="849" to-port="1" />
+		<edge from-layer="836" from-port="0" to-layer="847" to-port="0" />
+		<edge from-layer="837" from-port="0" to-layer="847" to-port="1" />
+		<edge from-layer="838" from-port="1" to-layer="841" to-port="0" />
+		<edge from-layer="839" from-port="0" to-layer="841" to-port="1" />
+		<edge from-layer="840" from-port="0" to-layer="841" to-port="2" />
+		<edge from-layer="841" from-port="3" to-layer="843" to-port="0" />
+		<edge from-layer="842" from-port="0" to-layer="843" to-port="1" />
+		<edge from-layer="843" from-port="2" to-layer="845" to-port="0" />
+		<edge from-layer="844" from-port="0" to-layer="845" to-port="1" />
+		<edge from-layer="845" from-port="2" to-layer="847" to-port="2" />
+		<edge from-layer="845" from-port="2" to-layer="856" to-port="0" />
+		<edge from-layer="845" from-port="2" to-layer="853" to-port="2" />
+		<edge from-layer="846" from-port="0" to-layer="847" to-port="3" />
+		<edge from-layer="847" from-port="4" to-layer="849" to-port="2" />
+		<edge from-layer="848" from-port="0" to-layer="849" to-port="3" />
+		<edge from-layer="849" from-port="4" to-layer="861" to-port="0" />
+		<edge from-layer="850" from-port="0" to-layer="853" to-port="0" />
+		<edge from-layer="851" from-port="0" to-layer="853" to-port="1" />
+		<edge from-layer="851" from-port="0" to-layer="857" to-port="1" />
+		<edge from-layer="852" from-port="0" to-layer="853" to-port="3" />
+		<edge from-layer="852" from-port="0" to-layer="857" to-port="3" />
+		<edge from-layer="853" from-port="4" to-layer="859" to-port="1" />
+		<edge from-layer="854" from-port="0" to-layer="857" to-port="0" />
+		<edge from-layer="855" from-port="0" to-layer="856" to-port="1" />
+		<edge from-layer="856" from-port="2" to-layer="857" to-port="2" />
+		<edge from-layer="857" from-port="4" to-layer="859" to-port="2" />
+		<edge from-layer="858" from-port="0" to-layer="859" to-port="3" />
+		<edge from-layer="859" from-port="4" to-layer="860" to-port="0" />
+		<edge from-layer="860" from-port="1" to-layer="861" to-port="1" />
+		<edge from-layer="861" from-port="2" to-layer="864" to-port="0" />
+		<edge from-layer="862" from-port="0" to-layer="863" to-port="0" />
+		<edge from-layer="863" from-port="1" to-layer="864" to-port="1" />
+		<edge from-layer="864" from-port="2" to-layer="865" to-port="1" />
+		<edge from-layer="865" from-port="2" to-layer="866" to-port="0" />
+		<edge from-layer="866" from-port="2" to-layer="868" to-port="0" />
+		<edge from-layer="867" from-port="0" to-layer="868" to-port="1" />
+		<edge from-layer="868" from-port="2" to-layer="870" to-port="0" />
+		<edge from-layer="869" from-port="0" to-layer="870" to-port="1" />
+		<edge from-layer="870" from-port="2" to-layer="873" to-port="0" />
+		<edge from-layer="871" from-port="0" to-layer="872" to-port="0" />
+		<edge from-layer="872" from-port="1" to-layer="873" to-port="1" />
+		<edge from-layer="873" from-port="2" to-layer="876" to-port="0" />
+		<edge from-layer="874" from-port="0" to-layer="875" to-port="0" />
+		<edge from-layer="875" from-port="1" to-layer="876" to-port="1" />
+		<edge from-layer="876" from-port="2" to-layer="877" to-port="0" />
+		<edge from-layer="877" from-port="2" to-layer="879" to-port="0" />
+		<edge from-layer="877" from-port="2" to-layer="882" to-port="0" />
+		<edge from-layer="877" from-port="2" to-layer="927" to-port="0" />
+		<edge from-layer="877" from-port="2" to-layer="3117" to-port="1" />
+		<edge from-layer="878" from-port="0" to-layer="879" to-port="1" />
+		<edge from-layer="879" from-port="2" to-layer="881" to-port="0" />
+		<edge from-layer="880" from-port="0" to-layer="881" to-port="1" />
+		<edge from-layer="881" from-port="2" to-layer="883" to-port="0" />
+		<edge from-layer="882" from-port="1" to-layer="883" to-port="1" />
+		<edge from-layer="883" from-port="2" to-layer="886" to-port="0" />
+		<edge from-layer="884" from-port="0" to-layer="885" to-port="0" />
+		<edge from-layer="885" from-port="1" to-layer="886" to-port="1" />
+		<edge from-layer="886" from-port="2" to-layer="889" to-port="0" />
+		<edge from-layer="887" from-port="0" to-layer="888" to-port="0" />
+		<edge from-layer="888" from-port="1" to-layer="889" to-port="1" />
+		<edge from-layer="889" from-port="2" to-layer="890" to-port="0" />
+		<edge from-layer="890" from-port="1" to-layer="893" to-port="0" />
+		<edge from-layer="891" from-port="0" to-layer="892" to-port="0" />
+		<edge from-layer="892" from-port="1" to-layer="893" to-port="1" />
+		<edge from-layer="893" from-port="2" to-layer="896" to-port="0" />
+		<edge from-layer="894" from-port="0" to-layer="895" to-port="0" />
+		<edge from-layer="895" from-port="1" to-layer="896" to-port="1" />
+		<edge from-layer="896" from-port="2" to-layer="907" to-port="0" />
+		<edge from-layer="897" from-port="0" to-layer="898" to-port="0" />
+		<edge from-layer="898" from-port="1" to-layer="899" to-port="1" />
+		<edge from-layer="899" from-port="2" to-layer="902" to-port="0" />
+		<edge from-layer="900" from-port="0" to-layer="901" to-port="0" />
+		<edge from-layer="901" from-port="1" to-layer="902" to-port="1" />
+		<edge from-layer="902" from-port="2" to-layer="904" to-port="0" />
+		<edge from-layer="903" from-port="0" to-layer="904" to-port="1" />
+		<edge from-layer="904" from-port="2" to-layer="906" to-port="0" />
+		<edge from-layer="905" from-port="0" to-layer="906" to-port="1" />
+		<edge from-layer="906" from-port="2" to-layer="907" to-port="1" />
+		<edge from-layer="907" from-port="2" to-layer="909" to-port="0" />
+		<edge from-layer="907" from-port="2" to-layer="912" to-port="0" />
+		<edge from-layer="908" from-port="0" to-layer="909" to-port="1" />
+		<edge from-layer="909" from-port="2" to-layer="911" to-port="0" />
+		<edge from-layer="910" from-port="0" to-layer="911" to-port="1" />
+		<edge from-layer="911" from-port="2" to-layer="913" to-port="0" />
+		<edge from-layer="912" from-port="1" to-layer="913" to-port="1" />
+		<edge from-layer="913" from-port="2" to-layer="916" to-port="0" />
+		<edge from-layer="914" from-port="0" to-layer="915" to-port="0" />
+		<edge from-layer="915" from-port="1" to-layer="916" to-port="1" />
+		<edge from-layer="916" from-port="2" to-layer="919" to-port="0" />
+		<edge from-layer="917" from-port="0" to-layer="918" to-port="0" />
+		<edge from-layer="918" from-port="1" to-layer="919" to-port="1" />
+		<edge from-layer="919" from-port="2" to-layer="920" to-port="0" />
+		<edge from-layer="920" from-port="1" to-layer="923" to-port="0" />
+		<edge from-layer="921" from-port="0" to-layer="922" to-port="0" />
+		<edge from-layer="922" from-port="1" to-layer="923" to-port="1" />
+		<edge from-layer="923" from-port="2" to-layer="926" to-port="0" />
+		<edge from-layer="924" from-port="0" to-layer="925" to-port="0" />
+		<edge from-layer="925" from-port="1" to-layer="926" to-port="1" />
+		<edge from-layer="926" from-port="2" to-layer="927" to-port="1" />
+		<edge from-layer="927" from-port="2" to-layer="932" to-port="0" />
+		<edge from-layer="927" from-port="2" to-layer="929" to-port="0" />
+		<edge from-layer="927" from-port="2" to-layer="1108" to-port="1" />
+		<edge from-layer="928" from-port="0" to-layer="929" to-port="1" />
+		<edge from-layer="929" from-port="2" to-layer="931" to-port="0" />
+		<edge from-layer="930" from-port="0" to-layer="931" to-port="1" />
+		<edge from-layer="931" from-port="2" to-layer="933" to-port="0" />
+		<edge from-layer="932" from-port="1" to-layer="933" to-port="1" />
+		<edge from-layer="933" from-port="2" to-layer="936" to-port="0" />
+		<edge from-layer="934" from-port="0" to-layer="935" to-port="0" />
+		<edge from-layer="935" from-port="1" to-layer="936" to-port="1" />
+		<edge from-layer="936" from-port="2" to-layer="939" to-port="0" />
+		<edge from-layer="937" from-port="0" to-layer="938" to-port="0" />
+		<edge from-layer="938" from-port="1" to-layer="939" to-port="1" />
+		<edge from-layer="939" from-port="2" to-layer="942" to-port="0" />
+		<edge from-layer="940" from-port="0" to-layer="941" to-port="0" />
+		<edge from-layer="941" from-port="1" to-layer="942" to-port="1" />
+		<edge from-layer="942" from-port="2" to-layer="945" to-port="0" />
+		<edge from-layer="943" from-port="0" to-layer="944" to-port="0" />
+		<edge from-layer="944" from-port="1" to-layer="945" to-port="1" />
+		<edge from-layer="945" from-port="2" to-layer="947" to-port="0" />
+		<edge from-layer="946" from-port="0" to-layer="947" to-port="1" />
+		<edge from-layer="947" from-port="2" to-layer="949" to-port="0" />
+		<edge from-layer="948" from-port="0" to-layer="949" to-port="1" />
+		<edge from-layer="949" from-port="2" to-layer="951" to-port="0" />
+		<edge from-layer="949" from-port="2" to-layer="1001" to-port="1" />
+		<edge from-layer="950" from-port="0" to-layer="951" to-port="1" />
+		<edge from-layer="951" from-port="2" to-layer="954" to-port="0" />
+		<edge from-layer="952" from-port="0" to-layer="953" to-port="0" />
+		<edge from-layer="953" from-port="1" to-layer="954" to-port="1" />
+		<edge from-layer="954" from-port="2" to-layer="957" to-port="0" />
+		<edge from-layer="955" from-port="0" to-layer="956" to-port="0" />
+		<edge from-layer="956" from-port="1" to-layer="957" to-port="1" />
+		<edge from-layer="957" from-port="2" to-layer="983" to-port="0" />
+		<edge from-layer="957" from-port="2" to-layer="969" to-port="0" />
+		<edge from-layer="957" from-port="2" to-layer="960" to-port="0" />
+		<edge from-layer="958" from-port="0" to-layer="959" to-port="0" />
+		<edge from-layer="959" from-port="1" to-layer="960" to-port="1" />
+		<edge from-layer="960" from-port="2" to-layer="962" to-port="0" />
+		<edge from-layer="961" from-port="0" to-layer="962" to-port="1" />
+		<edge from-layer="962" from-port="2" to-layer="964" to-port="0" />
+		<edge from-layer="963" from-port="0" to-layer="964" to-port="1" />
+		<edge from-layer="964" from-port="2" to-layer="966" to-port="0" />
+		<edge from-layer="965" from-port="0" to-layer="966" to-port="1" />
+		<edge from-layer="966" from-port="2" to-layer="979" to-port="0" />
+		<edge from-layer="967" from-port="0" to-layer="968" to-port="0" />
+		<edge from-layer="968" from-port="1" to-layer="969" to-port="1" />
+		<edge from-layer="969" from-port="2" to-layer="971" to-port="0" />
+		<edge from-layer="970" from-port="0" to-layer="971" to-port="1" />
+		<edge from-layer="971" from-port="2" to-layer="973" to-port="0" />
+		<edge from-layer="972" from-port="0" to-layer="973" to-port="1" />
+		<edge from-layer="973" from-port="2" to-layer="975" to-port="0" />
+		<edge from-layer="974" from-port="0" to-layer="975" to-port="1" />
+		<edge from-layer="975" from-port="2" to-layer="978" to-port="0" />
+		<edge from-layer="976" from-port="0" to-layer="977" to-port="0" />
+		<edge from-layer="977" from-port="1" to-layer="978" to-port="1" />
+		<edge from-layer="978" from-port="2" to-layer="979" to-port="1" />
+		<edge from-layer="979" from-port="2" to-layer="980" to-port="0" />
+		<edge from-layer="980" from-port="1" to-layer="990" to-port="0" />
+		<edge from-layer="981" from-port="0" to-layer="982" to-port="0" />
+		<edge from-layer="982" from-port="1" to-layer="983" to-port="1" />
+		<edge from-layer="983" from-port="2" to-layer="985" to-port="0" />
+		<edge from-layer="984" from-port="0" to-layer="985" to-port="1" />
+		<edge from-layer="985" from-port="2" to-layer="987" to-port="0" />
+		<edge from-layer="986" from-port="0" to-layer="987" to-port="1" />
+		<edge from-layer="987" from-port="2" to-layer="989" to-port="0" />
+		<edge from-layer="988" from-port="0" to-layer="989" to-port="1" />
+		<edge from-layer="989" from-port="2" to-layer="990" to-port="1" />
+		<edge from-layer="990" from-port="2" to-layer="992" to-port="0" />
+		<edge from-layer="991" from-port="0" to-layer="992" to-port="1" />
+		<edge from-layer="992" from-port="2" to-layer="994" to-port="0" />
+		<edge from-layer="993" from-port="0" to-layer="994" to-port="1" />
+		<edge from-layer="994" from-port="2" to-layer="996" to-port="0" />
+		<edge from-layer="995" from-port="0" to-layer="996" to-port="1" />
+		<edge from-layer="996" from-port="2" to-layer="999" to-port="0" />
+		<edge from-layer="997" from-port="0" to-layer="998" to-port="0" />
+		<edge from-layer="998" from-port="1" to-layer="999" to-port="1" />
+		<edge from-layer="999" from-port="2" to-layer="1000" to-port="1" />
+		<edge from-layer="1000" from-port="2" to-layer="1001" to-port="0" />
+		<edge from-layer="1001" from-port="2" to-layer="1003" to-port="0" />
+		<edge from-layer="1001" from-port="2" to-layer="1053" to-port="1" />
+		<edge from-layer="1002" from-port="0" to-layer="1003" to-port="1" />
+		<edge from-layer="1003" from-port="2" to-layer="1006" to-port="0" />
+		<edge from-layer="1004" from-port="0" to-layer="1005" to-port="0" />
+		<edge from-layer="1005" from-port="1" to-layer="1006" to-port="1" />
+		<edge from-layer="1006" from-port="2" to-layer="1009" to-port="0" />
+		<edge from-layer="1007" from-port="0" to-layer="1008" to-port="0" />
+		<edge from-layer="1008" from-port="1" to-layer="1009" to-port="1" />
+		<edge from-layer="1009" from-port="2" to-layer="1012" to-port="0" />
+		<edge from-layer="1010" from-port="0" to-layer="1011" to-port="0" />
+		<edge from-layer="1011" from-port="1" to-layer="1012" to-port="1" />
+		<edge from-layer="1012" from-port="2" to-layer="1014" to-port="0" />
+		<edge from-layer="1013" from-port="0" to-layer="1014" to-port="1" />
+		<edge from-layer="1014" from-port="2" to-layer="1016" to-port="0" />
+		<edge from-layer="1015" from-port="0" to-layer="1016" to-port="1" />
+		<edge from-layer="1016" from-port="2" to-layer="1018" to-port="0" />
+		<edge from-layer="1017" from-port="0" to-layer="1018" to-port="1" />
+		<edge from-layer="1018" from-port="2" to-layer="1031" to-port="0" />
+		<edge from-layer="1019" from-port="0" to-layer="1020" to-port="0" />
+		<edge from-layer="1020" from-port="1" to-layer="1021" to-port="1" />
+		<edge from-layer="1021" from-port="2" to-layer="1023" to-port="0" />
+		<edge from-layer="1022" from-port="0" to-layer="1023" to-port="1" />
+		<edge from-layer="1023" from-port="2" to-layer="1025" to-port="0" />
+		<edge from-layer="1024" from-port="0" to-layer="1025" to-port="1" />
+		<edge from-layer="1025" from-port="2" to-layer="1027" to-port="0" />
+		<edge from-layer="1026" from-port="0" to-layer="1027" to-port="1" />
+		<edge from-layer="1027" from-port="2" to-layer="1030" to-port="0" />
+		<edge from-layer="1028" from-port="0" to-layer="1029" to-port="0" />
+		<edge from-layer="1029" from-port="1" to-layer="1030" to-port="1" />
+		<edge from-layer="1030" from-port="2" to-layer="1031" to-port="1" />
+		<edge from-layer="1031" from-port="2" to-layer="1032" to-port="0" />
+		<edge from-layer="1032" from-port="1" to-layer="1042" to-port="0" />
+		<edge from-layer="1033" from-port="0" to-layer="1034" to-port="0" />
+		<edge from-layer="1034" from-port="1" to-layer="1035" to-port="1" />
+		<edge from-layer="1035" from-port="2" to-layer="1037" to-port="0" />
+		<edge from-layer="1036" from-port="0" to-layer="1037" to-port="1" />
+		<edge from-layer="1037" from-port="2" to-layer="1039" to-port="0" />
+		<edge from-layer="1038" from-port="0" to-layer="1039" to-port="1" />
+		<edge from-layer="1039" from-port="2" to-layer="1041" to-port="0" />
+		<edge from-layer="1040" from-port="0" to-layer="1041" to-port="1" />
+		<edge from-layer="1041" from-port="2" to-layer="1042" to-port="1" />
+		<edge from-layer="1042" from-port="2" to-layer="1044" to-port="0" />
+		<edge from-layer="1043" from-port="0" to-layer="1044" to-port="1" />
+		<edge from-layer="1044" from-port="2" to-layer="1046" to-port="0" />
+		<edge from-layer="1045" from-port="0" to-layer="1046" to-port="1" />
+		<edge from-layer="1046" from-port="2" to-layer="1048" to-port="0" />
+		<edge from-layer="1047" from-port="0" to-layer="1048" to-port="1" />
+		<edge from-layer="1048" from-port="2" to-layer="1051" to-port="0" />
+		<edge from-layer="1049" from-port="0" to-layer="1050" to-port="0" />
+		<edge from-layer="1050" from-port="1" to-layer="1051" to-port="1" />
+		<edge from-layer="1051" from-port="2" to-layer="1052" to-port="1" />
+		<edge from-layer="1052" from-port="2" to-layer="1053" to-port="0" />
+		<edge from-layer="1053" from-port="2" to-layer="1097" to-port="1" />
+		<edge from-layer="1053" from-port="2" to-layer="1055" to-port="0" />
+		<edge from-layer="1054" from-port="0" to-layer="1055" to-port="1" />
+		<edge from-layer="1055" from-port="2" to-layer="1058" to-port="0" />
+		<edge from-layer="1056" from-port="0" to-layer="1057" to-port="0" />
+		<edge from-layer="1057" from-port="1" to-layer="1058" to-port="1" />
+		<edge from-layer="1058" from-port="2" to-layer="1061" to-port="0" />
+		<edge from-layer="1059" from-port="0" to-layer="1060" to-port="0" />
+		<edge from-layer="1060" from-port="1" to-layer="1061" to-port="1" />
+		<edge from-layer="1061" from-port="2" to-layer="1064" to-port="0" />
+		<edge from-layer="1062" from-port="0" to-layer="1063" to-port="0" />
+		<edge from-layer="1063" from-port="1" to-layer="1064" to-port="1" />
+		<edge from-layer="1064" from-port="2" to-layer="1065" to-port="1" />
+		<edge from-layer="1065" from-port="2" to-layer="1069" to-port="0" />
+		<edge from-layer="1065" from-port="2" to-layer="1080" to-port="0" />
+		<edge from-layer="1065" from-port="2" to-layer="1090" to-port="0" />
+		<edge from-layer="1066" from-port="0" to-layer="1080" to-port="1" />
+		<edge from-layer="1067" from-port="0" to-layer="1078" to-port="0" />
+		<edge from-layer="1068" from-port="0" to-layer="1078" to-port="1" />
+		<edge from-layer="1069" from-port="1" to-layer="1072" to-port="0" />
+		<edge from-layer="1070" from-port="0" to-layer="1072" to-port="1" />
+		<edge from-layer="1071" from-port="0" to-layer="1072" to-port="2" />
+		<edge from-layer="1072" from-port="3" to-layer="1074" to-port="0" />
+		<edge from-layer="1073" from-port="0" to-layer="1074" to-port="1" />
+		<edge from-layer="1074" from-port="2" to-layer="1076" to-port="0" />
+		<edge from-layer="1075" from-port="0" to-layer="1076" to-port="1" />
+		<edge from-layer="1076" from-port="2" to-layer="1084" to-port="2" />
+		<edge from-layer="1076" from-port="2" to-layer="1087" to-port="0" />
+		<edge from-layer="1076" from-port="2" to-layer="1078" to-port="2" />
+		<edge from-layer="1077" from-port="0" to-layer="1078" to-port="3" />
+		<edge from-layer="1078" from-port="4" to-layer="1080" to-port="2" />
+		<edge from-layer="1079" from-port="0" to-layer="1080" to-port="3" />
+		<edge from-layer="1080" from-port="4" to-layer="1092" to-port="0" />
+		<edge from-layer="1081" from-port="0" to-layer="1084" to-port="0" />
+		<edge from-layer="1082" from-port="0" to-layer="1088" to-port="1" />
+		<edge from-layer="1082" from-port="0" to-layer="1084" to-port="1" />
+		<edge from-layer="1083" from-port="0" to-layer="1084" to-port="3" />
+		<edge from-layer="1083" from-port="0" to-layer="1088" to-port="3" />
+		<edge from-layer="1084" from-port="4" to-layer="1090" to-port="1" />
+		<edge from-layer="1085" from-port="0" to-layer="1088" to-port="0" />
+		<edge from-layer="1086" from-port="0" to-layer="1087" to-port="1" />
+		<edge from-layer="1087" from-port="2" to-layer="1088" to-port="2" />
+		<edge from-layer="1088" from-port="4" to-layer="1090" to-port="2" />
+		<edge from-layer="1089" from-port="0" to-layer="1090" to-port="3" />
+		<edge from-layer="1090" from-port="4" to-layer="1091" to-port="0" />
+		<edge from-layer="1091" from-port="1" to-layer="1092" to-port="1" />
+		<edge from-layer="1092" from-port="2" to-layer="1095" to-port="0" />
+		<edge from-layer="1093" from-port="0" to-layer="1094" to-port="0" />
+		<edge from-layer="1094" from-port="1" to-layer="1095" to-port="1" />
+		<edge from-layer="1095" from-port="2" to-layer="1096" to-port="1" />
+		<edge from-layer="1096" from-port="2" to-layer="1097" to-port="0" />
+		<edge from-layer="1097" from-port="2" to-layer="1099" to-port="0" />
+		<edge from-layer="1098" from-port="0" to-layer="1099" to-port="1" />
+		<edge from-layer="1099" from-port="2" to-layer="1101" to-port="0" />
+		<edge from-layer="1100" from-port="0" to-layer="1101" to-port="1" />
+		<edge from-layer="1101" from-port="2" to-layer="1104" to-port="0" />
+		<edge from-layer="1102" from-port="0" to-layer="1103" to-port="0" />
+		<edge from-layer="1103" from-port="1" to-layer="1104" to-port="1" />
+		<edge from-layer="1104" from-port="2" to-layer="1107" to-port="0" />
+		<edge from-layer="1105" from-port="0" to-layer="1106" to-port="0" />
+		<edge from-layer="1106" from-port="1" to-layer="1107" to-port="1" />
+		<edge from-layer="1107" from-port="2" to-layer="1108" to-port="0" />
+		<edge from-layer="1108" from-port="2" to-layer="2879" to-port="1" />
+		<edge from-layer="1108" from-port="2" to-layer="1111" to-port="0" />
+		<edge from-layer="1109" from-port="0" to-layer="1110" to-port="0" />
+		<edge from-layer="1110" from-port="1" to-layer="1111" to-port="1" />
+		<edge from-layer="1111" from-port="2" to-layer="1114" to-port="0" />
+		<edge from-layer="1112" from-port="0" to-layer="1113" to-port="0" />
+		<edge from-layer="1113" from-port="1" to-layer="1114" to-port="1" />
+		<edge from-layer="1114" from-port="2" to-layer="2629" to-port="1" />
+		<edge from-layer="1114" from-port="2" to-layer="1122" to-port="0" />
+		<edge from-layer="1114" from-port="2" to-layer="1125" to-port="0" />
+		<edge from-layer="1114" from-port="2" to-layer="1117" to-port="0" />
+		<edge from-layer="1115" from-port="0" to-layer="1116" to-port="0" />
+		<edge from-layer="1116" from-port="1" to-layer="1117" to-port="1" />
+		<edge from-layer="1117" from-port="2" to-layer="1120" to-port="0" />
+		<edge from-layer="1118" from-port="0" to-layer="1119" to-port="0" />
+		<edge from-layer="1119" from-port="1" to-layer="1120" to-port="1" />
+		<edge from-layer="1120" from-port="2" to-layer="1170" to-port="0" />
+		<edge from-layer="1121" from-port="0" to-layer="1122" to-port="1" />
+		<edge from-layer="1122" from-port="2" to-layer="1124" to-port="0" />
+		<edge from-layer="1123" from-port="0" to-layer="1124" to-port="1" />
+		<edge from-layer="1124" from-port="2" to-layer="1126" to-port="0" />
+		<edge from-layer="1125" from-port="1" to-layer="1126" to-port="1" />
+		<edge from-layer="1126" from-port="2" to-layer="1129" to-port="0" />
+		<edge from-layer="1127" from-port="0" to-layer="1128" to-port="0" />
+		<edge from-layer="1128" from-port="1" to-layer="1129" to-port="1" />
+		<edge from-layer="1129" from-port="2" to-layer="1132" to-port="0" />
+		<edge from-layer="1130" from-port="0" to-layer="1131" to-port="0" />
+		<edge from-layer="1131" from-port="1" to-layer="1132" to-port="1" />
+		<edge from-layer="1132" from-port="2" to-layer="1133" to-port="0" />
+		<edge from-layer="1133" from-port="1" to-layer="1136" to-port="0" />
+		<edge from-layer="1134" from-port="0" to-layer="1135" to-port="0" />
+		<edge from-layer="1135" from-port="1" to-layer="1136" to-port="1" />
+		<edge from-layer="1136" from-port="2" to-layer="1139" to-port="0" />
+		<edge from-layer="1137" from-port="0" to-layer="1138" to-port="0" />
+		<edge from-layer="1138" from-port="1" to-layer="1139" to-port="1" />
+		<edge from-layer="1139" from-port="2" to-layer="1150" to-port="0" />
+		<edge from-layer="1140" from-port="0" to-layer="1141" to-port="0" />
+		<edge from-layer="1141" from-port="1" to-layer="1142" to-port="1" />
+		<edge from-layer="1142" from-port="2" to-layer="1145" to-port="0" />
+		<edge from-layer="1143" from-port="0" to-layer="1144" to-port="0" />
+		<edge from-layer="1144" from-port="1" to-layer="1145" to-port="1" />
+		<edge from-layer="1145" from-port="2" to-layer="1147" to-port="0" />
+		<edge from-layer="1146" from-port="0" to-layer="1147" to-port="1" />
+		<edge from-layer="1147" from-port="2" to-layer="1149" to-port="0" />
+		<edge from-layer="1148" from-port="0" to-layer="1149" to-port="1" />
+		<edge from-layer="1149" from-port="2" to-layer="1150" to-port="1" />
+		<edge from-layer="1150" from-port="2" to-layer="1152" to-port="0" />
+		<edge from-layer="1150" from-port="2" to-layer="1155" to-port="0" />
+		<edge from-layer="1151" from-port="0" to-layer="1152" to-port="1" />
+		<edge from-layer="1152" from-port="2" to-layer="1154" to-port="0" />
+		<edge from-layer="1153" from-port="0" to-layer="1154" to-port="1" />
+		<edge from-layer="1154" from-port="2" to-layer="1156" to-port="0" />
+		<edge from-layer="1155" from-port="1" to-layer="1156" to-port="1" />
+		<edge from-layer="1156" from-port="2" to-layer="1159" to-port="0" />
+		<edge from-layer="1157" from-port="0" to-layer="1158" to-port="0" />
+		<edge from-layer="1158" from-port="1" to-layer="1159" to-port="1" />
+		<edge from-layer="1159" from-port="2" to-layer="1162" to-port="0" />
+		<edge from-layer="1160" from-port="0" to-layer="1161" to-port="0" />
+		<edge from-layer="1161" from-port="1" to-layer="1162" to-port="1" />
+		<edge from-layer="1162" from-port="2" to-layer="1163" to-port="0" />
+		<edge from-layer="1163" from-port="1" to-layer="1166" to-port="0" />
+		<edge from-layer="1164" from-port="0" to-layer="1165" to-port="0" />
+		<edge from-layer="1165" from-port="1" to-layer="1166" to-port="1" />
+		<edge from-layer="1166" from-port="2" to-layer="1169" to-port="0" />
+		<edge from-layer="1167" from-port="0" to-layer="1168" to-port="0" />
+		<edge from-layer="1168" from-port="1" to-layer="1169" to-port="1" />
+		<edge from-layer="1169" from-port="2" to-layer="1170" to-port="1" />
+		<edge from-layer="1170" from-port="2" to-layer="1351" to-port="1" />
+		<edge from-layer="1170" from-port="2" to-layer="1175" to-port="0" />
+		<edge from-layer="1170" from-port="2" to-layer="1172" to-port="0" />
+		<edge from-layer="1171" from-port="0" to-layer="1172" to-port="1" />
+		<edge from-layer="1172" from-port="2" to-layer="1174" to-port="0" />
+		<edge from-layer="1173" from-port="0" to-layer="1174" to-port="1" />
+		<edge from-layer="1174" from-port="2" to-layer="1176" to-port="0" />
+		<edge from-layer="1175" from-port="1" to-layer="1176" to-port="1" />
+		<edge from-layer="1176" from-port="2" to-layer="1179" to-port="0" />
+		<edge from-layer="1177" from-port="0" to-layer="1178" to-port="0" />
+		<edge from-layer="1178" from-port="1" to-layer="1179" to-port="1" />
+		<edge from-layer="1179" from-port="2" to-layer="1182" to-port="0" />
+		<edge from-layer="1180" from-port="0" to-layer="1181" to-port="0" />
+		<edge from-layer="1181" from-port="1" to-layer="1182" to-port="1" />
+		<edge from-layer="1182" from-port="2" to-layer="1185" to-port="0" />
+		<edge from-layer="1183" from-port="0" to-layer="1184" to-port="0" />
+		<edge from-layer="1184" from-port="1" to-layer="1185" to-port="1" />
+		<edge from-layer="1185" from-port="2" to-layer="1188" to-port="0" />
+		<edge from-layer="1186" from-port="0" to-layer="1187" to-port="0" />
+		<edge from-layer="1187" from-port="1" to-layer="1188" to-port="1" />
+		<edge from-layer="1188" from-port="2" to-layer="1190" to-port="0" />
+		<edge from-layer="1189" from-port="0" to-layer="1190" to-port="1" />
+		<edge from-layer="1190" from-port="2" to-layer="1192" to-port="0" />
+		<edge from-layer="1191" from-port="0" to-layer="1192" to-port="1" />
+		<edge from-layer="1192" from-port="2" to-layer="1244" to-port="1" />
+		<edge from-layer="1192" from-port="2" to-layer="1194" to-port="0" />
+		<edge from-layer="1193" from-port="0" to-layer="1194" to-port="1" />
+		<edge from-layer="1194" from-port="2" to-layer="1197" to-port="0" />
+		<edge from-layer="1195" from-port="0" to-layer="1196" to-port="0" />
+		<edge from-layer="1196" from-port="1" to-layer="1197" to-port="1" />
+		<edge from-layer="1197" from-port="2" to-layer="1200" to-port="0" />
+		<edge from-layer="1198" from-port="0" to-layer="1199" to-port="0" />
+		<edge from-layer="1199" from-port="1" to-layer="1200" to-port="1" />
+		<edge from-layer="1200" from-port="2" to-layer="1212" to-port="0" />
+		<edge from-layer="1200" from-port="2" to-layer="1226" to-port="0" />
+		<edge from-layer="1200" from-port="2" to-layer="1203" to-port="0" />
+		<edge from-layer="1201" from-port="0" to-layer="1202" to-port="0" />
+		<edge from-layer="1202" from-port="1" to-layer="1203" to-port="1" />
+		<edge from-layer="1203" from-port="2" to-layer="1205" to-port="0" />
+		<edge from-layer="1204" from-port="0" to-layer="1205" to-port="1" />
+		<edge from-layer="1205" from-port="2" to-layer="1207" to-port="0" />
+		<edge from-layer="1206" from-port="0" to-layer="1207" to-port="1" />
+		<edge from-layer="1207" from-port="2" to-layer="1209" to-port="0" />
+		<edge from-layer="1208" from-port="0" to-layer="1209" to-port="1" />
+		<edge from-layer="1209" from-port="2" to-layer="1222" to-port="0" />
+		<edge from-layer="1210" from-port="0" to-layer="1211" to-port="0" />
+		<edge from-layer="1211" from-port="1" to-layer="1212" to-port="1" />
+		<edge from-layer="1212" from-port="2" to-layer="1214" to-port="0" />
+		<edge from-layer="1213" from-port="0" to-layer="1214" to-port="1" />
+		<edge from-layer="1214" from-port="2" to-layer="1216" to-port="0" />
+		<edge from-layer="1215" from-port="0" to-layer="1216" to-port="1" />
+		<edge from-layer="1216" from-port="2" to-layer="1218" to-port="0" />
+		<edge from-layer="1217" from-port="0" to-layer="1218" to-port="1" />
+		<edge from-layer="1218" from-port="2" to-layer="1221" to-port="0" />
+		<edge from-layer="1219" from-port="0" to-layer="1220" to-port="0" />
+		<edge from-layer="1220" from-port="1" to-layer="1221" to-port="1" />
+		<edge from-layer="1221" from-port="2" to-layer="1222" to-port="1" />
+		<edge from-layer="1222" from-port="2" to-layer="1223" to-port="0" />
+		<edge from-layer="1223" from-port="1" to-layer="1233" to-port="0" />
+		<edge from-layer="1224" from-port="0" to-layer="1225" to-port="0" />
+		<edge from-layer="1225" from-port="1" to-layer="1226" to-port="1" />
+		<edge from-layer="1226" from-port="2" to-layer="1228" to-port="0" />
+		<edge from-layer="1227" from-port="0" to-layer="1228" to-port="1" />
+		<edge from-layer="1228" from-port="2" to-layer="1230" to-port="0" />
+		<edge from-layer="1229" from-port="0" to-layer="1230" to-port="1" />
+		<edge from-layer="1230" from-port="2" to-layer="1232" to-port="0" />
+		<edge from-layer="1231" from-port="0" to-layer="1232" to-port="1" />
+		<edge from-layer="1232" from-port="2" to-layer="1233" to-port="1" />
+		<edge from-layer="1233" from-port="2" to-layer="1235" to-port="0" />
+		<edge from-layer="1234" from-port="0" to-layer="1235" to-port="1" />
+		<edge from-layer="1235" from-port="2" to-layer="1237" to-port="0" />
+		<edge from-layer="1236" from-port="0" to-layer="1237" to-port="1" />
+		<edge from-layer="1237" from-port="2" to-layer="1239" to-port="0" />
+		<edge from-layer="1238" from-port="0" to-layer="1239" to-port="1" />
+		<edge from-layer="1239" from-port="2" to-layer="1242" to-port="0" />
+		<edge from-layer="1240" from-port="0" to-layer="1241" to-port="0" />
+		<edge from-layer="1241" from-port="1" to-layer="1242" to-port="1" />
+		<edge from-layer="1242" from-port="2" to-layer="1243" to-port="1" />
+		<edge from-layer="1243" from-port="2" to-layer="1244" to-port="0" />
+		<edge from-layer="1244" from-port="2" to-layer="1296" to-port="1" />
+		<edge from-layer="1244" from-port="2" to-layer="1246" to-port="0" />
+		<edge from-layer="1245" from-port="0" to-layer="1246" to-port="1" />
+		<edge from-layer="1246" from-port="2" to-layer="1249" to-port="0" />
+		<edge from-layer="1247" from-port="0" to-layer="1248" to-port="0" />
+		<edge from-layer="1248" from-port="1" to-layer="1249" to-port="1" />
+		<edge from-layer="1249" from-port="2" to-layer="1252" to-port="0" />
+		<edge from-layer="1250" from-port="0" to-layer="1251" to-port="0" />
+		<edge from-layer="1251" from-port="1" to-layer="1252" to-port="1" />
+		<edge from-layer="1252" from-port="2" to-layer="1255" to-port="0" />
+		<edge from-layer="1253" from-port="0" to-layer="1254" to-port="0" />
+		<edge from-layer="1254" from-port="1" to-layer="1255" to-port="1" />
+		<edge from-layer="1255" from-port="2" to-layer="1257" to-port="0" />
+		<edge from-layer="1256" from-port="0" to-layer="1257" to-port="1" />
+		<edge from-layer="1257" from-port="2" to-layer="1259" to-port="0" />
+		<edge from-layer="1258" from-port="0" to-layer="1259" to-port="1" />
+		<edge from-layer="1259" from-port="2" to-layer="1261" to-port="0" />
+		<edge from-layer="1260" from-port="0" to-layer="1261" to-port="1" />
+		<edge from-layer="1261" from-port="2" to-layer="1274" to-port="0" />
+		<edge from-layer="1262" from-port="0" to-layer="1263" to-port="0" />
+		<edge from-layer="1263" from-port="1" to-layer="1264" to-port="1" />
+		<edge from-layer="1264" from-port="2" to-layer="1266" to-port="0" />
+		<edge from-layer="1265" from-port="0" to-layer="1266" to-port="1" />
+		<edge from-layer="1266" from-port="2" to-layer="1268" to-port="0" />
+		<edge from-layer="1267" from-port="0" to-layer="1268" to-port="1" />
+		<edge from-layer="1268" from-port="2" to-layer="1270" to-port="0" />
+		<edge from-layer="1269" from-port="0" to-layer="1270" to-port="1" />
+		<edge from-layer="1270" from-port="2" to-layer="1273" to-port="0" />
+		<edge from-layer="1271" from-port="0" to-layer="1272" to-port="0" />
+		<edge from-layer="1272" from-port="1" to-layer="1273" to-port="1" />
+		<edge from-layer="1273" from-port="2" to-layer="1274" to-port="1" />
+		<edge from-layer="1274" from-port="2" to-layer="1275" to-port="0" />
+		<edge from-layer="1275" from-port="1" to-layer="1285" to-port="0" />
+		<edge from-layer="1276" from-port="0" to-layer="1277" to-port="0" />
+		<edge from-layer="1277" from-port="1" to-layer="1278" to-port="1" />
+		<edge from-layer="1278" from-port="2" to-layer="1280" to-port="0" />
+		<edge from-layer="1279" from-port="0" to-layer="1280" to-port="1" />
+		<edge from-layer="1280" from-port="2" to-layer="1282" to-port="0" />
+		<edge from-layer="1281" from-port="0" to-layer="1282" to-port="1" />
+		<edge from-layer="1282" from-port="2" to-layer="1284" to-port="0" />
+		<edge from-layer="1283" from-port="0" to-layer="1284" to-port="1" />
+		<edge from-layer="1284" from-port="2" to-layer="1285" to-port="1" />
+		<edge from-layer="1285" from-port="2" to-layer="1287" to-port="0" />
+		<edge from-layer="1286" from-port="0" to-layer="1287" to-port="1" />
+		<edge from-layer="1287" from-port="2" to-layer="1289" to-port="0" />
+		<edge from-layer="1288" from-port="0" to-layer="1289" to-port="1" />
+		<edge from-layer="1289" from-port="2" to-layer="1291" to-port="0" />
+		<edge from-layer="1290" from-port="0" to-layer="1291" to-port="1" />
+		<edge from-layer="1291" from-port="2" to-layer="1294" to-port="0" />
+		<edge from-layer="1292" from-port="0" to-layer="1293" to-port="0" />
+		<edge from-layer="1293" from-port="1" to-layer="1294" to-port="1" />
+		<edge from-layer="1294" from-port="2" to-layer="1295" to-port="1" />
+		<edge from-layer="1295" from-port="2" to-layer="1296" to-port="0" />
+		<edge from-layer="1296" from-port="2" to-layer="1340" to-port="1" />
+		<edge from-layer="1296" from-port="2" to-layer="1298" to-port="0" />
+		<edge from-layer="1297" from-port="0" to-layer="1298" to-port="1" />
+		<edge from-layer="1298" from-port="2" to-layer="1301" to-port="0" />
+		<edge from-layer="1299" from-port="0" to-layer="1300" to-port="0" />
+		<edge from-layer="1300" from-port="1" to-layer="1301" to-port="1" />
+		<edge from-layer="1301" from-port="2" to-layer="1304" to-port="0" />
+		<edge from-layer="1302" from-port="0" to-layer="1303" to-port="0" />
+		<edge from-layer="1303" from-port="1" to-layer="1304" to-port="1" />
+		<edge from-layer="1304" from-port="2" to-layer="1307" to-port="0" />
+		<edge from-layer="1305" from-port="0" to-layer="1306" to-port="0" />
+		<edge from-layer="1306" from-port="1" to-layer="1307" to-port="1" />
+		<edge from-layer="1307" from-port="2" to-layer="1308" to-port="1" />
+		<edge from-layer="1308" from-port="2" to-layer="1323" to-port="0" />
+		<edge from-layer="1308" from-port="2" to-layer="1312" to-port="0" />
+		<edge from-layer="1308" from-port="2" to-layer="1333" to-port="0" />
+		<edge from-layer="1309" from-port="0" to-layer="1323" to-port="1" />
+		<edge from-layer="1310" from-port="0" to-layer="1321" to-port="0" />
+		<edge from-layer="1311" from-port="0" to-layer="1321" to-port="1" />
+		<edge from-layer="1312" from-port="1" to-layer="1315" to-port="0" />
+		<edge from-layer="1313" from-port="0" to-layer="1315" to-port="1" />
+		<edge from-layer="1314" from-port="0" to-layer="1315" to-port="2" />
+		<edge from-layer="1315" from-port="3" to-layer="1317" to-port="0" />
+		<edge from-layer="1316" from-port="0" to-layer="1317" to-port="1" />
+		<edge from-layer="1317" from-port="2" to-layer="1319" to-port="0" />
+		<edge from-layer="1318" from-port="0" to-layer="1319" to-port="1" />
+		<edge from-layer="1319" from-port="2" to-layer="1321" to-port="2" />
+		<edge from-layer="1319" from-port="2" to-layer="1330" to-port="0" />
+		<edge from-layer="1319" from-port="2" to-layer="1327" to-port="2" />
+		<edge from-layer="1320" from-port="0" to-layer="1321" to-port="3" />
+		<edge from-layer="1321" from-port="4" to-layer="1323" to-port="2" />
+		<edge from-layer="1322" from-port="0" to-layer="1323" to-port="3" />
+		<edge from-layer="1323" from-port="4" to-layer="1335" to-port="0" />
+		<edge from-layer="1324" from-port="0" to-layer="1327" to-port="0" />
+		<edge from-layer="1325" from-port="0" to-layer="1327" to-port="1" />
+		<edge from-layer="1325" from-port="0" to-layer="1331" to-port="1" />
+		<edge from-layer="1326" from-port="0" to-layer="1327" to-port="3" />
+		<edge from-layer="1326" from-port="0" to-layer="1331" to-port="3" />
+		<edge from-layer="1327" from-port="4" to-layer="1333" to-port="1" />
+		<edge from-layer="1328" from-port="0" to-layer="1331" to-port="0" />
+		<edge from-layer="1329" from-port="0" to-layer="1330" to-port="1" />
+		<edge from-layer="1330" from-port="2" to-layer="1331" to-port="2" />
+		<edge from-layer="1331" from-port="4" to-layer="1333" to-port="2" />
+		<edge from-layer="1332" from-port="0" to-layer="1333" to-port="3" />
+		<edge from-layer="1333" from-port="4" to-layer="1334" to-port="0" />
+		<edge from-layer="1334" from-port="1" to-layer="1335" to-port="1" />
+		<edge from-layer="1335" from-port="2" to-layer="1338" to-port="0" />
+		<edge from-layer="1336" from-port="0" to-layer="1337" to-port="0" />
+		<edge from-layer="1337" from-port="1" to-layer="1338" to-port="1" />
+		<edge from-layer="1338" from-port="2" to-layer="1339" to-port="1" />
+		<edge from-layer="1339" from-port="2" to-layer="1340" to-port="0" />
+		<edge from-layer="1340" from-port="2" to-layer="1342" to-port="0" />
+		<edge from-layer="1341" from-port="0" to-layer="1342" to-port="1" />
+		<edge from-layer="1342" from-port="2" to-layer="1344" to-port="0" />
+		<edge from-layer="1343" from-port="0" to-layer="1344" to-port="1" />
+		<edge from-layer="1344" from-port="2" to-layer="1347" to-port="0" />
+		<edge from-layer="1345" from-port="0" to-layer="1346" to-port="0" />
+		<edge from-layer="1346" from-port="1" to-layer="1347" to-port="1" />
+		<edge from-layer="1347" from-port="2" to-layer="1350" to-port="0" />
+		<edge from-layer="1348" from-port="0" to-layer="1349" to-port="0" />
+		<edge from-layer="1349" from-port="1" to-layer="1350" to-port="1" />
+		<edge from-layer="1350" from-port="2" to-layer="1351" to-port="0" />
+		<edge from-layer="1351" from-port="2" to-layer="1353" to-port="0" />
+		<edge from-layer="1351" from-port="2" to-layer="1356" to-port="0" />
+		<edge from-layer="1351" from-port="2" to-layer="1401" to-port="0" />
+		<edge from-layer="1351" from-port="2" to-layer="2391" to-port="1" />
+		<edge from-layer="1352" from-port="0" to-layer="1353" to-port="1" />
+		<edge from-layer="1353" from-port="2" to-layer="1355" to-port="0" />
+		<edge from-layer="1354" from-port="0" to-layer="1355" to-port="1" />
+		<edge from-layer="1355" from-port="2" to-layer="1357" to-port="0" />
+		<edge from-layer="1356" from-port="1" to-layer="1357" to-port="1" />
+		<edge from-layer="1357" from-port="2" to-layer="1360" to-port="0" />
+		<edge from-layer="1358" from-port="0" to-layer="1359" to-port="0" />
+		<edge from-layer="1359" from-port="1" to-layer="1360" to-port="1" />
+		<edge from-layer="1360" from-port="2" to-layer="1363" to-port="0" />
+		<edge from-layer="1361" from-port="0" to-layer="1362" to-port="0" />
+		<edge from-layer="1362" from-port="1" to-layer="1363" to-port="1" />
+		<edge from-layer="1363" from-port="2" to-layer="1364" to-port="0" />
+		<edge from-layer="1364" from-port="1" to-layer="1367" to-port="0" />
+		<edge from-layer="1365" from-port="0" to-layer="1366" to-port="0" />
+		<edge from-layer="1366" from-port="1" to-layer="1367" to-port="1" />
+		<edge from-layer="1367" from-port="2" to-layer="1370" to-port="0" />
+		<edge from-layer="1368" from-port="0" to-layer="1369" to-port="0" />
+		<edge from-layer="1369" from-port="1" to-layer="1370" to-port="1" />
+		<edge from-layer="1370" from-port="2" to-layer="1381" to-port="0" />
+		<edge from-layer="1371" from-port="0" to-layer="1372" to-port="0" />
+		<edge from-layer="1372" from-port="1" to-layer="1373" to-port="1" />
+		<edge from-layer="1373" from-port="2" to-layer="1376" to-port="0" />
+		<edge from-layer="1374" from-port="0" to-layer="1375" to-port="0" />
+		<edge from-layer="1375" from-port="1" to-layer="1376" to-port="1" />
+		<edge from-layer="1376" from-port="2" to-layer="1378" to-port="0" />
+		<edge from-layer="1377" from-port="0" to-layer="1378" to-port="1" />
+		<edge from-layer="1378" from-port="2" to-layer="1380" to-port="0" />
+		<edge from-layer="1379" from-port="0" to-layer="1380" to-port="1" />
+		<edge from-layer="1380" from-port="2" to-layer="1381" to-port="1" />
+		<edge from-layer="1381" from-port="2" to-layer="1383" to-port="0" />
+		<edge from-layer="1381" from-port="2" to-layer="1386" to-port="0" />
+		<edge from-layer="1382" from-port="0" to-layer="1383" to-port="1" />
+		<edge from-layer="1383" from-port="2" to-layer="1385" to-port="0" />
+		<edge from-layer="1384" from-port="0" to-layer="1385" to-port="1" />
+		<edge from-layer="1385" from-port="2" to-layer="1387" to-port="0" />
+		<edge from-layer="1386" from-port="1" to-layer="1387" to-port="1" />
+		<edge from-layer="1387" from-port="2" to-layer="1390" to-port="0" />
+		<edge from-layer="1388" from-port="0" to-layer="1389" to-port="0" />
+		<edge from-layer="1389" from-port="1" to-layer="1390" to-port="1" />
+		<edge from-layer="1390" from-port="2" to-layer="1393" to-port="0" />
+		<edge from-layer="1391" from-port="0" to-layer="1392" to-port="0" />
+		<edge from-layer="1392" from-port="1" to-layer="1393" to-port="1" />
+		<edge from-layer="1393" from-port="2" to-layer="1394" to-port="0" />
+		<edge from-layer="1394" from-port="1" to-layer="1397" to-port="0" />
+		<edge from-layer="1395" from-port="0" to-layer="1396" to-port="0" />
+		<edge from-layer="1396" from-port="1" to-layer="1397" to-port="1" />
+		<edge from-layer="1397" from-port="2" to-layer="1400" to-port="0" />
+		<edge from-layer="1398" from-port="0" to-layer="1399" to-port="0" />
+		<edge from-layer="1399" from-port="1" to-layer="1400" to-port="1" />
+		<edge from-layer="1400" from-port="2" to-layer="1401" to-port="1" />
+		<edge from-layer="1401" from-port="2" to-layer="1403" to-port="0" />
+		<edge from-layer="1401" from-port="2" to-layer="1582" to-port="1" />
+		<edge from-layer="1401" from-port="2" to-layer="1406" to-port="0" />
+		<edge from-layer="1402" from-port="0" to-layer="1403" to-port="1" />
+		<edge from-layer="1403" from-port="2" to-layer="1405" to-port="0" />
+		<edge from-layer="1404" from-port="0" to-layer="1405" to-port="1" />
+		<edge from-layer="1405" from-port="2" to-layer="1407" to-port="0" />
+		<edge from-layer="1406" from-port="1" to-layer="1407" to-port="1" />
+		<edge from-layer="1407" from-port="2" to-layer="1410" to-port="0" />
+		<edge from-layer="1408" from-port="0" to-layer="1409" to-port="0" />
+		<edge from-layer="1409" from-port="1" to-layer="1410" to-port="1" />
+		<edge from-layer="1410" from-port="2" to-layer="1413" to-port="0" />
+		<edge from-layer="1411" from-port="0" to-layer="1412" to-port="0" />
+		<edge from-layer="1412" from-port="1" to-layer="1413" to-port="1" />
+		<edge from-layer="1413" from-port="2" to-layer="1416" to-port="0" />
+		<edge from-layer="1414" from-port="0" to-layer="1415" to-port="0" />
+		<edge from-layer="1415" from-port="1" to-layer="1416" to-port="1" />
+		<edge from-layer="1416" from-port="2" to-layer="1419" to-port="0" />
+		<edge from-layer="1417" from-port="0" to-layer="1418" to-port="0" />
+		<edge from-layer="1418" from-port="1" to-layer="1419" to-port="1" />
+		<edge from-layer="1419" from-port="2" to-layer="1421" to-port="0" />
+		<edge from-layer="1420" from-port="0" to-layer="1421" to-port="1" />
+		<edge from-layer="1421" from-port="2" to-layer="1423" to-port="0" />
+		<edge from-layer="1422" from-port="0" to-layer="1423" to-port="1" />
+		<edge from-layer="1423" from-port="2" to-layer="1425" to-port="0" />
+		<edge from-layer="1423" from-port="2" to-layer="1475" to-port="1" />
+		<edge from-layer="1424" from-port="0" to-layer="1425" to-port="1" />
+		<edge from-layer="1425" from-port="2" to-layer="1428" to-port="0" />
+		<edge from-layer="1426" from-port="0" to-layer="1427" to-port="0" />
+		<edge from-layer="1427" from-port="1" to-layer="1428" to-port="1" />
+		<edge from-layer="1428" from-port="2" to-layer="1431" to-port="0" />
+		<edge from-layer="1429" from-port="0" to-layer="1430" to-port="0" />
+		<edge from-layer="1430" from-port="1" to-layer="1431" to-port="1" />
+		<edge from-layer="1431" from-port="2" to-layer="1434" to-port="0" />
+		<edge from-layer="1431" from-port="2" to-layer="1443" to-port="0" />
+		<edge from-layer="1431" from-port="2" to-layer="1457" to-port="0" />
+		<edge from-layer="1432" from-port="0" to-layer="1433" to-port="0" />
+		<edge from-layer="1433" from-port="1" to-layer="1434" to-port="1" />
+		<edge from-layer="1434" from-port="2" to-layer="1436" to-port="0" />
+		<edge from-layer="1435" from-port="0" to-layer="1436" to-port="1" />
+		<edge from-layer="1436" from-port="2" to-layer="1438" to-port="0" />
+		<edge from-layer="1437" from-port="0" to-layer="1438" to-port="1" />
+		<edge from-layer="1438" from-port="2" to-layer="1440" to-port="0" />
+		<edge from-layer="1439" from-port="0" to-layer="1440" to-port="1" />
+		<edge from-layer="1440" from-port="2" to-layer="1453" to-port="0" />
+		<edge from-layer="1441" from-port="0" to-layer="1442" to-port="0" />
+		<edge from-layer="1442" from-port="1" to-layer="1443" to-port="1" />
+		<edge from-layer="1443" from-port="2" to-layer="1445" to-port="0" />
+		<edge from-layer="1444" from-port="0" to-layer="1445" to-port="1" />
+		<edge from-layer="1445" from-port="2" to-layer="1447" to-port="0" />
+		<edge from-layer="1446" from-port="0" to-layer="1447" to-port="1" />
+		<edge from-layer="1447" from-port="2" to-layer="1449" to-port="0" />
+		<edge from-layer="1448" from-port="0" to-layer="1449" to-port="1" />
+		<edge from-layer="1449" from-port="2" to-layer="1452" to-port="0" />
+		<edge from-layer="1450" from-port="0" to-layer="1451" to-port="0" />
+		<edge from-layer="1451" from-port="1" to-layer="1452" to-port="1" />
+		<edge from-layer="1452" from-port="2" to-layer="1453" to-port="1" />
+		<edge from-layer="1453" from-port="2" to-layer="1454" to-port="0" />
+		<edge from-layer="1454" from-port="1" to-layer="1464" to-port="0" />
+		<edge from-layer="1455" from-port="0" to-layer="1456" to-port="0" />
+		<edge from-layer="1456" from-port="1" to-layer="1457" to-port="1" />
+		<edge from-layer="1457" from-port="2" to-layer="1459" to-port="0" />
+		<edge from-layer="1458" from-port="0" to-layer="1459" to-port="1" />
+		<edge from-layer="1459" from-port="2" to-layer="1461" to-port="0" />
+		<edge from-layer="1460" from-port="0" to-layer="1461" to-port="1" />
+		<edge from-layer="1461" from-port="2" to-layer="1463" to-port="0" />
+		<edge from-layer="1462" from-port="0" to-layer="1463" to-port="1" />
+		<edge from-layer="1463" from-port="2" to-layer="1464" to-port="1" />
+		<edge from-layer="1464" from-port="2" to-layer="1466" to-port="0" />
+		<edge from-layer="1465" from-port="0" to-layer="1466" to-port="1" />
+		<edge from-layer="1466" from-port="2" to-layer="1468" to-port="0" />
+		<edge from-layer="1467" from-port="0" to-layer="1468" to-port="1" />
+		<edge from-layer="1468" from-port="2" to-layer="1470" to-port="0" />
+		<edge from-layer="1469" from-port="0" to-layer="1470" to-port="1" />
+		<edge from-layer="1470" from-port="2" to-layer="1473" to-port="0" />
+		<edge from-layer="1471" from-port="0" to-layer="1472" to-port="0" />
+		<edge from-layer="1472" from-port="1" to-layer="1473" to-port="1" />
+		<edge from-layer="1473" from-port="2" to-layer="1474" to-port="1" />
+		<edge from-layer="1474" from-port="2" to-layer="1475" to-port="0" />
+		<edge from-layer="1475" from-port="2" to-layer="1477" to-port="0" />
+		<edge from-layer="1475" from-port="2" to-layer="1527" to-port="1" />
+		<edge from-layer="1476" from-port="0" to-layer="1477" to-port="1" />
+		<edge from-layer="1477" from-port="2" to-layer="1480" to-port="0" />
+		<edge from-layer="1478" from-port="0" to-layer="1479" to-port="0" />
+		<edge from-layer="1479" from-port="1" to-layer="1480" to-port="1" />
+		<edge from-layer="1480" from-port="2" to-layer="1483" to-port="0" />
+		<edge from-layer="1481" from-port="0" to-layer="1482" to-port="0" />
+		<edge from-layer="1482" from-port="1" to-layer="1483" to-port="1" />
+		<edge from-layer="1483" from-port="2" to-layer="1486" to-port="0" />
+		<edge from-layer="1484" from-port="0" to-layer="1485" to-port="0" />
+		<edge from-layer="1485" from-port="1" to-layer="1486" to-port="1" />
+		<edge from-layer="1486" from-port="2" to-layer="1488" to-port="0" />
+		<edge from-layer="1487" from-port="0" to-layer="1488" to-port="1" />
+		<edge from-layer="1488" from-port="2" to-layer="1490" to-port="0" />
+		<edge from-layer="1489" from-port="0" to-layer="1490" to-port="1" />
+		<edge from-layer="1490" from-port="2" to-layer="1492" to-port="0" />
+		<edge from-layer="1491" from-port="0" to-layer="1492" to-port="1" />
+		<edge from-layer="1492" from-port="2" to-layer="1505" to-port="0" />
+		<edge from-layer="1493" from-port="0" to-layer="1494" to-port="0" />
+		<edge from-layer="1494" from-port="1" to-layer="1495" to-port="1" />
+		<edge from-layer="1495" from-port="2" to-layer="1497" to-port="0" />
+		<edge from-layer="1496" from-port="0" to-layer="1497" to-port="1" />
+		<edge from-layer="1497" from-port="2" to-layer="1499" to-port="0" />
+		<edge from-layer="1498" from-port="0" to-layer="1499" to-port="1" />
+		<edge from-layer="1499" from-port="2" to-layer="1501" to-port="0" />
+		<edge from-layer="1500" from-port="0" to-layer="1501" to-port="1" />
+		<edge from-layer="1501" from-port="2" to-layer="1504" to-port="0" />
+		<edge from-layer="1502" from-port="0" to-layer="1503" to-port="0" />
+		<edge from-layer="1503" from-port="1" to-layer="1504" to-port="1" />
+		<edge from-layer="1504" from-port="2" to-layer="1505" to-port="1" />
+		<edge from-layer="1505" from-port="2" to-layer="1506" to-port="0" />
+		<edge from-layer="1506" from-port="1" to-layer="1516" to-port="0" />
+		<edge from-layer="1507" from-port="0" to-layer="1508" to-port="0" />
+		<edge from-layer="1508" from-port="1" to-layer="1509" to-port="1" />
+		<edge from-layer="1509" from-port="2" to-layer="1511" to-port="0" />
+		<edge from-layer="1510" from-port="0" to-layer="1511" to-port="1" />
+		<edge from-layer="1511" from-port="2" to-layer="1513" to-port="0" />
+		<edge from-layer="1512" from-port="0" to-layer="1513" to-port="1" />
+		<edge from-layer="1513" from-port="2" to-layer="1515" to-port="0" />
+		<edge from-layer="1514" from-port="0" to-layer="1515" to-port="1" />
+		<edge from-layer="1515" from-port="2" to-layer="1516" to-port="1" />
+		<edge from-layer="1516" from-port="2" to-layer="1518" to-port="0" />
+		<edge from-layer="1517" from-port="0" to-layer="1518" to-port="1" />
+		<edge from-layer="1518" from-port="2" to-layer="1520" to-port="0" />
+		<edge from-layer="1519" from-port="0" to-layer="1520" to-port="1" />
+		<edge from-layer="1520" from-port="2" to-layer="1522" to-port="0" />
+		<edge from-layer="1521" from-port="0" to-layer="1522" to-port="1" />
+		<edge from-layer="1522" from-port="2" to-layer="1525" to-port="0" />
+		<edge from-layer="1523" from-port="0" to-layer="1524" to-port="0" />
+		<edge from-layer="1524" from-port="1" to-layer="1525" to-port="1" />
+		<edge from-layer="1525" from-port="2" to-layer="1526" to-port="1" />
+		<edge from-layer="1526" from-port="2" to-layer="1527" to-port="0" />
+		<edge from-layer="1527" from-port="2" to-layer="1571" to-port="1" />
+		<edge from-layer="1527" from-port="2" to-layer="1529" to-port="0" />
+		<edge from-layer="1528" from-port="0" to-layer="1529" to-port="1" />
+		<edge from-layer="1529" from-port="2" to-layer="1532" to-port="0" />
+		<edge from-layer="1530" from-port="0" to-layer="1531" to-port="0" />
+		<edge from-layer="1531" from-port="1" to-layer="1532" to-port="1" />
+		<edge from-layer="1532" from-port="2" to-layer="1535" to-port="0" />
+		<edge from-layer="1533" from-port="0" to-layer="1534" to-port="0" />
+		<edge from-layer="1534" from-port="1" to-layer="1535" to-port="1" />
+		<edge from-layer="1535" from-port="2" to-layer="1538" to-port="0" />
+		<edge from-layer="1536" from-port="0" to-layer="1537" to-port="0" />
+		<edge from-layer="1537" from-port="1" to-layer="1538" to-port="1" />
+		<edge from-layer="1538" from-port="2" to-layer="1539" to-port="1" />
+		<edge from-layer="1539" from-port="2" to-layer="1564" to-port="0" />
+		<edge from-layer="1539" from-port="2" to-layer="1543" to-port="0" />
+		<edge from-layer="1539" from-port="2" to-layer="1554" to-port="0" />
+		<edge from-layer="1540" from-port="0" to-layer="1554" to-port="1" />
+		<edge from-layer="1541" from-port="0" to-layer="1552" to-port="0" />
+		<edge from-layer="1542" from-port="0" to-layer="1552" to-port="1" />
+		<edge from-layer="1543" from-port="1" to-layer="1546" to-port="0" />
+		<edge from-layer="1544" from-port="0" to-layer="1546" to-port="1" />
+		<edge from-layer="1545" from-port="0" to-layer="1546" to-port="2" />
+		<edge from-layer="1546" from-port="3" to-layer="1548" to-port="0" />
+		<edge from-layer="1547" from-port="0" to-layer="1548" to-port="1" />
+		<edge from-layer="1548" from-port="2" to-layer="1550" to-port="0" />
+		<edge from-layer="1549" from-port="0" to-layer="1550" to-port="1" />
+		<edge from-layer="1550" from-port="2" to-layer="1558" to-port="2" />
+		<edge from-layer="1550" from-port="2" to-layer="1552" to-port="2" />
+		<edge from-layer="1550" from-port="2" to-layer="1561" to-port="0" />
+		<edge from-layer="1551" from-port="0" to-layer="1552" to-port="3" />
+		<edge from-layer="1552" from-port="4" to-layer="1554" to-port="2" />
+		<edge from-layer="1553" from-port="0" to-layer="1554" to-port="3" />
+		<edge from-layer="1554" from-port="4" to-layer="1566" to-port="0" />
+		<edge from-layer="1555" from-port="0" to-layer="1558" to-port="0" />
+		<edge from-layer="1556" from-port="0" to-layer="1558" to-port="1" />
+		<edge from-layer="1556" from-port="0" to-layer="1562" to-port="1" />
+		<edge from-layer="1557" from-port="0" to-layer="1562" to-port="3" />
+		<edge from-layer="1557" from-port="0" to-layer="1558" to-port="3" />
+		<edge from-layer="1558" from-port="4" to-layer="1564" to-port="1" />
+		<edge from-layer="1559" from-port="0" to-layer="1562" to-port="0" />
+		<edge from-layer="1560" from-port="0" to-layer="1561" to-port="1" />
+		<edge from-layer="1561" from-port="2" to-layer="1562" to-port="2" />
+		<edge from-layer="1562" from-port="4" to-layer="1564" to-port="2" />
+		<edge from-layer="1563" from-port="0" to-layer="1564" to-port="3" />
+		<edge from-layer="1564" from-port="4" to-layer="1565" to-port="0" />
+		<edge from-layer="1565" from-port="1" to-layer="1566" to-port="1" />
+		<edge from-layer="1566" from-port="2" to-layer="1569" to-port="0" />
+		<edge from-layer="1567" from-port="0" to-layer="1568" to-port="0" />
+		<edge from-layer="1568" from-port="1" to-layer="1569" to-port="1" />
+		<edge from-layer="1569" from-port="2" to-layer="1570" to-port="1" />
+		<edge from-layer="1570" from-port="2" to-layer="1571" to-port="0" />
+		<edge from-layer="1571" from-port="2" to-layer="1573" to-port="0" />
+		<edge from-layer="1572" from-port="0" to-layer="1573" to-port="1" />
+		<edge from-layer="1573" from-port="2" to-layer="1575" to-port="0" />
+		<edge from-layer="1574" from-port="0" to-layer="1575" to-port="1" />
+		<edge from-layer="1575" from-port="2" to-layer="1578" to-port="0" />
+		<edge from-layer="1576" from-port="0" to-layer="1577" to-port="0" />
+		<edge from-layer="1577" from-port="1" to-layer="1578" to-port="1" />
+		<edge from-layer="1578" from-port="2" to-layer="1581" to-port="0" />
+		<edge from-layer="1579" from-port="0" to-layer="1580" to-port="0" />
+		<edge from-layer="1580" from-port="1" to-layer="1581" to-port="1" />
+		<edge from-layer="1581" from-port="2" to-layer="1582" to-port="0" />
+		<edge from-layer="1582" from-port="2" to-layer="1585" to-port="0" />
+		<edge from-layer="1582" from-port="2" to-layer="2153" to-port="1" />
+		<edge from-layer="1583" from-port="0" to-layer="1584" to-port="0" />
+		<edge from-layer="1584" from-port="1" to-layer="1585" to-port="1" />
+		<edge from-layer="1585" from-port="2" to-layer="1588" to-port="0" />
+		<edge from-layer="1586" from-port="0" to-layer="1587" to-port="0" />
+		<edge from-layer="1587" from-port="1" to-layer="1588" to-port="1" />
+		<edge from-layer="1588" from-port="2" to-layer="1638" to-port="0" />
+		<edge from-layer="1588" from-port="2" to-layer="2084" to-port="1" />
+		<edge from-layer="1588" from-port="2" to-layer="1590" to-port="0" />
+		<edge from-layer="1588" from-port="2" to-layer="1593" to-port="0" />
+		<edge from-layer="1589" from-port="0" to-layer="1590" to-port="1" />
+		<edge from-layer="1590" from-port="2" to-layer="1592" to-port="0" />
+		<edge from-layer="1591" from-port="0" to-layer="1592" to-port="1" />
+		<edge from-layer="1592" from-port="2" to-layer="1594" to-port="0" />
+		<edge from-layer="1593" from-port="1" to-layer="1594" to-port="1" />
+		<edge from-layer="1594" from-port="2" to-layer="1597" to-port="0" />
+		<edge from-layer="1595" from-port="0" to-layer="1596" to-port="0" />
+		<edge from-layer="1596" from-port="1" to-layer="1597" to-port="1" />
+		<edge from-layer="1597" from-port="2" to-layer="1600" to-port="0" />
+		<edge from-layer="1598" from-port="0" to-layer="1599" to-port="0" />
+		<edge from-layer="1599" from-port="1" to-layer="1600" to-port="1" />
+		<edge from-layer="1600" from-port="2" to-layer="1601" to-port="0" />
+		<edge from-layer="1601" from-port="1" to-layer="1604" to-port="0" />
+		<edge from-layer="1602" from-port="0" to-layer="1603" to-port="0" />
+		<edge from-layer="1603" from-port="1" to-layer="1604" to-port="1" />
+		<edge from-layer="1604" from-port="2" to-layer="1607" to-port="0" />
+		<edge from-layer="1605" from-port="0" to-layer="1606" to-port="0" />
+		<edge from-layer="1606" from-port="1" to-layer="1607" to-port="1" />
+		<edge from-layer="1607" from-port="2" to-layer="1618" to-port="0" />
+		<edge from-layer="1608" from-port="0" to-layer="1609" to-port="0" />
+		<edge from-layer="1609" from-port="1" to-layer="1610" to-port="1" />
+		<edge from-layer="1610" from-port="2" to-layer="1613" to-port="0" />
+		<edge from-layer="1611" from-port="0" to-layer="1612" to-port="0" />
+		<edge from-layer="1612" from-port="1" to-layer="1613" to-port="1" />
+		<edge from-layer="1613" from-port="2" to-layer="1615" to-port="0" />
+		<edge from-layer="1614" from-port="0" to-layer="1615" to-port="1" />
+		<edge from-layer="1615" from-port="2" to-layer="1617" to-port="0" />
+		<edge from-layer="1616" from-port="0" to-layer="1617" to-port="1" />
+		<edge from-layer="1617" from-port="2" to-layer="1618" to-port="1" />
+		<edge from-layer="1618" from-port="2" to-layer="1620" to-port="0" />
+		<edge from-layer="1618" from-port="2" to-layer="1623" to-port="0" />
+		<edge from-layer="1619" from-port="0" to-layer="1620" to-port="1" />
+		<edge from-layer="1620" from-port="2" to-layer="1622" to-port="0" />
+		<edge from-layer="1621" from-port="0" to-layer="1622" to-port="1" />
+		<edge from-layer="1622" from-port="2" to-layer="1624" to-port="0" />
+		<edge from-layer="1623" from-port="1" to-layer="1624" to-port="1" />
+		<edge from-layer="1624" from-port="2" to-layer="1627" to-port="0" />
+		<edge from-layer="1625" from-port="0" to-layer="1626" to-port="0" />
+		<edge from-layer="1626" from-port="1" to-layer="1627" to-port="1" />
+		<edge from-layer="1627" from-port="2" to-layer="1630" to-port="0" />
+		<edge from-layer="1628" from-port="0" to-layer="1629" to-port="0" />
+		<edge from-layer="1629" from-port="1" to-layer="1630" to-port="1" />
+		<edge from-layer="1630" from-port="2" to-layer="1631" to-port="0" />
+		<edge from-layer="1631" from-port="1" to-layer="1634" to-port="0" />
+		<edge from-layer="1632" from-port="0" to-layer="1633" to-port="0" />
+		<edge from-layer="1633" from-port="1" to-layer="1634" to-port="1" />
+		<edge from-layer="1634" from-port="2" to-layer="1637" to-port="0" />
+		<edge from-layer="1635" from-port="0" to-layer="1636" to-port="0" />
+		<edge from-layer="1636" from-port="1" to-layer="1637" to-port="1" />
+		<edge from-layer="1637" from-port="2" to-layer="1638" to-port="1" />
+		<edge from-layer="1638" from-port="2" to-layer="2027" to-port="1" />
+		<edge from-layer="1638" from-port="2" to-layer="1640" to-port="0" />
+		<edge from-layer="1638" from-port="2" to-layer="1643" to-port="0" />
+		<edge from-layer="1638" from-port="2" to-layer="1688" to-port="0" />
+		<edge from-layer="1639" from-port="0" to-layer="1640" to-port="1" />
+		<edge from-layer="1640" from-port="2" to-layer="1642" to-port="0" />
+		<edge from-layer="1641" from-port="0" to-layer="1642" to-port="1" />
+		<edge from-layer="1642" from-port="2" to-layer="1644" to-port="0" />
+		<edge from-layer="1643" from-port="1" to-layer="1644" to-port="1" />
+		<edge from-layer="1644" from-port="2" to-layer="1647" to-port="0" />
+		<edge from-layer="1645" from-port="0" to-layer="1646" to-port="0" />
+		<edge from-layer="1646" from-port="1" to-layer="1647" to-port="1" />
+		<edge from-layer="1647" from-port="2" to-layer="1650" to-port="0" />
+		<edge from-layer="1648" from-port="0" to-layer="1649" to-port="0" />
+		<edge from-layer="1649" from-port="1" to-layer="1650" to-port="1" />
+		<edge from-layer="1650" from-port="2" to-layer="1651" to-port="0" />
+		<edge from-layer="1651" from-port="1" to-layer="1654" to-port="0" />
+		<edge from-layer="1652" from-port="0" to-layer="1653" to-port="0" />
+		<edge from-layer="1653" from-port="1" to-layer="1654" to-port="1" />
+		<edge from-layer="1654" from-port="2" to-layer="1657" to-port="0" />
+		<edge from-layer="1655" from-port="0" to-layer="1656" to-port="0" />
+		<edge from-layer="1656" from-port="1" to-layer="1657" to-port="1" />
+		<edge from-layer="1657" from-port="2" to-layer="1668" to-port="0" />
+		<edge from-layer="1658" from-port="0" to-layer="1659" to-port="0" />
+		<edge from-layer="1659" from-port="1" to-layer="1660" to-port="1" />
+		<edge from-layer="1660" from-port="2" to-layer="1663" to-port="0" />
+		<edge from-layer="1661" from-port="0" to-layer="1662" to-port="0" />
+		<edge from-layer="1662" from-port="1" to-layer="1663" to-port="1" />
+		<edge from-layer="1663" from-port="2" to-layer="1665" to-port="0" />
+		<edge from-layer="1664" from-port="0" to-layer="1665" to-port="1" />
+		<edge from-layer="1665" from-port="2" to-layer="1667" to-port="0" />
+		<edge from-layer="1666" from-port="0" to-layer="1667" to-port="1" />
+		<edge from-layer="1667" from-port="2" to-layer="1668" to-port="1" />
+		<edge from-layer="1668" from-port="2" to-layer="1670" to-port="0" />
+		<edge from-layer="1668" from-port="2" to-layer="1673" to-port="0" />
+		<edge from-layer="1669" from-port="0" to-layer="1670" to-port="1" />
+		<edge from-layer="1670" from-port="2" to-layer="1672" to-port="0" />
+		<edge from-layer="1671" from-port="0" to-layer="1672" to-port="1" />
+		<edge from-layer="1672" from-port="2" to-layer="1674" to-port="0" />
+		<edge from-layer="1673" from-port="1" to-layer="1674" to-port="1" />
+		<edge from-layer="1674" from-port="2" to-layer="1677" to-port="0" />
+		<edge from-layer="1675" from-port="0" to-layer="1676" to-port="0" />
+		<edge from-layer="1676" from-port="1" to-layer="1677" to-port="1" />
+		<edge from-layer="1677" from-port="2" to-layer="1680" to-port="0" />
+		<edge from-layer="1678" from-port="0" to-layer="1679" to-port="0" />
+		<edge from-layer="1679" from-port="1" to-layer="1680" to-port="1" />
+		<edge from-layer="1680" from-port="2" to-layer="1681" to-port="0" />
+		<edge from-layer="1681" from-port="1" to-layer="1684" to-port="0" />
+		<edge from-layer="1682" from-port="0" to-layer="1683" to-port="0" />
+		<edge from-layer="1683" from-port="1" to-layer="1684" to-port="1" />
+		<edge from-layer="1684" from-port="2" to-layer="1687" to-port="0" />
+		<edge from-layer="1685" from-port="0" to-layer="1686" to-port="0" />
+		<edge from-layer="1686" from-port="1" to-layer="1687" to-port="1" />
+		<edge from-layer="1687" from-port="2" to-layer="1688" to-port="1" />
+		<edge from-layer="1688" from-port="2" to-layer="1693" to-port="0" />
+		<edge from-layer="1688" from-port="2" to-layer="1970" to-port="1" />
+		<edge from-layer="1688" from-port="2" to-layer="1738" to-port="0" />
+		<edge from-layer="1688" from-port="2" to-layer="1690" to-port="0" />
+		<edge from-layer="1689" from-port="0" to-layer="1690" to-port="1" />
+		<edge from-layer="1690" from-port="2" to-layer="1692" to-port="0" />
+		<edge from-layer="1691" from-port="0" to-layer="1692" to-port="1" />
+		<edge from-layer="1692" from-port="2" to-layer="1694" to-port="0" />
+		<edge from-layer="1693" from-port="1" to-layer="1694" to-port="1" />
+		<edge from-layer="1694" from-port="2" to-layer="1697" to-port="0" />
+		<edge from-layer="1695" from-port="0" to-layer="1696" to-port="0" />
+		<edge from-layer="1696" from-port="1" to-layer="1697" to-port="1" />
+		<edge from-layer="1697" from-port="2" to-layer="1700" to-port="0" />
+		<edge from-layer="1698" from-port="0" to-layer="1699" to-port="0" />
+		<edge from-layer="1699" from-port="1" to-layer="1700" to-port="1" />
+		<edge from-layer="1700" from-port="2" to-layer="1701" to-port="0" />
+		<edge from-layer="1701" from-port="1" to-layer="1704" to-port="0" />
+		<edge from-layer="1702" from-port="0" to-layer="1703" to-port="0" />
+		<edge from-layer="1703" from-port="1" to-layer="1704" to-port="1" />
+		<edge from-layer="1704" from-port="2" to-layer="1707" to-port="0" />
+		<edge from-layer="1705" from-port="0" to-layer="1706" to-port="0" />
+		<edge from-layer="1706" from-port="1" to-layer="1707" to-port="1" />
+		<edge from-layer="1707" from-port="2" to-layer="1718" to-port="0" />
+		<edge from-layer="1708" from-port="0" to-layer="1709" to-port="0" />
+		<edge from-layer="1709" from-port="1" to-layer="1710" to-port="1" />
+		<edge from-layer="1710" from-port="2" to-layer="1713" to-port="0" />
+		<edge from-layer="1711" from-port="0" to-layer="1712" to-port="0" />
+		<edge from-layer="1712" from-port="1" to-layer="1713" to-port="1" />
+		<edge from-layer="1713" from-port="2" to-layer="1715" to-port="0" />
+		<edge from-layer="1714" from-port="0" to-layer="1715" to-port="1" />
+		<edge from-layer="1715" from-port="2" to-layer="1717" to-port="0" />
+		<edge from-layer="1716" from-port="0" to-layer="1717" to-port="1" />
+		<edge from-layer="1717" from-port="2" to-layer="1718" to-port="1" />
+		<edge from-layer="1718" from-port="2" to-layer="1720" to-port="0" />
+		<edge from-layer="1718" from-port="2" to-layer="1723" to-port="0" />
+		<edge from-layer="1719" from-port="0" to-layer="1720" to-port="1" />
+		<edge from-layer="1720" from-port="2" to-layer="1722" to-port="0" />
+		<edge from-layer="1721" from-port="0" to-layer="1722" to-port="1" />
+		<edge from-layer="1722" from-port="2" to-layer="1724" to-port="0" />
+		<edge from-layer="1723" from-port="1" to-layer="1724" to-port="1" />
+		<edge from-layer="1724" from-port="2" to-layer="1727" to-port="0" />
+		<edge from-layer="1725" from-port="0" to-layer="1726" to-port="0" />
+		<edge from-layer="1726" from-port="1" to-layer="1727" to-port="1" />
+		<edge from-layer="1727" from-port="2" to-layer="1730" to-port="0" />
+		<edge from-layer="1728" from-port="0" to-layer="1729" to-port="0" />
+		<edge from-layer="1729" from-port="1" to-layer="1730" to-port="1" />
+		<edge from-layer="1730" from-port="2" to-layer="1731" to-port="0" />
+		<edge from-layer="1731" from-port="1" to-layer="1734" to-port="0" />
+		<edge from-layer="1732" from-port="0" to-layer="1733" to-port="0" />
+		<edge from-layer="1733" from-port="1" to-layer="1734" to-port="1" />
+		<edge from-layer="1734" from-port="2" to-layer="1737" to-port="0" />
+		<edge from-layer="1735" from-port="0" to-layer="1736" to-port="0" />
+		<edge from-layer="1736" from-port="1" to-layer="1737" to-port="1" />
+		<edge from-layer="1737" from-port="2" to-layer="1738" to-port="1" />
+		<edge from-layer="1738" from-port="2" to-layer="1919" to-port="1" />
+		<edge from-layer="1738" from-port="2" to-layer="1743" to-port="0" />
+		<edge from-layer="1738" from-port="2" to-layer="1740" to-port="0" />
+		<edge from-layer="1739" from-port="0" to-layer="1740" to-port="1" />
+		<edge from-layer="1740" from-port="2" to-layer="1742" to-port="0" />
+		<edge from-layer="1741" from-port="0" to-layer="1742" to-port="1" />
+		<edge from-layer="1742" from-port="2" to-layer="1744" to-port="0" />
+		<edge from-layer="1743" from-port="1" to-layer="1744" to-port="1" />
+		<edge from-layer="1744" from-port="2" to-layer="1747" to-port="0" />
+		<edge from-layer="1745" from-port="0" to-layer="1746" to-port="0" />
+		<edge from-layer="1746" from-port="1" to-layer="1747" to-port="1" />
+		<edge from-layer="1747" from-port="2" to-layer="1750" to-port="0" />
+		<edge from-layer="1748" from-port="0" to-layer="1749" to-port="0" />
+		<edge from-layer="1749" from-port="1" to-layer="1750" to-port="1" />
+		<edge from-layer="1750" from-port="2" to-layer="1753" to-port="0" />
+		<edge from-layer="1751" from-port="0" to-layer="1752" to-port="0" />
+		<edge from-layer="1752" from-port="1" to-layer="1753" to-port="1" />
+		<edge from-layer="1753" from-port="2" to-layer="1756" to-port="0" />
+		<edge from-layer="1754" from-port="0" to-layer="1755" to-port="0" />
+		<edge from-layer="1755" from-port="1" to-layer="1756" to-port="1" />
+		<edge from-layer="1756" from-port="2" to-layer="1758" to-port="0" />
+		<edge from-layer="1757" from-port="0" to-layer="1758" to-port="1" />
+		<edge from-layer="1758" from-port="2" to-layer="1760" to-port="0" />
+		<edge from-layer="1759" from-port="0" to-layer="1760" to-port="1" />
+		<edge from-layer="1760" from-port="2" to-layer="1762" to-port="0" />
+		<edge from-layer="1760" from-port="2" to-layer="1812" to-port="1" />
+		<edge from-layer="1761" from-port="0" to-layer="1762" to-port="1" />
+		<edge from-layer="1762" from-port="2" to-layer="1765" to-port="0" />
+		<edge from-layer="1763" from-port="0" to-layer="1764" to-port="0" />
+		<edge from-layer="1764" from-port="1" to-layer="1765" to-port="1" />
+		<edge from-layer="1765" from-port="2" to-layer="1768" to-port="0" />
+		<edge from-layer="1766" from-port="0" to-layer="1767" to-port="0" />
+		<edge from-layer="1767" from-port="1" to-layer="1768" to-port="1" />
+		<edge from-layer="1768" from-port="2" to-layer="1771" to-port="0" />
+		<edge from-layer="1768" from-port="2" to-layer="1780" to-port="0" />
+		<edge from-layer="1768" from-port="2" to-layer="1794" to-port="0" />
+		<edge from-layer="1769" from-port="0" to-layer="1770" to-port="0" />
+		<edge from-layer="1770" from-port="1" to-layer="1771" to-port="1" />
+		<edge from-layer="1771" from-port="2" to-layer="1773" to-port="0" />
+		<edge from-layer="1772" from-port="0" to-layer="1773" to-port="1" />
+		<edge from-layer="1773" from-port="2" to-layer="1775" to-port="0" />
+		<edge from-layer="1774" from-port="0" to-layer="1775" to-port="1" />
+		<edge from-layer="1775" from-port="2" to-layer="1777" to-port="0" />
+		<edge from-layer="1776" from-port="0" to-layer="1777" to-port="1" />
+		<edge from-layer="1777" from-port="2" to-layer="1790" to-port="0" />
+		<edge from-layer="1778" from-port="0" to-layer="1779" to-port="0" />
+		<edge from-layer="1779" from-port="1" to-layer="1780" to-port="1" />
+		<edge from-layer="1780" from-port="2" to-layer="1782" to-port="0" />
+		<edge from-layer="1781" from-port="0" to-layer="1782" to-port="1" />
+		<edge from-layer="1782" from-port="2" to-layer="1784" to-port="0" />
+		<edge from-layer="1783" from-port="0" to-layer="1784" to-port="1" />
+		<edge from-layer="1784" from-port="2" to-layer="1786" to-port="0" />
+		<edge from-layer="1785" from-port="0" to-layer="1786" to-port="1" />
+		<edge from-layer="1786" from-port="2" to-layer="1789" to-port="0" />
+		<edge from-layer="1787" from-port="0" to-layer="1788" to-port="0" />
+		<edge from-layer="1788" from-port="1" to-layer="1789" to-port="1" />
+		<edge from-layer="1789" from-port="2" to-layer="1790" to-port="1" />
+		<edge from-layer="1790" from-port="2" to-layer="1791" to-port="0" />
+		<edge from-layer="1791" from-port="1" to-layer="1801" to-port="0" />
+		<edge from-layer="1792" from-port="0" to-layer="1793" to-port="0" />
+		<edge from-layer="1793" from-port="1" to-layer="1794" to-port="1" />
+		<edge from-layer="1794" from-port="2" to-layer="1796" to-port="0" />
+		<edge from-layer="1795" from-port="0" to-layer="1796" to-port="1" />
+		<edge from-layer="1796" from-port="2" to-layer="1798" to-port="0" />
+		<edge from-layer="1797" from-port="0" to-layer="1798" to-port="1" />
+		<edge from-layer="1798" from-port="2" to-layer="1800" to-port="0" />
+		<edge from-layer="1799" from-port="0" to-layer="1800" to-port="1" />
+		<edge from-layer="1800" from-port="2" to-layer="1801" to-port="1" />
+		<edge from-layer="1801" from-port="2" to-layer="1803" to-port="0" />
+		<edge from-layer="1802" from-port="0" to-layer="1803" to-port="1" />
+		<edge from-layer="1803" from-port="2" to-layer="1805" to-port="0" />
+		<edge from-layer="1804" from-port="0" to-layer="1805" to-port="1" />
+		<edge from-layer="1805" from-port="2" to-layer="1807" to-port="0" />
+		<edge from-layer="1806" from-port="0" to-layer="1807" to-port="1" />
+		<edge from-layer="1807" from-port="2" to-layer="1810" to-port="0" />
+		<edge from-layer="1808" from-port="0" to-layer="1809" to-port="0" />
+		<edge from-layer="1809" from-port="1" to-layer="1810" to-port="1" />
+		<edge from-layer="1810" from-port="2" to-layer="1811" to-port="1" />
+		<edge from-layer="1811" from-port="2" to-layer="1812" to-port="0" />
+		<edge from-layer="1812" from-port="2" to-layer="1864" to-port="1" />
+		<edge from-layer="1812" from-port="2" to-layer="1814" to-port="0" />
+		<edge from-layer="1813" from-port="0" to-layer="1814" to-port="1" />
+		<edge from-layer="1814" from-port="2" to-layer="1817" to-port="0" />
+		<edge from-layer="1815" from-port="0" to-layer="1816" to-port="0" />
+		<edge from-layer="1816" from-port="1" to-layer="1817" to-port="1" />
+		<edge from-layer="1817" from-port="2" to-layer="1820" to-port="0" />
+		<edge from-layer="1818" from-port="0" to-layer="1819" to-port="0" />
+		<edge from-layer="1819" from-port="1" to-layer="1820" to-port="1" />
+		<edge from-layer="1820" from-port="2" to-layer="1823" to-port="0" />
+		<edge from-layer="1821" from-port="0" to-layer="1822" to-port="0" />
+		<edge from-layer="1822" from-port="1" to-layer="1823" to-port="1" />
+		<edge from-layer="1823" from-port="2" to-layer="1825" to-port="0" />
+		<edge from-layer="1824" from-port="0" to-layer="1825" to-port="1" />
+		<edge from-layer="1825" from-port="2" to-layer="1827" to-port="0" />
+		<edge from-layer="1826" from-port="0" to-layer="1827" to-port="1" />
+		<edge from-layer="1827" from-port="2" to-layer="1829" to-port="0" />
+		<edge from-layer="1828" from-port="0" to-layer="1829" to-port="1" />
+		<edge from-layer="1829" from-port="2" to-layer="1842" to-port="0" />
+		<edge from-layer="1830" from-port="0" to-layer="1831" to-port="0" />
+		<edge from-layer="1831" from-port="1" to-layer="1832" to-port="1" />
+		<edge from-layer="1832" from-port="2" to-layer="1834" to-port="0" />
+		<edge from-layer="1833" from-port="0" to-layer="1834" to-port="1" />
+		<edge from-layer="1834" from-port="2" to-layer="1836" to-port="0" />
+		<edge from-layer="1835" from-port="0" to-layer="1836" to-port="1" />
+		<edge from-layer="1836" from-port="2" to-layer="1838" to-port="0" />
+		<edge from-layer="1837" from-port="0" to-layer="1838" to-port="1" />
+		<edge from-layer="1838" from-port="2" to-layer="1841" to-port="0" />
+		<edge from-layer="1839" from-port="0" to-layer="1840" to-port="0" />
+		<edge from-layer="1840" from-port="1" to-layer="1841" to-port="1" />
+		<edge from-layer="1841" from-port="2" to-layer="1842" to-port="1" />
+		<edge from-layer="1842" from-port="2" to-layer="1843" to-port="0" />
+		<edge from-layer="1843" from-port="1" to-layer="1853" to-port="0" />
+		<edge from-layer="1844" from-port="0" to-layer="1845" to-port="0" />
+		<edge from-layer="1845" from-port="1" to-layer="1846" to-port="1" />
+		<edge from-layer="1846" from-port="2" to-layer="1848" to-port="0" />
+		<edge from-layer="1847" from-port="0" to-layer="1848" to-port="1" />
+		<edge from-layer="1848" from-port="2" to-layer="1850" to-port="0" />
+		<edge from-layer="1849" from-port="0" to-layer="1850" to-port="1" />
+		<edge from-layer="1850" from-port="2" to-layer="1852" to-port="0" />
+		<edge from-layer="1851" from-port="0" to-layer="1852" to-port="1" />
+		<edge from-layer="1852" from-port="2" to-layer="1853" to-port="1" />
+		<edge from-layer="1853" from-port="2" to-layer="1855" to-port="0" />
+		<edge from-layer="1854" from-port="0" to-layer="1855" to-port="1" />
+		<edge from-layer="1855" from-port="2" to-layer="1857" to-port="0" />
+		<edge from-layer="1856" from-port="0" to-layer="1857" to-port="1" />
+		<edge from-layer="1857" from-port="2" to-layer="1859" to-port="0" />
+		<edge from-layer="1858" from-port="0" to-layer="1859" to-port="1" />
+		<edge from-layer="1859" from-port="2" to-layer="1862" to-port="0" />
+		<edge from-layer="1860" from-port="0" to-layer="1861" to-port="0" />
+		<edge from-layer="1861" from-port="1" to-layer="1862" to-port="1" />
+		<edge from-layer="1862" from-port="2" to-layer="1863" to-port="1" />
+		<edge from-layer="1863" from-port="2" to-layer="1864" to-port="0" />
+		<edge from-layer="1864" from-port="2" to-layer="1908" to-port="1" />
+		<edge from-layer="1864" from-port="2" to-layer="1866" to-port="0" />
+		<edge from-layer="1865" from-port="0" to-layer="1866" to-port="1" />
+		<edge from-layer="1866" from-port="2" to-layer="1869" to-port="0" />
+		<edge from-layer="1867" from-port="0" to-layer="1868" to-port="0" />
+		<edge from-layer="1868" from-port="1" to-layer="1869" to-port="1" />
+		<edge from-layer="1869" from-port="2" to-layer="1872" to-port="0" />
+		<edge from-layer="1870" from-port="0" to-layer="1871" to-port="0" />
+		<edge from-layer="1871" from-port="1" to-layer="1872" to-port="1" />
+		<edge from-layer="1872" from-port="2" to-layer="1875" to-port="0" />
+		<edge from-layer="1873" from-port="0" to-layer="1874" to-port="0" />
+		<edge from-layer="1874" from-port="1" to-layer="1875" to-port="1" />
+		<edge from-layer="1875" from-port="2" to-layer="1876" to-port="1" />
+		<edge from-layer="1876" from-port="2" to-layer="1891" to-port="0" />
+		<edge from-layer="1876" from-port="2" to-layer="1880" to-port="0" />
+		<edge from-layer="1876" from-port="2" to-layer="1901" to-port="0" />
+		<edge from-layer="1877" from-port="0" to-layer="1891" to-port="1" />
+		<edge from-layer="1878" from-port="0" to-layer="1889" to-port="0" />
+		<edge from-layer="1879" from-port="0" to-layer="1889" to-port="1" />
+		<edge from-layer="1880" from-port="1" to-layer="1883" to-port="0" />
+		<edge from-layer="1881" from-port="0" to-layer="1883" to-port="1" />
+		<edge from-layer="1882" from-port="0" to-layer="1883" to-port="2" />
+		<edge from-layer="1883" from-port="3" to-layer="1885" to-port="0" />
+		<edge from-layer="1884" from-port="0" to-layer="1885" to-port="1" />
+		<edge from-layer="1885" from-port="2" to-layer="1887" to-port="0" />
+		<edge from-layer="1886" from-port="0" to-layer="1887" to-port="1" />
+		<edge from-layer="1887" from-port="2" to-layer="1889" to-port="2" />
+		<edge from-layer="1887" from-port="2" to-layer="1895" to-port="2" />
+		<edge from-layer="1887" from-port="2" to-layer="1898" to-port="0" />
+		<edge from-layer="1888" from-port="0" to-layer="1889" to-port="3" />
+		<edge from-layer="1889" from-port="4" to-layer="1891" to-port="2" />
+		<edge from-layer="1890" from-port="0" to-layer="1891" to-port="3" />
+		<edge from-layer="1891" from-port="4" to-layer="1903" to-port="0" />
+		<edge from-layer="1892" from-port="0" to-layer="1895" to-port="0" />
+		<edge from-layer="1893" from-port="0" to-layer="1895" to-port="1" />
+		<edge from-layer="1893" from-port="0" to-layer="1899" to-port="1" />
+		<edge from-layer="1894" from-port="0" to-layer="1895" to-port="3" />
+		<edge from-layer="1894" from-port="0" to-layer="1899" to-port="3" />
+		<edge from-layer="1895" from-port="4" to-layer="1901" to-port="1" />
+		<edge from-layer="1896" from-port="0" to-layer="1899" to-port="0" />
+		<edge from-layer="1897" from-port="0" to-layer="1898" to-port="1" />
+		<edge from-layer="1898" from-port="2" to-layer="1899" to-port="2" />
+		<edge from-layer="1899" from-port="4" to-layer="1901" to-port="2" />
+		<edge from-layer="1900" from-port="0" to-layer="1901" to-port="3" />
+		<edge from-layer="1901" from-port="4" to-layer="1902" to-port="0" />
+		<edge from-layer="1902" from-port="1" to-layer="1903" to-port="1" />
+		<edge from-layer="1903" from-port="2" to-layer="1906" to-port="0" />
+		<edge from-layer="1904" from-port="0" to-layer="1905" to-port="0" />
+		<edge from-layer="1905" from-port="1" to-layer="1906" to-port="1" />
+		<edge from-layer="1906" from-port="2" to-layer="1907" to-port="1" />
+		<edge from-layer="1907" from-port="2" to-layer="1908" to-port="0" />
+		<edge from-layer="1908" from-port="2" to-layer="1910" to-port="0" />
+		<edge from-layer="1909" from-port="0" to-layer="1910" to-port="1" />
+		<edge from-layer="1910" from-port="2" to-layer="1912" to-port="0" />
+		<edge from-layer="1911" from-port="0" to-layer="1912" to-port="1" />
+		<edge from-layer="1912" from-port="2" to-layer="1915" to-port="0" />
+		<edge from-layer="1913" from-port="0" to-layer="1914" to-port="0" />
+		<edge from-layer="1914" from-port="1" to-layer="1915" to-port="1" />
+		<edge from-layer="1915" from-port="2" to-layer="1918" to-port="0" />
+		<edge from-layer="1916" from-port="0" to-layer="1917" to-port="0" />
+		<edge from-layer="1917" from-port="1" to-layer="1918" to-port="1" />
+		<edge from-layer="1918" from-port="2" to-layer="1919" to-port="0" />
+		<edge from-layer="1919" from-port="2" to-layer="1921" to-port="0" />
+		<edge from-layer="1919" from-port="2" to-layer="1969" to-port="0" />
+		<edge from-layer="1919" from-port="2" to-layer="1924" to-port="0" />
+		<edge from-layer="1920" from-port="0" to-layer="1921" to-port="1" />
+		<edge from-layer="1921" from-port="2" to-layer="1923" to-port="0" />
+		<edge from-layer="1922" from-port="0" to-layer="1923" to-port="1" />
+		<edge from-layer="1923" from-port="2" to-layer="1925" to-port="0" />
+		<edge from-layer="1924" from-port="1" to-layer="1925" to-port="1" />
+		<edge from-layer="1925" from-port="2" to-layer="1928" to-port="0" />
+		<edge from-layer="1926" from-port="0" to-layer="1927" to-port="0" />
+		<edge from-layer="1927" from-port="1" to-layer="1928" to-port="1" />
+		<edge from-layer="1928" from-port="2" to-layer="1931" to-port="0" />
+		<edge from-layer="1929" from-port="0" to-layer="1930" to-port="0" />
+		<edge from-layer="1930" from-port="1" to-layer="1931" to-port="1" />
+		<edge from-layer="1931" from-port="2" to-layer="1932" to-port="0" />
+		<edge from-layer="1932" from-port="1" to-layer="1935" to-port="0" />
+		<edge from-layer="1933" from-port="0" to-layer="1934" to-port="0" />
+		<edge from-layer="1934" from-port="1" to-layer="1935" to-port="1" />
+		<edge from-layer="1935" from-port="2" to-layer="1938" to-port="0" />
+		<edge from-layer="1936" from-port="0" to-layer="1937" to-port="0" />
+		<edge from-layer="1937" from-port="1" to-layer="1938" to-port="1" />
+		<edge from-layer="1938" from-port="2" to-layer="1949" to-port="0" />
+		<edge from-layer="1939" from-port="0" to-layer="1940" to-port="0" />
+		<edge from-layer="1940" from-port="1" to-layer="1941" to-port="1" />
+		<edge from-layer="1941" from-port="2" to-layer="1944" to-port="0" />
+		<edge from-layer="1942" from-port="0" to-layer="1943" to-port="0" />
+		<edge from-layer="1943" from-port="1" to-layer="1944" to-port="1" />
+		<edge from-layer="1944" from-port="2" to-layer="1946" to-port="0" />
+		<edge from-layer="1945" from-port="0" to-layer="1946" to-port="1" />
+		<edge from-layer="1946" from-port="2" to-layer="1948" to-port="0" />
+		<edge from-layer="1947" from-port="0" to-layer="1948" to-port="1" />
+		<edge from-layer="1948" from-port="2" to-layer="1949" to-port="1" />
+		<edge from-layer="1949" from-port="2" to-layer="1954" to-port="0" />
+		<edge from-layer="1949" from-port="2" to-layer="1951" to-port="0" />
+		<edge from-layer="1950" from-port="0" to-layer="1951" to-port="1" />
+		<edge from-layer="1951" from-port="2" to-layer="1953" to-port="0" />
+		<edge from-layer="1952" from-port="0" to-layer="1953" to-port="1" />
+		<edge from-layer="1953" from-port="2" to-layer="1955" to-port="0" />
+		<edge from-layer="1954" from-port="1" to-layer="1955" to-port="1" />
+		<edge from-layer="1955" from-port="2" to-layer="1958" to-port="0" />
+		<edge from-layer="1956" from-port="0" to-layer="1957" to-port="0" />
+		<edge from-layer="1957" from-port="1" to-layer="1958" to-port="1" />
+		<edge from-layer="1958" from-port="2" to-layer="1961" to-port="0" />
+		<edge from-layer="1959" from-port="0" to-layer="1960" to-port="0" />
+		<edge from-layer="1960" from-port="1" to-layer="1961" to-port="1" />
+		<edge from-layer="1961" from-port="2" to-layer="1962" to-port="0" />
+		<edge from-layer="1962" from-port="1" to-layer="1965" to-port="0" />
+		<edge from-layer="1963" from-port="0" to-layer="1964" to-port="0" />
+		<edge from-layer="1964" from-port="1" to-layer="1965" to-port="1" />
+		<edge from-layer="1965" from-port="2" to-layer="1968" to-port="0" />
+		<edge from-layer="1966" from-port="0" to-layer="1967" to-port="0" />
+		<edge from-layer="1967" from-port="1" to-layer="1968" to-port="1" />
+		<edge from-layer="1968" from-port="2" to-layer="1969" to-port="1" />
+		<edge from-layer="1969" from-port="2" to-layer="1970" to-port="0" />
+		<edge from-layer="1970" from-port="2" to-layer="1981" to-port="0" />
+		<edge from-layer="1970" from-port="2" to-layer="1978" to-port="0" />
+		<edge from-layer="1970" from-port="2" to-layer="1973" to-port="0" />
+		<edge from-layer="1971" from-port="0" to-layer="1972" to-port="0" />
+		<edge from-layer="1972" from-port="1" to-layer="1973" to-port="1" />
+		<edge from-layer="1973" from-port="2" to-layer="1976" to-port="0" />
+		<edge from-layer="1974" from-port="0" to-layer="1975" to-port="0" />
+		<edge from-layer="1975" from-port="1" to-layer="1976" to-port="1" />
+		<edge from-layer="1976" from-port="2" to-layer="2026" to-port="0" />
+		<edge from-layer="1977" from-port="0" to-layer="1978" to-port="1" />
+		<edge from-layer="1978" from-port="2" to-layer="1980" to-port="0" />
+		<edge from-layer="1979" from-port="0" to-layer="1980" to-port="1" />
+		<edge from-layer="1980" from-port="2" to-layer="1982" to-port="0" />
+		<edge from-layer="1981" from-port="1" to-layer="1982" to-port="1" />
+		<edge from-layer="1982" from-port="2" to-layer="1985" to-port="0" />
+		<edge from-layer="1983" from-port="0" to-layer="1984" to-port="0" />
+		<edge from-layer="1984" from-port="1" to-layer="1985" to-port="1" />
+		<edge from-layer="1985" from-port="2" to-layer="1988" to-port="0" />
+		<edge from-layer="1986" from-port="0" to-layer="1987" to-port="0" />
+		<edge from-layer="1987" from-port="1" to-layer="1988" to-port="1" />
+		<edge from-layer="1988" from-port="2" to-layer="1989" to-port="0" />
+		<edge from-layer="1989" from-port="1" to-layer="1992" to-port="0" />
+		<edge from-layer="1990" from-port="0" to-layer="1991" to-port="0" />
+		<edge from-layer="1991" from-port="1" to-layer="1992" to-port="1" />
+		<edge from-layer="1992" from-port="2" to-layer="1995" to-port="0" />
+		<edge from-layer="1993" from-port="0" to-layer="1994" to-port="0" />
+		<edge from-layer="1994" from-port="1" to-layer="1995" to-port="1" />
+		<edge from-layer="1995" from-port="2" to-layer="2006" to-port="0" />
+		<edge from-layer="1996" from-port="0" to-layer="1997" to-port="0" />
+		<edge from-layer="1997" from-port="1" to-layer="1998" to-port="1" />
+		<edge from-layer="1998" from-port="2" to-layer="2001" to-port="0" />
+		<edge from-layer="1999" from-port="0" to-layer="2000" to-port="0" />
+		<edge from-layer="2000" from-port="1" to-layer="2001" to-port="1" />
+		<edge from-layer="2001" from-port="2" to-layer="2003" to-port="0" />
+		<edge from-layer="2002" from-port="0" to-layer="2003" to-port="1" />
+		<edge from-layer="2003" from-port="2" to-layer="2005" to-port="0" />
+		<edge from-layer="2004" from-port="0" to-layer="2005" to-port="1" />
+		<edge from-layer="2005" from-port="2" to-layer="2006" to-port="1" />
+		<edge from-layer="2006" from-port="2" to-layer="2008" to-port="0" />
+		<edge from-layer="2006" from-port="2" to-layer="2011" to-port="0" />
+		<edge from-layer="2007" from-port="0" to-layer="2008" to-port="1" />
+		<edge from-layer="2008" from-port="2" to-layer="2010" to-port="0" />
+		<edge from-layer="2009" from-port="0" to-layer="2010" to-port="1" />
+		<edge from-layer="2010" from-port="2" to-layer="2012" to-port="0" />
+		<edge from-layer="2011" from-port="1" to-layer="2012" to-port="1" />
+		<edge from-layer="2012" from-port="2" to-layer="2015" to-port="0" />
+		<edge from-layer="2013" from-port="0" to-layer="2014" to-port="0" />
+		<edge from-layer="2014" from-port="1" to-layer="2015" to-port="1" />
+		<edge from-layer="2015" from-port="2" to-layer="2018" to-port="0" />
+		<edge from-layer="2016" from-port="0" to-layer="2017" to-port="0" />
+		<edge from-layer="2017" from-port="1" to-layer="2018" to-port="1" />
+		<edge from-layer="2018" from-port="2" to-layer="2019" to-port="0" />
+		<edge from-layer="2019" from-port="1" to-layer="2022" to-port="0" />
+		<edge from-layer="2020" from-port="0" to-layer="2021" to-port="0" />
+		<edge from-layer="2021" from-port="1" to-layer="2022" to-port="1" />
+		<edge from-layer="2022" from-port="2" to-layer="2025" to-port="0" />
+		<edge from-layer="2023" from-port="0" to-layer="2024" to-port="0" />
+		<edge from-layer="2024" from-port="1" to-layer="2025" to-port="1" />
+		<edge from-layer="2025" from-port="2" to-layer="2026" to-port="1" />
+		<edge from-layer="2026" from-port="2" to-layer="2027" to-port="0" />
+		<edge from-layer="2027" from-port="2" to-layer="2038" to-port="0" />
+		<edge from-layer="2027" from-port="2" to-layer="2030" to-port="0" />
+		<edge from-layer="2027" from-port="2" to-layer="2035" to-port="0" />
+		<edge from-layer="2028" from-port="0" to-layer="2029" to-port="0" />
+		<edge from-layer="2029" from-port="1" to-layer="2030" to-port="1" />
+		<edge from-layer="2030" from-port="2" to-layer="2033" to-port="0" />
+		<edge from-layer="2031" from-port="0" to-layer="2032" to-port="0" />
+		<edge from-layer="2032" from-port="1" to-layer="2033" to-port="1" />
+		<edge from-layer="2033" from-port="2" to-layer="2083" to-port="0" />
+		<edge from-layer="2034" from-port="0" to-layer="2035" to-port="1" />
+		<edge from-layer="2035" from-port="2" to-layer="2037" to-port="0" />
+		<edge from-layer="2036" from-port="0" to-layer="2037" to-port="1" />
+		<edge from-layer="2037" from-port="2" to-layer="2039" to-port="0" />
+		<edge from-layer="2038" from-port="1" to-layer="2039" to-port="1" />
+		<edge from-layer="2039" from-port="2" to-layer="2042" to-port="0" />
+		<edge from-layer="2040" from-port="0" to-layer="2041" to-port="0" />
+		<edge from-layer="2041" from-port="1" to-layer="2042" to-port="1" />
+		<edge from-layer="2042" from-port="2" to-layer="2045" to-port="0" />
+		<edge from-layer="2043" from-port="0" to-layer="2044" to-port="0" />
+		<edge from-layer="2044" from-port="1" to-layer="2045" to-port="1" />
+		<edge from-layer="2045" from-port="2" to-layer="2046" to-port="0" />
+		<edge from-layer="2046" from-port="1" to-layer="2049" to-port="0" />
+		<edge from-layer="2047" from-port="0" to-layer="2048" to-port="0" />
+		<edge from-layer="2048" from-port="1" to-layer="2049" to-port="1" />
+		<edge from-layer="2049" from-port="2" to-layer="2052" to-port="0" />
+		<edge from-layer="2050" from-port="0" to-layer="2051" to-port="0" />
+		<edge from-layer="2051" from-port="1" to-layer="2052" to-port="1" />
+		<edge from-layer="2052" from-port="2" to-layer="2063" to-port="0" />
+		<edge from-layer="2053" from-port="0" to-layer="2054" to-port="0" />
+		<edge from-layer="2054" from-port="1" to-layer="2055" to-port="1" />
+		<edge from-layer="2055" from-port="2" to-layer="2058" to-port="0" />
+		<edge from-layer="2056" from-port="0" to-layer="2057" to-port="0" />
+		<edge from-layer="2057" from-port="1" to-layer="2058" to-port="1" />
+		<edge from-layer="2058" from-port="2" to-layer="2060" to-port="0" />
+		<edge from-layer="2059" from-port="0" to-layer="2060" to-port="1" />
+		<edge from-layer="2060" from-port="2" to-layer="2062" to-port="0" />
+		<edge from-layer="2061" from-port="0" to-layer="2062" to-port="1" />
+		<edge from-layer="2062" from-port="2" to-layer="2063" to-port="1" />
+		<edge from-layer="2063" from-port="2" to-layer="2068" to-port="0" />
+		<edge from-layer="2063" from-port="2" to-layer="2065" to-port="0" />
+		<edge from-layer="2064" from-port="0" to-layer="2065" to-port="1" />
+		<edge from-layer="2065" from-port="2" to-layer="2067" to-port="0" />
+		<edge from-layer="2066" from-port="0" to-layer="2067" to-port="1" />
+		<edge from-layer="2067" from-port="2" to-layer="2069" to-port="0" />
+		<edge from-layer="2068" from-port="1" to-layer="2069" to-port="1" />
+		<edge from-layer="2069" from-port="2" to-layer="2072" to-port="0" />
+		<edge from-layer="2070" from-port="0" to-layer="2071" to-port="0" />
+		<edge from-layer="2071" from-port="1" to-layer="2072" to-port="1" />
+		<edge from-layer="2072" from-port="2" to-layer="2075" to-port="0" />
+		<edge from-layer="2073" from-port="0" to-layer="2074" to-port="0" />
+		<edge from-layer="2074" from-port="1" to-layer="2075" to-port="1" />
+		<edge from-layer="2075" from-port="2" to-layer="2076" to-port="0" />
+		<edge from-layer="2076" from-port="1" to-layer="2079" to-port="0" />
+		<edge from-layer="2077" from-port="0" to-layer="2078" to-port="0" />
+		<edge from-layer="2078" from-port="1" to-layer="2079" to-port="1" />
+		<edge from-layer="2079" from-port="2" to-layer="2082" to-port="0" />
+		<edge from-layer="2080" from-port="0" to-layer="2081" to-port="0" />
+		<edge from-layer="2081" from-port="1" to-layer="2082" to-port="1" />
+		<edge from-layer="2082" from-port="2" to-layer="2083" to-port="1" />
+		<edge from-layer="2083" from-port="2" to-layer="2084" to-port="0" />
+		<edge from-layer="2084" from-port="2" to-layer="2087" to-port="0" />
+		<edge from-layer="2084" from-port="2" to-layer="2092" to-port="0" />
+		<edge from-layer="2084" from-port="2" to-layer="2095" to-port="0" />
+		<edge from-layer="2085" from-port="0" to-layer="2086" to-port="0" />
+		<edge from-layer="2086" from-port="1" to-layer="2087" to-port="1" />
+		<edge from-layer="2087" from-port="2" to-layer="2090" to-port="0" />
+		<edge from-layer="2088" from-port="0" to-layer="2089" to-port="0" />
+		<edge from-layer="2089" from-port="1" to-layer="2090" to-port="1" />
+		<edge from-layer="2090" from-port="2" to-layer="2140" to-port="0" />
+		<edge from-layer="2091" from-port="0" to-layer="2092" to-port="1" />
+		<edge from-layer="2092" from-port="2" to-layer="2094" to-port="0" />
+		<edge from-layer="2093" from-port="0" to-layer="2094" to-port="1" />
+		<edge from-layer="2094" from-port="2" to-layer="2096" to-port="0" />
+		<edge from-layer="2095" from-port="1" to-layer="2096" to-port="1" />
+		<edge from-layer="2096" from-port="2" to-layer="2099" to-port="0" />
+		<edge from-layer="2097" from-port="0" to-layer="2098" to-port="0" />
+		<edge from-layer="2098" from-port="1" to-layer="2099" to-port="1" />
+		<edge from-layer="2099" from-port="2" to-layer="2102" to-port="0" />
+		<edge from-layer="2100" from-port="0" to-layer="2101" to-port="0" />
+		<edge from-layer="2101" from-port="1" to-layer="2102" to-port="1" />
+		<edge from-layer="2102" from-port="2" to-layer="2103" to-port="0" />
+		<edge from-layer="2103" from-port="1" to-layer="2106" to-port="0" />
+		<edge from-layer="2104" from-port="0" to-layer="2105" to-port="0" />
+		<edge from-layer="2105" from-port="1" to-layer="2106" to-port="1" />
+		<edge from-layer="2106" from-port="2" to-layer="2109" to-port="0" />
+		<edge from-layer="2107" from-port="0" to-layer="2108" to-port="0" />
+		<edge from-layer="2108" from-port="1" to-layer="2109" to-port="1" />
+		<edge from-layer="2109" from-port="2" to-layer="2120" to-port="0" />
+		<edge from-layer="2110" from-port="0" to-layer="2111" to-port="0" />
+		<edge from-layer="2111" from-port="1" to-layer="2112" to-port="1" />
+		<edge from-layer="2112" from-port="2" to-layer="2115" to-port="0" />
+		<edge from-layer="2113" from-port="0" to-layer="2114" to-port="0" />
+		<edge from-layer="2114" from-port="1" to-layer="2115" to-port="1" />
+		<edge from-layer="2115" from-port="2" to-layer="2117" to-port="0" />
+		<edge from-layer="2116" from-port="0" to-layer="2117" to-port="1" />
+		<edge from-layer="2117" from-port="2" to-layer="2119" to-port="0" />
+		<edge from-layer="2118" from-port="0" to-layer="2119" to-port="1" />
+		<edge from-layer="2119" from-port="2" to-layer="2120" to-port="1" />
+		<edge from-layer="2120" from-port="2" to-layer="2125" to-port="0" />
+		<edge from-layer="2120" from-port="2" to-layer="2122" to-port="0" />
+		<edge from-layer="2121" from-port="0" to-layer="2122" to-port="1" />
+		<edge from-layer="2122" from-port="2" to-layer="2124" to-port="0" />
+		<edge from-layer="2123" from-port="0" to-layer="2124" to-port="1" />
+		<edge from-layer="2124" from-port="2" to-layer="2126" to-port="0" />
+		<edge from-layer="2125" from-port="1" to-layer="2126" to-port="1" />
+		<edge from-layer="2126" from-port="2" to-layer="2129" to-port="0" />
+		<edge from-layer="2127" from-port="0" to-layer="2128" to-port="0" />
+		<edge from-layer="2128" from-port="1" to-layer="2129" to-port="1" />
+		<edge from-layer="2129" from-port="2" to-layer="2132" to-port="0" />
+		<edge from-layer="2130" from-port="0" to-layer="2131" to-port="0" />
+		<edge from-layer="2131" from-port="1" to-layer="2132" to-port="1" />
+		<edge from-layer="2132" from-port="2" to-layer="2133" to-port="0" />
+		<edge from-layer="2133" from-port="1" to-layer="2136" to-port="0" />
+		<edge from-layer="2134" from-port="0" to-layer="2135" to-port="0" />
+		<edge from-layer="2135" from-port="1" to-layer="2136" to-port="1" />
+		<edge from-layer="2136" from-port="2" to-layer="2139" to-port="0" />
+		<edge from-layer="2137" from-port="0" to-layer="2138" to-port="0" />
+		<edge from-layer="2138" from-port="1" to-layer="2139" to-port="1" />
+		<edge from-layer="2139" from-port="2" to-layer="2140" to-port="1" />
+		<edge from-layer="2140" from-port="2" to-layer="2146" to-port="0" />
+		<edge from-layer="2140" from-port="2" to-layer="2141" to-port="0" />
+		<edge from-layer="2141" from-port="1" to-layer="2142" to-port="0" />
+		<edge from-layer="2142" from-port="1" to-layer="2144" to-port="0" />
+		<edge from-layer="2143" from-port="0" to-layer="2146" to-port="2" />
+		<edge from-layer="2143" from-port="0" to-layer="2144" to-port="1" />
+		<edge from-layer="2144" from-port="2" to-layer="2145" to-port="0" />
+		<edge from-layer="2145" from-port="1" to-layer="2146" to-port="1" />
+		<edge from-layer="2146" from-port="3" to-layer="2149" to-port="0" />
+		<edge from-layer="2147" from-port="0" to-layer="2148" to-port="0" />
+		<edge from-layer="2148" from-port="1" to-layer="2149" to-port="1" />
+		<edge from-layer="2149" from-port="2" to-layer="2152" to-port="0" />
+		<edge from-layer="2150" from-port="0" to-layer="2151" to-port="0" />
+		<edge from-layer="2151" from-port="1" to-layer="2152" to-port="1" />
+		<edge from-layer="2152" from-port="2" to-layer="2153" to-port="0" />
+		<edge from-layer="2153" from-port="2" to-layer="2156" to-port="0" />
+		<edge from-layer="2153" from-port="2" to-layer="2161" to-port="0" />
+		<edge from-layer="2153" from-port="2" to-layer="2164" to-port="0" />
+		<edge from-layer="2154" from-port="0" to-layer="2155" to-port="0" />
+		<edge from-layer="2155" from-port="1" to-layer="2156" to-port="1" />
+		<edge from-layer="2156" from-port="2" to-layer="2159" to-port="0" />
+		<edge from-layer="2157" from-port="0" to-layer="2158" to-port="0" />
+		<edge from-layer="2158" from-port="1" to-layer="2159" to-port="1" />
+		<edge from-layer="2159" from-port="2" to-layer="2209" to-port="0" />
+		<edge from-layer="2160" from-port="0" to-layer="2161" to-port="1" />
+		<edge from-layer="2161" from-port="2" to-layer="2163" to-port="0" />
+		<edge from-layer="2162" from-port="0" to-layer="2163" to-port="1" />
+		<edge from-layer="2163" from-port="2" to-layer="2165" to-port="0" />
+		<edge from-layer="2164" from-port="1" to-layer="2165" to-port="1" />
+		<edge from-layer="2165" from-port="2" to-layer="2168" to-port="0" />
+		<edge from-layer="2166" from-port="0" to-layer="2167" to-port="0" />
+		<edge from-layer="2167" from-port="1" to-layer="2168" to-port="1" />
+		<edge from-layer="2168" from-port="2" to-layer="2171" to-port="0" />
+		<edge from-layer="2169" from-port="0" to-layer="2170" to-port="0" />
+		<edge from-layer="2170" from-port="1" to-layer="2171" to-port="1" />
+		<edge from-layer="2171" from-port="2" to-layer="2172" to-port="0" />
+		<edge from-layer="2172" from-port="1" to-layer="2175" to-port="0" />
+		<edge from-layer="2173" from-port="0" to-layer="2174" to-port="0" />
+		<edge from-layer="2174" from-port="1" to-layer="2175" to-port="1" />
+		<edge from-layer="2175" from-port="2" to-layer="2178" to-port="0" />
+		<edge from-layer="2176" from-port="0" to-layer="2177" to-port="0" />
+		<edge from-layer="2177" from-port="1" to-layer="2178" to-port="1" />
+		<edge from-layer="2178" from-port="2" to-layer="2189" to-port="0" />
+		<edge from-layer="2179" from-port="0" to-layer="2180" to-port="0" />
+		<edge from-layer="2180" from-port="1" to-layer="2181" to-port="1" />
+		<edge from-layer="2181" from-port="2" to-layer="2184" to-port="0" />
+		<edge from-layer="2182" from-port="0" to-layer="2183" to-port="0" />
+		<edge from-layer="2183" from-port="1" to-layer="2184" to-port="1" />
+		<edge from-layer="2184" from-port="2" to-layer="2186" to-port="0" />
+		<edge from-layer="2185" from-port="0" to-layer="2186" to-port="1" />
+		<edge from-layer="2186" from-port="2" to-layer="2188" to-port="0" />
+		<edge from-layer="2187" from-port="0" to-layer="2188" to-port="1" />
+		<edge from-layer="2188" from-port="2" to-layer="2189" to-port="1" />
+		<edge from-layer="2189" from-port="2" to-layer="2194" to-port="0" />
+		<edge from-layer="2189" from-port="2" to-layer="2191" to-port="0" />
+		<edge from-layer="2190" from-port="0" to-layer="2191" to-port="1" />
+		<edge from-layer="2191" from-port="2" to-layer="2193" to-port="0" />
+		<edge from-layer="2192" from-port="0" to-layer="2193" to-port="1" />
+		<edge from-layer="2193" from-port="2" to-layer="2195" to-port="0" />
+		<edge from-layer="2194" from-port="1" to-layer="2195" to-port="1" />
+		<edge from-layer="2195" from-port="2" to-layer="2198" to-port="0" />
+		<edge from-layer="2196" from-port="0" to-layer="2197" to-port="0" />
+		<edge from-layer="2197" from-port="1" to-layer="2198" to-port="1" />
+		<edge from-layer="2198" from-port="2" to-layer="2201" to-port="0" />
+		<edge from-layer="2199" from-port="0" to-layer="2200" to-port="0" />
+		<edge from-layer="2200" from-port="1" to-layer="2201" to-port="1" />
+		<edge from-layer="2201" from-port="2" to-layer="2202" to-port="0" />
+		<edge from-layer="2202" from-port="1" to-layer="2205" to-port="0" />
+		<edge from-layer="2203" from-port="0" to-layer="2204" to-port="0" />
+		<edge from-layer="2204" from-port="1" to-layer="2205" to-port="1" />
+		<edge from-layer="2205" from-port="2" to-layer="2208" to-port="0" />
+		<edge from-layer="2206" from-port="0" to-layer="2207" to-port="0" />
+		<edge from-layer="2207" from-port="1" to-layer="2208" to-port="1" />
+		<edge from-layer="2208" from-port="2" to-layer="2209" to-port="1" />
+		<edge from-layer="2209" from-port="2" to-layer="2211" to-port="0" />
+		<edge from-layer="2209" from-port="2" to-layer="2390" to-port="1" />
+		<edge from-layer="2209" from-port="2" to-layer="2214" to-port="0" />
+		<edge from-layer="2210" from-port="0" to-layer="2211" to-port="1" />
+		<edge from-layer="2211" from-port="2" to-layer="2213" to-port="0" />
+		<edge from-layer="2212" from-port="0" to-layer="2213" to-port="1" />
+		<edge from-layer="2213" from-port="2" to-layer="2215" to-port="0" />
+		<edge from-layer="2214" from-port="1" to-layer="2215" to-port="1" />
+		<edge from-layer="2215" from-port="2" to-layer="2218" to-port="0" />
+		<edge from-layer="2216" from-port="0" to-layer="2217" to-port="0" />
+		<edge from-layer="2217" from-port="1" to-layer="2218" to-port="1" />
+		<edge from-layer="2218" from-port="2" to-layer="2221" to-port="0" />
+		<edge from-layer="2219" from-port="0" to-layer="2220" to-port="0" />
+		<edge from-layer="2220" from-port="1" to-layer="2221" to-port="1" />
+		<edge from-layer="2221" from-port="2" to-layer="2224" to-port="0" />
+		<edge from-layer="2222" from-port="0" to-layer="2223" to-port="0" />
+		<edge from-layer="2223" from-port="1" to-layer="2224" to-port="1" />
+		<edge from-layer="2224" from-port="2" to-layer="2227" to-port="0" />
+		<edge from-layer="2225" from-port="0" to-layer="2226" to-port="0" />
+		<edge from-layer="2226" from-port="1" to-layer="2227" to-port="1" />
+		<edge from-layer="2227" from-port="2" to-layer="2229" to-port="0" />
+		<edge from-layer="2228" from-port="0" to-layer="2229" to-port="1" />
+		<edge from-layer="2229" from-port="2" to-layer="2231" to-port="0" />
+		<edge from-layer="2230" from-port="0" to-layer="2231" to-port="1" />
+		<edge from-layer="2231" from-port="2" to-layer="2283" to-port="1" />
+		<edge from-layer="2231" from-port="2" to-layer="2233" to-port="0" />
+		<edge from-layer="2232" from-port="0" to-layer="2233" to-port="1" />
+		<edge from-layer="2233" from-port="2" to-layer="2236" to-port="0" />
+		<edge from-layer="2234" from-port="0" to-layer="2235" to-port="0" />
+		<edge from-layer="2235" from-port="1" to-layer="2236" to-port="1" />
+		<edge from-layer="2236" from-port="2" to-layer="2239" to-port="0" />
+		<edge from-layer="2237" from-port="0" to-layer="2238" to-port="0" />
+		<edge from-layer="2238" from-port="1" to-layer="2239" to-port="1" />
+		<edge from-layer="2239" from-port="2" to-layer="2265" to-port="0" />
+		<edge from-layer="2239" from-port="2" to-layer="2242" to-port="0" />
+		<edge from-layer="2239" from-port="2" to-layer="2251" to-port="0" />
+		<edge from-layer="2240" from-port="0" to-layer="2241" to-port="0" />
+		<edge from-layer="2241" from-port="1" to-layer="2242" to-port="1" />
+		<edge from-layer="2242" from-port="2" to-layer="2244" to-port="0" />
+		<edge from-layer="2243" from-port="0" to-layer="2244" to-port="1" />
+		<edge from-layer="2244" from-port="2" to-layer="2246" to-port="0" />
+		<edge from-layer="2245" from-port="0" to-layer="2246" to-port="1" />
+		<edge from-layer="2246" from-port="2" to-layer="2248" to-port="0" />
+		<edge from-layer="2247" from-port="0" to-layer="2248" to-port="1" />
+		<edge from-layer="2248" from-port="2" to-layer="2261" to-port="0" />
+		<edge from-layer="2249" from-port="0" to-layer="2250" to-port="0" />
+		<edge from-layer="2250" from-port="1" to-layer="2251" to-port="1" />
+		<edge from-layer="2251" from-port="2" to-layer="2253" to-port="0" />
+		<edge from-layer="2252" from-port="0" to-layer="2253" to-port="1" />
+		<edge from-layer="2253" from-port="2" to-layer="2255" to-port="0" />
+		<edge from-layer="2254" from-port="0" to-layer="2255" to-port="1" />
+		<edge from-layer="2255" from-port="2" to-layer="2257" to-port="0" />
+		<edge from-layer="2256" from-port="0" to-layer="2257" to-port="1" />
+		<edge from-layer="2257" from-port="2" to-layer="2260" to-port="0" />
+		<edge from-layer="2258" from-port="0" to-layer="2259" to-port="0" />
+		<edge from-layer="2259" from-port="1" to-layer="2260" to-port="1" />
+		<edge from-layer="2260" from-port="2" to-layer="2261" to-port="1" />
+		<edge from-layer="2261" from-port="2" to-layer="2262" to-port="0" />
+		<edge from-layer="2262" from-port="1" to-layer="2272" to-port="0" />
+		<edge from-layer="2263" from-port="0" to-layer="2264" to-port="0" />
+		<edge from-layer="2264" from-port="1" to-layer="2265" to-port="1" />
+		<edge from-layer="2265" from-port="2" to-layer="2267" to-port="0" />
+		<edge from-layer="2266" from-port="0" to-layer="2267" to-port="1" />
+		<edge from-layer="2267" from-port="2" to-layer="2269" to-port="0" />
+		<edge from-layer="2268" from-port="0" to-layer="2269" to-port="1" />
+		<edge from-layer="2269" from-port="2" to-layer="2271" to-port="0" />
+		<edge from-layer="2270" from-port="0" to-layer="2271" to-port="1" />
+		<edge from-layer="2271" from-port="2" to-layer="2272" to-port="1" />
+		<edge from-layer="2272" from-port="2" to-layer="2274" to-port="0" />
+		<edge from-layer="2273" from-port="0" to-layer="2274" to-port="1" />
+		<edge from-layer="2274" from-port="2" to-layer="2276" to-port="0" />
+		<edge from-layer="2275" from-port="0" to-layer="2276" to-port="1" />
+		<edge from-layer="2276" from-port="2" to-layer="2278" to-port="0" />
+		<edge from-layer="2277" from-port="0" to-layer="2278" to-port="1" />
+		<edge from-layer="2278" from-port="2" to-layer="2281" to-port="0" />
+		<edge from-layer="2279" from-port="0" to-layer="2280" to-port="0" />
+		<edge from-layer="2280" from-port="1" to-layer="2281" to-port="1" />
+		<edge from-layer="2281" from-port="2" to-layer="2282" to-port="1" />
+		<edge from-layer="2282" from-port="2" to-layer="2283" to-port="0" />
+		<edge from-layer="2283" from-port="2" to-layer="2285" to-port="0" />
+		<edge from-layer="2283" from-port="2" to-layer="2335" to-port="1" />
+		<edge from-layer="2284" from-port="0" to-layer="2285" to-port="1" />
+		<edge from-layer="2285" from-port="2" to-layer="2288" to-port="0" />
+		<edge from-layer="2286" from-port="0" to-layer="2287" to-port="0" />
+		<edge from-layer="2287" from-port="1" to-layer="2288" to-port="1" />
+		<edge from-layer="2288" from-port="2" to-layer="2291" to-port="0" />
+		<edge from-layer="2289" from-port="0" to-layer="2290" to-port="0" />
+		<edge from-layer="2290" from-port="1" to-layer="2291" to-port="1" />
+		<edge from-layer="2291" from-port="2" to-layer="2294" to-port="0" />
+		<edge from-layer="2292" from-port="0" to-layer="2293" to-port="0" />
+		<edge from-layer="2293" from-port="1" to-layer="2294" to-port="1" />
+		<edge from-layer="2294" from-port="2" to-layer="2296" to-port="0" />
+		<edge from-layer="2295" from-port="0" to-layer="2296" to-port="1" />
+		<edge from-layer="2296" from-port="2" to-layer="2298" to-port="0" />
+		<edge from-layer="2297" from-port="0" to-layer="2298" to-port="1" />
+		<edge from-layer="2298" from-port="2" to-layer="2300" to-port="0" />
+		<edge from-layer="2299" from-port="0" to-layer="2300" to-port="1" />
+		<edge from-layer="2300" from-port="2" to-layer="2313" to-port="0" />
+		<edge from-layer="2301" from-port="0" to-layer="2302" to-port="0" />
+		<edge from-layer="2302" from-port="1" to-layer="2303" to-port="1" />
+		<edge from-layer="2303" from-port="2" to-layer="2305" to-port="0" />
+		<edge from-layer="2304" from-port="0" to-layer="2305" to-port="1" />
+		<edge from-layer="2305" from-port="2" to-layer="2307" to-port="0" />
+		<edge from-layer="2306" from-port="0" to-layer="2307" to-port="1" />
+		<edge from-layer="2307" from-port="2" to-layer="2309" to-port="0" />
+		<edge from-layer="2308" from-port="0" to-layer="2309" to-port="1" />
+		<edge from-layer="2309" from-port="2" to-layer="2312" to-port="0" />
+		<edge from-layer="2310" from-port="0" to-layer="2311" to-port="0" />
+		<edge from-layer="2311" from-port="1" to-layer="2312" to-port="1" />
+		<edge from-layer="2312" from-port="2" to-layer="2313" to-port="1" />
+		<edge from-layer="2313" from-port="2" to-layer="2314" to-port="0" />
+		<edge from-layer="2314" from-port="1" to-layer="2324" to-port="0" />
+		<edge from-layer="2315" from-port="0" to-layer="2316" to-port="0" />
+		<edge from-layer="2316" from-port="1" to-layer="2317" to-port="1" />
+		<edge from-layer="2317" from-port="2" to-layer="2319" to-port="0" />
+		<edge from-layer="2318" from-port="0" to-layer="2319" to-port="1" />
+		<edge from-layer="2319" from-port="2" to-layer="2321" to-port="0" />
+		<edge from-layer="2320" from-port="0" to-layer="2321" to-port="1" />
+		<edge from-layer="2321" from-port="2" to-layer="2323" to-port="0" />
+		<edge from-layer="2322" from-port="0" to-layer="2323" to-port="1" />
+		<edge from-layer="2323" from-port="2" to-layer="2324" to-port="1" />
+		<edge from-layer="2324" from-port="2" to-layer="2326" to-port="0" />
+		<edge from-layer="2325" from-port="0" to-layer="2326" to-port="1" />
+		<edge from-layer="2326" from-port="2" to-layer="2328" to-port="0" />
+		<edge from-layer="2327" from-port="0" to-layer="2328" to-port="1" />
+		<edge from-layer="2328" from-port="2" to-layer="2330" to-port="0" />
+		<edge from-layer="2329" from-port="0" to-layer="2330" to-port="1" />
+		<edge from-layer="2330" from-port="2" to-layer="2333" to-port="0" />
+		<edge from-layer="2331" from-port="0" to-layer="2332" to-port="0" />
+		<edge from-layer="2332" from-port="1" to-layer="2333" to-port="1" />
+		<edge from-layer="2333" from-port="2" to-layer="2334" to-port="1" />
+		<edge from-layer="2334" from-port="2" to-layer="2335" to-port="0" />
+		<edge from-layer="2335" from-port="2" to-layer="2337" to-port="0" />
+		<edge from-layer="2335" from-port="2" to-layer="2379" to-port="1" />
+		<edge from-layer="2336" from-port="0" to-layer="2337" to-port="1" />
+		<edge from-layer="2337" from-port="2" to-layer="2340" to-port="0" />
+		<edge from-layer="2338" from-port="0" to-layer="2339" to-port="0" />
+		<edge from-layer="2339" from-port="1" to-layer="2340" to-port="1" />
+		<edge from-layer="2340" from-port="2" to-layer="2343" to-port="0" />
+		<edge from-layer="2341" from-port="0" to-layer="2342" to-port="0" />
+		<edge from-layer="2342" from-port="1" to-layer="2343" to-port="1" />
+		<edge from-layer="2343" from-port="2" to-layer="2346" to-port="0" />
+		<edge from-layer="2344" from-port="0" to-layer="2345" to-port="0" />
+		<edge from-layer="2345" from-port="1" to-layer="2346" to-port="1" />
+		<edge from-layer="2346" from-port="2" to-layer="2347" to-port="1" />
+		<edge from-layer="2347" from-port="2" to-layer="2351" to-port="0" />
+		<edge from-layer="2347" from-port="2" to-layer="2372" to-port="0" />
+		<edge from-layer="2347" from-port="2" to-layer="2362" to-port="0" />
+		<edge from-layer="2348" from-port="0" to-layer="2362" to-port="1" />
+		<edge from-layer="2349" from-port="0" to-layer="2360" to-port="0" />
+		<edge from-layer="2350" from-port="0" to-layer="2360" to-port="1" />
+		<edge from-layer="2351" from-port="1" to-layer="2354" to-port="0" />
+		<edge from-layer="2352" from-port="0" to-layer="2354" to-port="1" />
+		<edge from-layer="2353" from-port="0" to-layer="2354" to-port="2" />
+		<edge from-layer="2354" from-port="3" to-layer="2356" to-port="0" />
+		<edge from-layer="2355" from-port="0" to-layer="2356" to-port="1" />
+		<edge from-layer="2356" from-port="2" to-layer="2358" to-port="0" />
+		<edge from-layer="2357" from-port="0" to-layer="2358" to-port="1" />
+		<edge from-layer="2358" from-port="2" to-layer="2369" to-port="0" />
+		<edge from-layer="2358" from-port="2" to-layer="2366" to-port="2" />
+		<edge from-layer="2358" from-port="2" to-layer="2360" to-port="2" />
+		<edge from-layer="2359" from-port="0" to-layer="2360" to-port="3" />
+		<edge from-layer="2360" from-port="4" to-layer="2362" to-port="2" />
+		<edge from-layer="2361" from-port="0" to-layer="2362" to-port="3" />
+		<edge from-layer="2362" from-port="4" to-layer="2374" to-port="0" />
+		<edge from-layer="2363" from-port="0" to-layer="2366" to-port="0" />
+		<edge from-layer="2364" from-port="0" to-layer="2370" to-port="1" />
+		<edge from-layer="2364" from-port="0" to-layer="2366" to-port="1" />
+		<edge from-layer="2365" from-port="0" to-layer="2370" to-port="3" />
+		<edge from-layer="2365" from-port="0" to-layer="2366" to-port="3" />
+		<edge from-layer="2366" from-port="4" to-layer="2372" to-port="1" />
+		<edge from-layer="2367" from-port="0" to-layer="2370" to-port="0" />
+		<edge from-layer="2368" from-port="0" to-layer="2369" to-port="1" />
+		<edge from-layer="2369" from-port="2" to-layer="2370" to-port="2" />
+		<edge from-layer="2370" from-port="4" to-layer="2372" to-port="2" />
+		<edge from-layer="2371" from-port="0" to-layer="2372" to-port="3" />
+		<edge from-layer="2372" from-port="4" to-layer="2373" to-port="0" />
+		<edge from-layer="2373" from-port="1" to-layer="2374" to-port="1" />
+		<edge from-layer="2374" from-port="2" to-layer="2377" to-port="0" />
+		<edge from-layer="2375" from-port="0" to-layer="2376" to-port="0" />
+		<edge from-layer="2376" from-port="1" to-layer="2377" to-port="1" />
+		<edge from-layer="2377" from-port="2" to-layer="2378" to-port="1" />
+		<edge from-layer="2378" from-port="2" to-layer="2379" to-port="0" />
+		<edge from-layer="2379" from-port="2" to-layer="2381" to-port="0" />
+		<edge from-layer="2380" from-port="0" to-layer="2381" to-port="1" />
+		<edge from-layer="2381" from-port="2" to-layer="2383" to-port="0" />
+		<edge from-layer="2382" from-port="0" to-layer="2383" to-port="1" />
+		<edge from-layer="2383" from-port="2" to-layer="2386" to-port="0" />
+		<edge from-layer="2384" from-port="0" to-layer="2385" to-port="0" />
+		<edge from-layer="2385" from-port="1" to-layer="2386" to-port="1" />
+		<edge from-layer="2386" from-port="2" to-layer="2389" to-port="0" />
+		<edge from-layer="2387" from-port="0" to-layer="2388" to-port="0" />
+		<edge from-layer="2388" from-port="1" to-layer="2389" to-port="1" />
+		<edge from-layer="2389" from-port="2" to-layer="2390" to-port="0" />
+		<edge from-layer="2390" from-port="2" to-layer="2391" to-port="0" />
+		<edge from-layer="2391" from-port="2" to-layer="2399" to-port="0" />
+		<edge from-layer="2391" from-port="2" to-layer="2402" to-port="0" />
+		<edge from-layer="2391" from-port="2" to-layer="2394" to-port="0" />
+		<edge from-layer="2392" from-port="0" to-layer="2393" to-port="0" />
+		<edge from-layer="2393" from-port="1" to-layer="2394" to-port="1" />
+		<edge from-layer="2394" from-port="2" to-layer="2397" to-port="0" />
+		<edge from-layer="2395" from-port="0" to-layer="2396" to-port="0" />
+		<edge from-layer="2396" from-port="1" to-layer="2397" to-port="1" />
+		<edge from-layer="2397" from-port="2" to-layer="2447" to-port="0" />
+		<edge from-layer="2398" from-port="0" to-layer="2399" to-port="1" />
+		<edge from-layer="2399" from-port="2" to-layer="2401" to-port="0" />
+		<edge from-layer="2400" from-port="0" to-layer="2401" to-port="1" />
+		<edge from-layer="2401" from-port="2" to-layer="2403" to-port="0" />
+		<edge from-layer="2402" from-port="1" to-layer="2403" to-port="1" />
+		<edge from-layer="2403" from-port="2" to-layer="2406" to-port="0" />
+		<edge from-layer="2404" from-port="0" to-layer="2405" to-port="0" />
+		<edge from-layer="2405" from-port="1" to-layer="2406" to-port="1" />
+		<edge from-layer="2406" from-port="2" to-layer="2409" to-port="0" />
+		<edge from-layer="2407" from-port="0" to-layer="2408" to-port="0" />
+		<edge from-layer="2408" from-port="1" to-layer="2409" to-port="1" />
+		<edge from-layer="2409" from-port="2" to-layer="2410" to-port="0" />
+		<edge from-layer="2410" from-port="1" to-layer="2413" to-port="0" />
+		<edge from-layer="2411" from-port="0" to-layer="2412" to-port="0" />
+		<edge from-layer="2412" from-port="1" to-layer="2413" to-port="1" />
+		<edge from-layer="2413" from-port="2" to-layer="2416" to-port="0" />
+		<edge from-layer="2414" from-port="0" to-layer="2415" to-port="0" />
+		<edge from-layer="2415" from-port="1" to-layer="2416" to-port="1" />
+		<edge from-layer="2416" from-port="2" to-layer="2427" to-port="0" />
+		<edge from-layer="2417" from-port="0" to-layer="2418" to-port="0" />
+		<edge from-layer="2418" from-port="1" to-layer="2419" to-port="1" />
+		<edge from-layer="2419" from-port="2" to-layer="2422" to-port="0" />
+		<edge from-layer="2420" from-port="0" to-layer="2421" to-port="0" />
+		<edge from-layer="2421" from-port="1" to-layer="2422" to-port="1" />
+		<edge from-layer="2422" from-port="2" to-layer="2424" to-port="0" />
+		<edge from-layer="2423" from-port="0" to-layer="2424" to-port="1" />
+		<edge from-layer="2424" from-port="2" to-layer="2426" to-port="0" />
+		<edge from-layer="2425" from-port="0" to-layer="2426" to-port="1" />
+		<edge from-layer="2426" from-port="2" to-layer="2427" to-port="1" />
+		<edge from-layer="2427" from-port="2" to-layer="2432" to-port="0" />
+		<edge from-layer="2427" from-port="2" to-layer="2429" to-port="0" />
+		<edge from-layer="2428" from-port="0" to-layer="2429" to-port="1" />
+		<edge from-layer="2429" from-port="2" to-layer="2431" to-port="0" />
+		<edge from-layer="2430" from-port="0" to-layer="2431" to-port="1" />
+		<edge from-layer="2431" from-port="2" to-layer="2433" to-port="0" />
+		<edge from-layer="2432" from-port="1" to-layer="2433" to-port="1" />
+		<edge from-layer="2433" from-port="2" to-layer="2436" to-port="0" />
+		<edge from-layer="2434" from-port="0" to-layer="2435" to-port="0" />
+		<edge from-layer="2435" from-port="1" to-layer="2436" to-port="1" />
+		<edge from-layer="2436" from-port="2" to-layer="2439" to-port="0" />
+		<edge from-layer="2437" from-port="0" to-layer="2438" to-port="0" />
+		<edge from-layer="2438" from-port="1" to-layer="2439" to-port="1" />
+		<edge from-layer="2439" from-port="2" to-layer="2440" to-port="0" />
+		<edge from-layer="2440" from-port="1" to-layer="2443" to-port="0" />
+		<edge from-layer="2441" from-port="0" to-layer="2442" to-port="0" />
+		<edge from-layer="2442" from-port="1" to-layer="2443" to-port="1" />
+		<edge from-layer="2443" from-port="2" to-layer="2446" to-port="0" />
+		<edge from-layer="2444" from-port="0" to-layer="2445" to-port="0" />
+		<edge from-layer="2445" from-port="1" to-layer="2446" to-port="1" />
+		<edge from-layer="2446" from-port="2" to-layer="2447" to-port="1" />
+		<edge from-layer="2447" from-port="2" to-layer="2452" to-port="0" />
+		<edge from-layer="2447" from-port="2" to-layer="2628" to-port="1" />
+		<edge from-layer="2447" from-port="2" to-layer="2449" to-port="0" />
+		<edge from-layer="2448" from-port="0" to-layer="2449" to-port="1" />
+		<edge from-layer="2449" from-port="2" to-layer="2451" to-port="0" />
+		<edge from-layer="2450" from-port="0" to-layer="2451" to-port="1" />
+		<edge from-layer="2451" from-port="2" to-layer="2453" to-port="0" />
+		<edge from-layer="2452" from-port="1" to-layer="2453" to-port="1" />
+		<edge from-layer="2453" from-port="2" to-layer="2456" to-port="0" />
+		<edge from-layer="2454" from-port="0" to-layer="2455" to-port="0" />
+		<edge from-layer="2455" from-port="1" to-layer="2456" to-port="1" />
+		<edge from-layer="2456" from-port="2" to-layer="2459" to-port="0" />
+		<edge from-layer="2457" from-port="0" to-layer="2458" to-port="0" />
+		<edge from-layer="2458" from-port="1" to-layer="2459" to-port="1" />
+		<edge from-layer="2459" from-port="2" to-layer="2462" to-port="0" />
+		<edge from-layer="2460" from-port="0" to-layer="2461" to-port="0" />
+		<edge from-layer="2461" from-port="1" to-layer="2462" to-port="1" />
+		<edge from-layer="2462" from-port="2" to-layer="2465" to-port="0" />
+		<edge from-layer="2463" from-port="0" to-layer="2464" to-port="0" />
+		<edge from-layer="2464" from-port="1" to-layer="2465" to-port="1" />
+		<edge from-layer="2465" from-port="2" to-layer="2467" to-port="0" />
+		<edge from-layer="2466" from-port="0" to-layer="2467" to-port="1" />
+		<edge from-layer="2467" from-port="2" to-layer="2469" to-port="0" />
+		<edge from-layer="2468" from-port="0" to-layer="2469" to-port="1" />
+		<edge from-layer="2469" from-port="2" to-layer="2471" to-port="0" />
+		<edge from-layer="2469" from-port="2" to-layer="2521" to-port="1" />
+		<edge from-layer="2470" from-port="0" to-layer="2471" to-port="1" />
+		<edge from-layer="2471" from-port="2" to-layer="2474" to-port="0" />
+		<edge from-layer="2472" from-port="0" to-layer="2473" to-port="0" />
+		<edge from-layer="2473" from-port="1" to-layer="2474" to-port="1" />
+		<edge from-layer="2474" from-port="2" to-layer="2477" to-port="0" />
+		<edge from-layer="2475" from-port="0" to-layer="2476" to-port="0" />
+		<edge from-layer="2476" from-port="1" to-layer="2477" to-port="1" />
+		<edge from-layer="2477" from-port="2" to-layer="2480" to-port="0" />
+		<edge from-layer="2477" from-port="2" to-layer="2503" to-port="0" />
+		<edge from-layer="2477" from-port="2" to-layer="2489" to-port="0" />
+		<edge from-layer="2478" from-port="0" to-layer="2479" to-port="0" />
+		<edge from-layer="2479" from-port="1" to-layer="2480" to-port="1" />
+		<edge from-layer="2480" from-port="2" to-layer="2482" to-port="0" />
+		<edge from-layer="2481" from-port="0" to-layer="2482" to-port="1" />
+		<edge from-layer="2482" from-port="2" to-layer="2484" to-port="0" />
+		<edge from-layer="2483" from-port="0" to-layer="2484" to-port="1" />
+		<edge from-layer="2484" from-port="2" to-layer="2486" to-port="0" />
+		<edge from-layer="2485" from-port="0" to-layer="2486" to-port="1" />
+		<edge from-layer="2486" from-port="2" to-layer="2499" to-port="0" />
+		<edge from-layer="2487" from-port="0" to-layer="2488" to-port="0" />
+		<edge from-layer="2488" from-port="1" to-layer="2489" to-port="1" />
+		<edge from-layer="2489" from-port="2" to-layer="2491" to-port="0" />
+		<edge from-layer="2490" from-port="0" to-layer="2491" to-port="1" />
+		<edge from-layer="2491" from-port="2" to-layer="2493" to-port="0" />
+		<edge from-layer="2492" from-port="0" to-layer="2493" to-port="1" />
+		<edge from-layer="2493" from-port="2" to-layer="2495" to-port="0" />
+		<edge from-layer="2494" from-port="0" to-layer="2495" to-port="1" />
+		<edge from-layer="2495" from-port="2" to-layer="2498" to-port="0" />
+		<edge from-layer="2496" from-port="0" to-layer="2497" to-port="0" />
+		<edge from-layer="2497" from-port="1" to-layer="2498" to-port="1" />
+		<edge from-layer="2498" from-port="2" to-layer="2499" to-port="1" />
+		<edge from-layer="2499" from-port="2" to-layer="2500" to-port="0" />
+		<edge from-layer="2500" from-port="1" to-layer="2510" to-port="0" />
+		<edge from-layer="2501" from-port="0" to-layer="2502" to-port="0" />
+		<edge from-layer="2502" from-port="1" to-layer="2503" to-port="1" />
+		<edge from-layer="2503" from-port="2" to-layer="2505" to-port="0" />
+		<edge from-layer="2504" from-port="0" to-layer="2505" to-port="1" />
+		<edge from-layer="2505" from-port="2" to-layer="2507" to-port="0" />
+		<edge from-layer="2506" from-port="0" to-layer="2507" to-port="1" />
+		<edge from-layer="2507" from-port="2" to-layer="2509" to-port="0" />
+		<edge from-layer="2508" from-port="0" to-layer="2509" to-port="1" />
+		<edge from-layer="2509" from-port="2" to-layer="2510" to-port="1" />
+		<edge from-layer="2510" from-port="2" to-layer="2512" to-port="0" />
+		<edge from-layer="2511" from-port="0" to-layer="2512" to-port="1" />
+		<edge from-layer="2512" from-port="2" to-layer="2514" to-port="0" />
+		<edge from-layer="2513" from-port="0" to-layer="2514" to-port="1" />
+		<edge from-layer="2514" from-port="2" to-layer="2516" to-port="0" />
+		<edge from-layer="2515" from-port="0" to-layer="2516" to-port="1" />
+		<edge from-layer="2516" from-port="2" to-layer="2519" to-port="0" />
+		<edge from-layer="2517" from-port="0" to-layer="2518" to-port="0" />
+		<edge from-layer="2518" from-port="1" to-layer="2519" to-port="1" />
+		<edge from-layer="2519" from-port="2" to-layer="2520" to-port="1" />
+		<edge from-layer="2520" from-port="2" to-layer="2521" to-port="0" />
+		<edge from-layer="2521" from-port="2" to-layer="2573" to-port="1" />
+		<edge from-layer="2521" from-port="2" to-layer="2523" to-port="0" />
+		<edge from-layer="2522" from-port="0" to-layer="2523" to-port="1" />
+		<edge from-layer="2523" from-port="2" to-layer="2526" to-port="0" />
+		<edge from-layer="2524" from-port="0" to-layer="2525" to-port="0" />
+		<edge from-layer="2525" from-port="1" to-layer="2526" to-port="1" />
+		<edge from-layer="2526" from-port="2" to-layer="2529" to-port="0" />
+		<edge from-layer="2527" from-port="0" to-layer="2528" to-port="0" />
+		<edge from-layer="2528" from-port="1" to-layer="2529" to-port="1" />
+		<edge from-layer="2529" from-port="2" to-layer="2532" to-port="0" />
+		<edge from-layer="2530" from-port="0" to-layer="2531" to-port="0" />
+		<edge from-layer="2531" from-port="1" to-layer="2532" to-port="1" />
+		<edge from-layer="2532" from-port="2" to-layer="2534" to-port="0" />
+		<edge from-layer="2533" from-port="0" to-layer="2534" to-port="1" />
+		<edge from-layer="2534" from-port="2" to-layer="2536" to-port="0" />
+		<edge from-layer="2535" from-port="0" to-layer="2536" to-port="1" />
+		<edge from-layer="2536" from-port="2" to-layer="2538" to-port="0" />
+		<edge from-layer="2537" from-port="0" to-layer="2538" to-port="1" />
+		<edge from-layer="2538" from-port="2" to-layer="2551" to-port="0" />
+		<edge from-layer="2539" from-port="0" to-layer="2540" to-port="0" />
+		<edge from-layer="2540" from-port="1" to-layer="2541" to-port="1" />
+		<edge from-layer="2541" from-port="2" to-layer="2543" to-port="0" />
+		<edge from-layer="2542" from-port="0" to-layer="2543" to-port="1" />
+		<edge from-layer="2543" from-port="2" to-layer="2545" to-port="0" />
+		<edge from-layer="2544" from-port="0" to-layer="2545" to-port="1" />
+		<edge from-layer="2545" from-port="2" to-layer="2547" to-port="0" />
+		<edge from-layer="2546" from-port="0" to-layer="2547" to-port="1" />
+		<edge from-layer="2547" from-port="2" to-layer="2550" to-port="0" />
+		<edge from-layer="2548" from-port="0" to-layer="2549" to-port="0" />
+		<edge from-layer="2549" from-port="1" to-layer="2550" to-port="1" />
+		<edge from-layer="2550" from-port="2" to-layer="2551" to-port="1" />
+		<edge from-layer="2551" from-port="2" to-layer="2552" to-port="0" />
+		<edge from-layer="2552" from-port="1" to-layer="2562" to-port="0" />
+		<edge from-layer="2553" from-port="0" to-layer="2554" to-port="0" />
+		<edge from-layer="2554" from-port="1" to-layer="2555" to-port="1" />
+		<edge from-layer="2555" from-port="2" to-layer="2557" to-port="0" />
+		<edge from-layer="2556" from-port="0" to-layer="2557" to-port="1" />
+		<edge from-layer="2557" from-port="2" to-layer="2559" to-port="0" />
+		<edge from-layer="2558" from-port="0" to-layer="2559" to-port="1" />
+		<edge from-layer="2559" from-port="2" to-layer="2561" to-port="0" />
+		<edge from-layer="2560" from-port="0" to-layer="2561" to-port="1" />
+		<edge from-layer="2561" from-port="2" to-layer="2562" to-port="1" />
+		<edge from-layer="2562" from-port="2" to-layer="2564" to-port="0" />
+		<edge from-layer="2563" from-port="0" to-layer="2564" to-port="1" />
+		<edge from-layer="2564" from-port="2" to-layer="2566" to-port="0" />
+		<edge from-layer="2565" from-port="0" to-layer="2566" to-port="1" />
+		<edge from-layer="2566" from-port="2" to-layer="2568" to-port="0" />
+		<edge from-layer="2567" from-port="0" to-layer="2568" to-port="1" />
+		<edge from-layer="2568" from-port="2" to-layer="2571" to-port="0" />
+		<edge from-layer="2569" from-port="0" to-layer="2570" to-port="0" />
+		<edge from-layer="2570" from-port="1" to-layer="2571" to-port="1" />
+		<edge from-layer="2571" from-port="2" to-layer="2572" to-port="1" />
+		<edge from-layer="2572" from-port="2" to-layer="2573" to-port="0" />
+		<edge from-layer="2573" from-port="2" to-layer="2575" to-port="0" />
+		<edge from-layer="2573" from-port="2" to-layer="2617" to-port="1" />
+		<edge from-layer="2574" from-port="0" to-layer="2575" to-port="1" />
+		<edge from-layer="2575" from-port="2" to-layer="2578" to-port="0" />
+		<edge from-layer="2576" from-port="0" to-layer="2577" to-port="0" />
+		<edge from-layer="2577" from-port="1" to-layer="2578" to-port="1" />
+		<edge from-layer="2578" from-port="2" to-layer="2581" to-port="0" />
+		<edge from-layer="2579" from-port="0" to-layer="2580" to-port="0" />
+		<edge from-layer="2580" from-port="1" to-layer="2581" to-port="1" />
+		<edge from-layer="2581" from-port="2" to-layer="2584" to-port="0" />
+		<edge from-layer="2582" from-port="0" to-layer="2583" to-port="0" />
+		<edge from-layer="2583" from-port="1" to-layer="2584" to-port="1" />
+		<edge from-layer="2584" from-port="2" to-layer="2585" to-port="1" />
+		<edge from-layer="2585" from-port="2" to-layer="2589" to-port="0" />
+		<edge from-layer="2585" from-port="2" to-layer="2600" to-port="0" />
+		<edge from-layer="2585" from-port="2" to-layer="2610" to-port="0" />
+		<edge from-layer="2586" from-port="0" to-layer="2600" to-port="1" />
+		<edge from-layer="2587" from-port="0" to-layer="2598" to-port="0" />
+		<edge from-layer="2588" from-port="0" to-layer="2598" to-port="1" />
+		<edge from-layer="2589" from-port="1" to-layer="2592" to-port="0" />
+		<edge from-layer="2590" from-port="0" to-layer="2592" to-port="1" />
+		<edge from-layer="2591" from-port="0" to-layer="2592" to-port="2" />
+		<edge from-layer="2592" from-port="3" to-layer="2594" to-port="0" />
+		<edge from-layer="2593" from-port="0" to-layer="2594" to-port="1" />
+		<edge from-layer="2594" from-port="2" to-layer="2596" to-port="0" />
+		<edge from-layer="2595" from-port="0" to-layer="2596" to-port="1" />
+		<edge from-layer="2596" from-port="2" to-layer="2598" to-port="2" />
+		<edge from-layer="2596" from-port="2" to-layer="2604" to-port="2" />
+		<edge from-layer="2596" from-port="2" to-layer="2607" to-port="0" />
+		<edge from-layer="2597" from-port="0" to-layer="2598" to-port="3" />
+		<edge from-layer="2598" from-port="4" to-layer="2600" to-port="2" />
+		<edge from-layer="2599" from-port="0" to-layer="2600" to-port="3" />
+		<edge from-layer="2600" from-port="4" to-layer="2612" to-port="0" />
+		<edge from-layer="2601" from-port="0" to-layer="2604" to-port="0" />
+		<edge from-layer="2602" from-port="0" to-layer="2604" to-port="1" />
+		<edge from-layer="2602" from-port="0" to-layer="2608" to-port="1" />
+		<edge from-layer="2603" from-port="0" to-layer="2604" to-port="3" />
+		<edge from-layer="2603" from-port="0" to-layer="2608" to-port="3" />
+		<edge from-layer="2604" from-port="4" to-layer="2610" to-port="1" />
+		<edge from-layer="2605" from-port="0" to-layer="2608" to-port="0" />
+		<edge from-layer="2606" from-port="0" to-layer="2607" to-port="1" />
+		<edge from-layer="2607" from-port="2" to-layer="2608" to-port="2" />
+		<edge from-layer="2608" from-port="4" to-layer="2610" to-port="2" />
+		<edge from-layer="2609" from-port="0" to-layer="2610" to-port="3" />
+		<edge from-layer="2610" from-port="4" to-layer="2611" to-port="0" />
+		<edge from-layer="2611" from-port="1" to-layer="2612" to-port="1" />
+		<edge from-layer="2612" from-port="2" to-layer="2615" to-port="0" />
+		<edge from-layer="2613" from-port="0" to-layer="2614" to-port="0" />
+		<edge from-layer="2614" from-port="1" to-layer="2615" to-port="1" />
+		<edge from-layer="2615" from-port="2" to-layer="2616" to-port="1" />
+		<edge from-layer="2616" from-port="2" to-layer="2617" to-port="0" />
+		<edge from-layer="2617" from-port="2" to-layer="2619" to-port="0" />
+		<edge from-layer="2618" from-port="0" to-layer="2619" to-port="1" />
+		<edge from-layer="2619" from-port="2" to-layer="2621" to-port="0" />
+		<edge from-layer="2620" from-port="0" to-layer="2621" to-port="1" />
+		<edge from-layer="2621" from-port="2" to-layer="2624" to-port="0" />
+		<edge from-layer="2622" from-port="0" to-layer="2623" to-port="0" />
+		<edge from-layer="2623" from-port="1" to-layer="2624" to-port="1" />
+		<edge from-layer="2624" from-port="2" to-layer="2627" to-port="0" />
+		<edge from-layer="2625" from-port="0" to-layer="2626" to-port="0" />
+		<edge from-layer="2626" from-port="1" to-layer="2627" to-port="1" />
+		<edge from-layer="2627" from-port="2" to-layer="2628" to-port="0" />
+		<edge from-layer="2628" from-port="2" to-layer="2629" to-port="0" />
+		<edge from-layer="2629" from-port="2" to-layer="2632" to-port="0" />
+		<edge from-layer="2629" from-port="2" to-layer="2637" to-port="0" />
+		<edge from-layer="2629" from-port="2" to-layer="2640" to-port="0" />
+		<edge from-layer="2630" from-port="0" to-layer="2631" to-port="0" />
+		<edge from-layer="2631" from-port="1" to-layer="2632" to-port="1" />
+		<edge from-layer="2632" from-port="2" to-layer="2635" to-port="0" />
+		<edge from-layer="2633" from-port="0" to-layer="2634" to-port="0" />
+		<edge from-layer="2634" from-port="1" to-layer="2635" to-port="1" />
+		<edge from-layer="2635" from-port="2" to-layer="2685" to-port="0" />
+		<edge from-layer="2636" from-port="0" to-layer="2637" to-port="1" />
+		<edge from-layer="2637" from-port="2" to-layer="2639" to-port="0" />
+		<edge from-layer="2638" from-port="0" to-layer="2639" to-port="1" />
+		<edge from-layer="2639" from-port="2" to-layer="2641" to-port="0" />
+		<edge from-layer="2640" from-port="1" to-layer="2641" to-port="1" />
+		<edge from-layer="2641" from-port="2" to-layer="2644" to-port="0" />
+		<edge from-layer="2642" from-port="0" to-layer="2643" to-port="0" />
+		<edge from-layer="2643" from-port="1" to-layer="2644" to-port="1" />
+		<edge from-layer="2644" from-port="2" to-layer="2647" to-port="0" />
+		<edge from-layer="2645" from-port="0" to-layer="2646" to-port="0" />
+		<edge from-layer="2646" from-port="1" to-layer="2647" to-port="1" />
+		<edge from-layer="2647" from-port="2" to-layer="2648" to-port="0" />
+		<edge from-layer="2648" from-port="1" to-layer="2651" to-port="0" />
+		<edge from-layer="2649" from-port="0" to-layer="2650" to-port="0" />
+		<edge from-layer="2650" from-port="1" to-layer="2651" to-port="1" />
+		<edge from-layer="2651" from-port="2" to-layer="2654" to-port="0" />
+		<edge from-layer="2652" from-port="0" to-layer="2653" to-port="0" />
+		<edge from-layer="2653" from-port="1" to-layer="2654" to-port="1" />
+		<edge from-layer="2654" from-port="2" to-layer="2665" to-port="0" />
+		<edge from-layer="2655" from-port="0" to-layer="2656" to-port="0" />
+		<edge from-layer="2656" from-port="1" to-layer="2657" to-port="1" />
+		<edge from-layer="2657" from-port="2" to-layer="2660" to-port="0" />
+		<edge from-layer="2658" from-port="0" to-layer="2659" to-port="0" />
+		<edge from-layer="2659" from-port="1" to-layer="2660" to-port="1" />
+		<edge from-layer="2660" from-port="2" to-layer="2662" to-port="0" />
+		<edge from-layer="2661" from-port="0" to-layer="2662" to-port="1" />
+		<edge from-layer="2662" from-port="2" to-layer="2664" to-port="0" />
+		<edge from-layer="2663" from-port="0" to-layer="2664" to-port="1" />
+		<edge from-layer="2664" from-port="2" to-layer="2665" to-port="1" />
+		<edge from-layer="2665" from-port="2" to-layer="2670" to-port="0" />
+		<edge from-layer="2665" from-port="2" to-layer="2667" to-port="0" />
+		<edge from-layer="2666" from-port="0" to-layer="2667" to-port="1" />
+		<edge from-layer="2667" from-port="2" to-layer="2669" to-port="0" />
+		<edge from-layer="2668" from-port="0" to-layer="2669" to-port="1" />
+		<edge from-layer="2669" from-port="2" to-layer="2671" to-port="0" />
+		<edge from-layer="2670" from-port="1" to-layer="2671" to-port="1" />
+		<edge from-layer="2671" from-port="2" to-layer="2674" to-port="0" />
+		<edge from-layer="2672" from-port="0" to-layer="2673" to-port="0" />
+		<edge from-layer="2673" from-port="1" to-layer="2674" to-port="1" />
+		<edge from-layer="2674" from-port="2" to-layer="2677" to-port="0" />
+		<edge from-layer="2675" from-port="0" to-layer="2676" to-port="0" />
+		<edge from-layer="2676" from-port="1" to-layer="2677" to-port="1" />
+		<edge from-layer="2677" from-port="2" to-layer="2678" to-port="0" />
+		<edge from-layer="2678" from-port="1" to-layer="2681" to-port="0" />
+		<edge from-layer="2679" from-port="0" to-layer="2680" to-port="0" />
+		<edge from-layer="2680" from-port="1" to-layer="2681" to-port="1" />
+		<edge from-layer="2681" from-port="2" to-layer="2684" to-port="0" />
+		<edge from-layer="2682" from-port="0" to-layer="2683" to-port="0" />
+		<edge from-layer="2683" from-port="1" to-layer="2684" to-port="1" />
+		<edge from-layer="2684" from-port="2" to-layer="2685" to-port="1" />
+		<edge from-layer="2685" from-port="2" to-layer="2687" to-port="0" />
+		<edge from-layer="2685" from-port="2" to-layer="2866" to-port="1" />
+		<edge from-layer="2685" from-port="2" to-layer="2690" to-port="0" />
+		<edge from-layer="2686" from-port="0" to-layer="2687" to-port="1" />
+		<edge from-layer="2687" from-port="2" to-layer="2689" to-port="0" />
+		<edge from-layer="2688" from-port="0" to-layer="2689" to-port="1" />
+		<edge from-layer="2689" from-port="2" to-layer="2691" to-port="0" />
+		<edge from-layer="2690" from-port="1" to-layer="2691" to-port="1" />
+		<edge from-layer="2691" from-port="2" to-layer="2694" to-port="0" />
+		<edge from-layer="2692" from-port="0" to-layer="2693" to-port="0" />
+		<edge from-layer="2693" from-port="1" to-layer="2694" to-port="1" />
+		<edge from-layer="2694" from-port="2" to-layer="2697" to-port="0" />
+		<edge from-layer="2695" from-port="0" to-layer="2696" to-port="0" />
+		<edge from-layer="2696" from-port="1" to-layer="2697" to-port="1" />
+		<edge from-layer="2697" from-port="2" to-layer="2700" to-port="0" />
+		<edge from-layer="2698" from-port="0" to-layer="2699" to-port="0" />
+		<edge from-layer="2699" from-port="1" to-layer="2700" to-port="1" />
+		<edge from-layer="2700" from-port="2" to-layer="2703" to-port="0" />
+		<edge from-layer="2701" from-port="0" to-layer="2702" to-port="0" />
+		<edge from-layer="2702" from-port="1" to-layer="2703" to-port="1" />
+		<edge from-layer="2703" from-port="2" to-layer="2705" to-port="0" />
+		<edge from-layer="2704" from-port="0" to-layer="2705" to-port="1" />
+		<edge from-layer="2705" from-port="2" to-layer="2707" to-port="0" />
+		<edge from-layer="2706" from-port="0" to-layer="2707" to-port="1" />
+		<edge from-layer="2707" from-port="2" to-layer="2709" to-port="0" />
+		<edge from-layer="2707" from-port="2" to-layer="2759" to-port="1" />
+		<edge from-layer="2708" from-port="0" to-layer="2709" to-port="1" />
+		<edge from-layer="2709" from-port="2" to-layer="2712" to-port="0" />
+		<edge from-layer="2710" from-port="0" to-layer="2711" to-port="0" />
+		<edge from-layer="2711" from-port="1" to-layer="2712" to-port="1" />
+		<edge from-layer="2712" from-port="2" to-layer="2715" to-port="0" />
+		<edge from-layer="2713" from-port="0" to-layer="2714" to-port="0" />
+		<edge from-layer="2714" from-port="1" to-layer="2715" to-port="1" />
+		<edge from-layer="2715" from-port="2" to-layer="2718" to-port="0" />
+		<edge from-layer="2715" from-port="2" to-layer="2741" to-port="0" />
+		<edge from-layer="2715" from-port="2" to-layer="2727" to-port="0" />
+		<edge from-layer="2716" from-port="0" to-layer="2717" to-port="0" />
+		<edge from-layer="2717" from-port="1" to-layer="2718" to-port="1" />
+		<edge from-layer="2718" from-port="2" to-layer="2720" to-port="0" />
+		<edge from-layer="2719" from-port="0" to-layer="2720" to-port="1" />
+		<edge from-layer="2720" from-port="2" to-layer="2722" to-port="0" />
+		<edge from-layer="2721" from-port="0" to-layer="2722" to-port="1" />
+		<edge from-layer="2722" from-port="2" to-layer="2724" to-port="0" />
+		<edge from-layer="2723" from-port="0" to-layer="2724" to-port="1" />
+		<edge from-layer="2724" from-port="2" to-layer="2737" to-port="0" />
+		<edge from-layer="2725" from-port="0" to-layer="2726" to-port="0" />
+		<edge from-layer="2726" from-port="1" to-layer="2727" to-port="1" />
+		<edge from-layer="2727" from-port="2" to-layer="2729" to-port="0" />
+		<edge from-layer="2728" from-port="0" to-layer="2729" to-port="1" />
+		<edge from-layer="2729" from-port="2" to-layer="2731" to-port="0" />
+		<edge from-layer="2730" from-port="0" to-layer="2731" to-port="1" />
+		<edge from-layer="2731" from-port="2" to-layer="2733" to-port="0" />
+		<edge from-layer="2732" from-port="0" to-layer="2733" to-port="1" />
+		<edge from-layer="2733" from-port="2" to-layer="2736" to-port="0" />
+		<edge from-layer="2734" from-port="0" to-layer="2735" to-port="0" />
+		<edge from-layer="2735" from-port="1" to-layer="2736" to-port="1" />
+		<edge from-layer="2736" from-port="2" to-layer="2737" to-port="1" />
+		<edge from-layer="2737" from-port="2" to-layer="2738" to-port="0" />
+		<edge from-layer="2738" from-port="1" to-layer="2748" to-port="0" />
+		<edge from-layer="2739" from-port="0" to-layer="2740" to-port="0" />
+		<edge from-layer="2740" from-port="1" to-layer="2741" to-port="1" />
+		<edge from-layer="2741" from-port="2" to-layer="2743" to-port="0" />
+		<edge from-layer="2742" from-port="0" to-layer="2743" to-port="1" />
+		<edge from-layer="2743" from-port="2" to-layer="2745" to-port="0" />
+		<edge from-layer="2744" from-port="0" to-layer="2745" to-port="1" />
+		<edge from-layer="2745" from-port="2" to-layer="2747" to-port="0" />
+		<edge from-layer="2746" from-port="0" to-layer="2747" to-port="1" />
+		<edge from-layer="2747" from-port="2" to-layer="2748" to-port="1" />
+		<edge from-layer="2748" from-port="2" to-layer="2750" to-port="0" />
+		<edge from-layer="2749" from-port="0" to-layer="2750" to-port="1" />
+		<edge from-layer="2750" from-port="2" to-layer="2752" to-port="0" />
+		<edge from-layer="2751" from-port="0" to-layer="2752" to-port="1" />
+		<edge from-layer="2752" from-port="2" to-layer="2754" to-port="0" />
+		<edge from-layer="2753" from-port="0" to-layer="2754" to-port="1" />
+		<edge from-layer="2754" from-port="2" to-layer="2757" to-port="0" />
+		<edge from-layer="2755" from-port="0" to-layer="2756" to-port="0" />
+		<edge from-layer="2756" from-port="1" to-layer="2757" to-port="1" />
+		<edge from-layer="2757" from-port="2" to-layer="2758" to-port="1" />
+		<edge from-layer="2758" from-port="2" to-layer="2759" to-port="0" />
+		<edge from-layer="2759" from-port="2" to-layer="2761" to-port="0" />
+		<edge from-layer="2759" from-port="2" to-layer="2811" to-port="1" />
+		<edge from-layer="2760" from-port="0" to-layer="2761" to-port="1" />
+		<edge from-layer="2761" from-port="2" to-layer="2764" to-port="0" />
+		<edge from-layer="2762" from-port="0" to-layer="2763" to-port="0" />
+		<edge from-layer="2763" from-port="1" to-layer="2764" to-port="1" />
+		<edge from-layer="2764" from-port="2" to-layer="2767" to-port="0" />
+		<edge from-layer="2765" from-port="0" to-layer="2766" to-port="0" />
+		<edge from-layer="2766" from-port="1" to-layer="2767" to-port="1" />
+		<edge from-layer="2767" from-port="2" to-layer="2770" to-port="0" />
+		<edge from-layer="2768" from-port="0" to-layer="2769" to-port="0" />
+		<edge from-layer="2769" from-port="1" to-layer="2770" to-port="1" />
+		<edge from-layer="2770" from-port="2" to-layer="2772" to-port="0" />
+		<edge from-layer="2771" from-port="0" to-layer="2772" to-port="1" />
+		<edge from-layer="2772" from-port="2" to-layer="2774" to-port="0" />
+		<edge from-layer="2773" from-port="0" to-layer="2774" to-port="1" />
+		<edge from-layer="2774" from-port="2" to-layer="2776" to-port="0" />
+		<edge from-layer="2775" from-port="0" to-layer="2776" to-port="1" />
+		<edge from-layer="2776" from-port="2" to-layer="2789" to-port="0" />
+		<edge from-layer="2777" from-port="0" to-layer="2778" to-port="0" />
+		<edge from-layer="2778" from-port="1" to-layer="2779" to-port="1" />
+		<edge from-layer="2779" from-port="2" to-layer="2781" to-port="0" />
+		<edge from-layer="2780" from-port="0" to-layer="2781" to-port="1" />
+		<edge from-layer="2781" from-port="2" to-layer="2783" to-port="0" />
+		<edge from-layer="2782" from-port="0" to-layer="2783" to-port="1" />
+		<edge from-layer="2783" from-port="2" to-layer="2785" to-port="0" />
+		<edge from-layer="2784" from-port="0" to-layer="2785" to-port="1" />
+		<edge from-layer="2785" from-port="2" to-layer="2788" to-port="0" />
+		<edge from-layer="2786" from-port="0" to-layer="2787" to-port="0" />
+		<edge from-layer="2787" from-port="1" to-layer="2788" to-port="1" />
+		<edge from-layer="2788" from-port="2" to-layer="2789" to-port="1" />
+		<edge from-layer="2789" from-port="2" to-layer="2790" to-port="0" />
+		<edge from-layer="2790" from-port="1" to-layer="2800" to-port="0" />
+		<edge from-layer="2791" from-port="0" to-layer="2792" to-port="0" />
+		<edge from-layer="2792" from-port="1" to-layer="2793" to-port="1" />
+		<edge from-layer="2793" from-port="2" to-layer="2795" to-port="0" />
+		<edge from-layer="2794" from-port="0" to-layer="2795" to-port="1" />
+		<edge from-layer="2795" from-port="2" to-layer="2797" to-port="0" />
+		<edge from-layer="2796" from-port="0" to-layer="2797" to-port="1" />
+		<edge from-layer="2797" from-port="2" to-layer="2799" to-port="0" />
+		<edge from-layer="2798" from-port="0" to-layer="2799" to-port="1" />
+		<edge from-layer="2799" from-port="2" to-layer="2800" to-port="1" />
+		<edge from-layer="2800" from-port="2" to-layer="2802" to-port="0" />
+		<edge from-layer="2801" from-port="0" to-layer="2802" to-port="1" />
+		<edge from-layer="2802" from-port="2" to-layer="2804" to-port="0" />
+		<edge from-layer="2803" from-port="0" to-layer="2804" to-port="1" />
+		<edge from-layer="2804" from-port="2" to-layer="2806" to-port="0" />
+		<edge from-layer="2805" from-port="0" to-layer="2806" to-port="1" />
+		<edge from-layer="2806" from-port="2" to-layer="2809" to-port="0" />
+		<edge from-layer="2807" from-port="0" to-layer="2808" to-port="0" />
+		<edge from-layer="2808" from-port="1" to-layer="2809" to-port="1" />
+		<edge from-layer="2809" from-port="2" to-layer="2810" to-port="1" />
+		<edge from-layer="2810" from-port="2" to-layer="2811" to-port="0" />
+		<edge from-layer="2811" from-port="2" to-layer="2813" to-port="0" />
+		<edge from-layer="2811" from-port="2" to-layer="2855" to-port="1" />
+		<edge from-layer="2812" from-port="0" to-layer="2813" to-port="1" />
+		<edge from-layer="2813" from-port="2" to-layer="2816" to-port="0" />
+		<edge from-layer="2814" from-port="0" to-layer="2815" to-port="0" />
+		<edge from-layer="2815" from-port="1" to-layer="2816" to-port="1" />
+		<edge from-layer="2816" from-port="2" to-layer="2819" to-port="0" />
+		<edge from-layer="2817" from-port="0" to-layer="2818" to-port="0" />
+		<edge from-layer="2818" from-port="1" to-layer="2819" to-port="1" />
+		<edge from-layer="2819" from-port="2" to-layer="2822" to-port="0" />
+		<edge from-layer="2820" from-port="0" to-layer="2821" to-port="0" />
+		<edge from-layer="2821" from-port="1" to-layer="2822" to-port="1" />
+		<edge from-layer="2822" from-port="2" to-layer="2823" to-port="1" />
+		<edge from-layer="2823" from-port="2" to-layer="2827" to-port="0" />
+		<edge from-layer="2823" from-port="2" to-layer="2848" to-port="0" />
+		<edge from-layer="2823" from-port="2" to-layer="2838" to-port="0" />
+		<edge from-layer="2824" from-port="0" to-layer="2838" to-port="1" />
+		<edge from-layer="2825" from-port="0" to-layer="2836" to-port="0" />
+		<edge from-layer="2826" from-port="0" to-layer="2836" to-port="1" />
+		<edge from-layer="2827" from-port="1" to-layer="2830" to-port="0" />
+		<edge from-layer="2828" from-port="0" to-layer="2830" to-port="1" />
+		<edge from-layer="2829" from-port="0" to-layer="2830" to-port="2" />
+		<edge from-layer="2830" from-port="3" to-layer="2832" to-port="0" />
+		<edge from-layer="2831" from-port="0" to-layer="2832" to-port="1" />
+		<edge from-layer="2832" from-port="2" to-layer="2834" to-port="0" />
+		<edge from-layer="2833" from-port="0" to-layer="2834" to-port="1" />
+		<edge from-layer="2834" from-port="2" to-layer="2842" to-port="2" />
+		<edge from-layer="2834" from-port="2" to-layer="2845" to-port="0" />
+		<edge from-layer="2834" from-port="2" to-layer="2836" to-port="2" />
+		<edge from-layer="2835" from-port="0" to-layer="2836" to-port="3" />
+		<edge from-layer="2836" from-port="4" to-layer="2838" to-port="2" />
+		<edge from-layer="2837" from-port="0" to-layer="2838" to-port="3" />
+		<edge from-layer="2838" from-port="4" to-layer="2850" to-port="0" />
+		<edge from-layer="2839" from-port="0" to-layer="2842" to-port="0" />
+		<edge from-layer="2840" from-port="0" to-layer="2842" to-port="1" />
+		<edge from-layer="2840" from-port="0" to-layer="2846" to-port="1" />
+		<edge from-layer="2841" from-port="0" to-layer="2842" to-port="3" />
+		<edge from-layer="2841" from-port="0" to-layer="2846" to-port="3" />
+		<edge from-layer="2842" from-port="4" to-layer="2848" to-port="1" />
+		<edge from-layer="2843" from-port="0" to-layer="2846" to-port="0" />
+		<edge from-layer="2844" from-port="0" to-layer="2845" to-port="1" />
+		<edge from-layer="2845" from-port="2" to-layer="2846" to-port="2" />
+		<edge from-layer="2846" from-port="4" to-layer="2848" to-port="2" />
+		<edge from-layer="2847" from-port="0" to-layer="2848" to-port="3" />
+		<edge from-layer="2848" from-port="4" to-layer="2849" to-port="0" />
+		<edge from-layer="2849" from-port="1" to-layer="2850" to-port="1" />
+		<edge from-layer="2850" from-port="2" to-layer="2853" to-port="0" />
+		<edge from-layer="2851" from-port="0" to-layer="2852" to-port="0" />
+		<edge from-layer="2852" from-port="1" to-layer="2853" to-port="1" />
+		<edge from-layer="2853" from-port="2" to-layer="2854" to-port="1" />
+		<edge from-layer="2854" from-port="2" to-layer="2855" to-port="0" />
+		<edge from-layer="2855" from-port="2" to-layer="2857" to-port="0" />
+		<edge from-layer="2856" from-port="0" to-layer="2857" to-port="1" />
+		<edge from-layer="2857" from-port="2" to-layer="2859" to-port="0" />
+		<edge from-layer="2858" from-port="0" to-layer="2859" to-port="1" />
+		<edge from-layer="2859" from-port="2" to-layer="2862" to-port="0" />
+		<edge from-layer="2860" from-port="0" to-layer="2861" to-port="0" />
+		<edge from-layer="2861" from-port="1" to-layer="2862" to-port="1" />
+		<edge from-layer="2862" from-port="2" to-layer="2865" to-port="0" />
+		<edge from-layer="2863" from-port="0" to-layer="2864" to-port="0" />
+		<edge from-layer="2864" from-port="1" to-layer="2865" to-port="1" />
+		<edge from-layer="2865" from-port="2" to-layer="2866" to-port="0" />
+		<edge from-layer="2866" from-port="2" to-layer="2872" to-port="0" />
+		<edge from-layer="2866" from-port="2" to-layer="2867" to-port="0" />
+		<edge from-layer="2867" from-port="1" to-layer="2868" to-port="0" />
+		<edge from-layer="2868" from-port="1" to-layer="2870" to-port="0" />
+		<edge from-layer="2869" from-port="0" to-layer="2872" to-port="2" />
+		<edge from-layer="2869" from-port="0" to-layer="2870" to-port="1" />
+		<edge from-layer="2870" from-port="2" to-layer="2871" to-port="0" />
+		<edge from-layer="2871" from-port="1" to-layer="2872" to-port="1" />
+		<edge from-layer="2872" from-port="3" to-layer="2875" to-port="0" />
+		<edge from-layer="2873" from-port="0" to-layer="2874" to-port="0" />
+		<edge from-layer="2874" from-port="1" to-layer="2875" to-port="1" />
+		<edge from-layer="2875" from-port="2" to-layer="2878" to-port="0" />
+		<edge from-layer="2876" from-port="0" to-layer="2877" to-port="0" />
+		<edge from-layer="2877" from-port="1" to-layer="2878" to-port="1" />
+		<edge from-layer="2878" from-port="2" to-layer="2879" to-port="0" />
+		<edge from-layer="2879" from-port="2" to-layer="2882" to-port="0" />
+		<edge from-layer="2879" from-port="2" to-layer="2887" to-port="0" />
+		<edge from-layer="2879" from-port="2" to-layer="2890" to-port="0" />
+		<edge from-layer="2880" from-port="0" to-layer="2881" to-port="0" />
+		<edge from-layer="2881" from-port="1" to-layer="2882" to-port="1" />
+		<edge from-layer="2882" from-port="2" to-layer="2885" to-port="0" />
+		<edge from-layer="2883" from-port="0" to-layer="2884" to-port="0" />
+		<edge from-layer="2884" from-port="1" to-layer="2885" to-port="1" />
+		<edge from-layer="2885" from-port="2" to-layer="2935" to-port="0" />
+		<edge from-layer="2886" from-port="0" to-layer="2887" to-port="1" />
+		<edge from-layer="2887" from-port="2" to-layer="2889" to-port="0" />
+		<edge from-layer="2888" from-port="0" to-layer="2889" to-port="1" />
+		<edge from-layer="2889" from-port="2" to-layer="2891" to-port="0" />
+		<edge from-layer="2890" from-port="1" to-layer="2891" to-port="1" />
+		<edge from-layer="2891" from-port="2" to-layer="2894" to-port="0" />
+		<edge from-layer="2892" from-port="0" to-layer="2893" to-port="0" />
+		<edge from-layer="2893" from-port="1" to-layer="2894" to-port="1" />
+		<edge from-layer="2894" from-port="2" to-layer="2897" to-port="0" />
+		<edge from-layer="2895" from-port="0" to-layer="2896" to-port="0" />
+		<edge from-layer="2896" from-port="1" to-layer="2897" to-port="1" />
+		<edge from-layer="2897" from-port="2" to-layer="2898" to-port="0" />
+		<edge from-layer="2898" from-port="1" to-layer="2901" to-port="0" />
+		<edge from-layer="2899" from-port="0" to-layer="2900" to-port="0" />
+		<edge from-layer="2900" from-port="1" to-layer="2901" to-port="1" />
+		<edge from-layer="2901" from-port="2" to-layer="2904" to-port="0" />
+		<edge from-layer="2902" from-port="0" to-layer="2903" to-port="0" />
+		<edge from-layer="2903" from-port="1" to-layer="2904" to-port="1" />
+		<edge from-layer="2904" from-port="2" to-layer="2915" to-port="0" />
+		<edge from-layer="2905" from-port="0" to-layer="2906" to-port="0" />
+		<edge from-layer="2906" from-port="1" to-layer="2907" to-port="1" />
+		<edge from-layer="2907" from-port="2" to-layer="2910" to-port="0" />
+		<edge from-layer="2908" from-port="0" to-layer="2909" to-port="0" />
+		<edge from-layer="2909" from-port="1" to-layer="2910" to-port="1" />
+		<edge from-layer="2910" from-port="2" to-layer="2912" to-port="0" />
+		<edge from-layer="2911" from-port="0" to-layer="2912" to-port="1" />
+		<edge from-layer="2912" from-port="2" to-layer="2914" to-port="0" />
+		<edge from-layer="2913" from-port="0" to-layer="2914" to-port="1" />
+		<edge from-layer="2914" from-port="2" to-layer="2915" to-port="1" />
+		<edge from-layer="2915" from-port="2" to-layer="2917" to-port="0" />
+		<edge from-layer="2915" from-port="2" to-layer="2920" to-port="0" />
+		<edge from-layer="2916" from-port="0" to-layer="2917" to-port="1" />
+		<edge from-layer="2917" from-port="2" to-layer="2919" to-port="0" />
+		<edge from-layer="2918" from-port="0" to-layer="2919" to-port="1" />
+		<edge from-layer="2919" from-port="2" to-layer="2921" to-port="0" />
+		<edge from-layer="2920" from-port="1" to-layer="2921" to-port="1" />
+		<edge from-layer="2921" from-port="2" to-layer="2924" to-port="0" />
+		<edge from-layer="2922" from-port="0" to-layer="2923" to-port="0" />
+		<edge from-layer="2923" from-port="1" to-layer="2924" to-port="1" />
+		<edge from-layer="2924" from-port="2" to-layer="2927" to-port="0" />
+		<edge from-layer="2925" from-port="0" to-layer="2926" to-port="0" />
+		<edge from-layer="2926" from-port="1" to-layer="2927" to-port="1" />
+		<edge from-layer="2927" from-port="2" to-layer="2928" to-port="0" />
+		<edge from-layer="2928" from-port="1" to-layer="2931" to-port="0" />
+		<edge from-layer="2929" from-port="0" to-layer="2930" to-port="0" />
+		<edge from-layer="2930" from-port="1" to-layer="2931" to-port="1" />
+		<edge from-layer="2931" from-port="2" to-layer="2934" to-port="0" />
+		<edge from-layer="2932" from-port="0" to-layer="2933" to-port="0" />
+		<edge from-layer="2933" from-port="1" to-layer="2934" to-port="1" />
+		<edge from-layer="2934" from-port="2" to-layer="2935" to-port="1" />
+		<edge from-layer="2935" from-port="2" to-layer="2940" to-port="0" />
+		<edge from-layer="2935" from-port="2" to-layer="3116" to-port="1" />
+		<edge from-layer="2935" from-port="2" to-layer="2937" to-port="0" />
+		<edge from-layer="2936" from-port="0" to-layer="2937" to-port="1" />
+		<edge from-layer="2937" from-port="2" to-layer="2939" to-port="0" />
+		<edge from-layer="2938" from-port="0" to-layer="2939" to-port="1" />
+		<edge from-layer="2939" from-port="2" to-layer="2941" to-port="0" />
+		<edge from-layer="2940" from-port="1" to-layer="2941" to-port="1" />
+		<edge from-layer="2941" from-port="2" to-layer="2944" to-port="0" />
+		<edge from-layer="2942" from-port="0" to-layer="2943" to-port="0" />
+		<edge from-layer="2943" from-port="1" to-layer="2944" to-port="1" />
+		<edge from-layer="2944" from-port="2" to-layer="2947" to-port="0" />
+		<edge from-layer="2945" from-port="0" to-layer="2946" to-port="0" />
+		<edge from-layer="2946" from-port="1" to-layer="2947" to-port="1" />
+		<edge from-layer="2947" from-port="2" to-layer="2950" to-port="0" />
+		<edge from-layer="2948" from-port="0" to-layer="2949" to-port="0" />
+		<edge from-layer="2949" from-port="1" to-layer="2950" to-port="1" />
+		<edge from-layer="2950" from-port="2" to-layer="2953" to-port="0" />
+		<edge from-layer="2951" from-port="0" to-layer="2952" to-port="0" />
+		<edge from-layer="2952" from-port="1" to-layer="2953" to-port="1" />
+		<edge from-layer="2953" from-port="2" to-layer="2955" to-port="0" />
+		<edge from-layer="2954" from-port="0" to-layer="2955" to-port="1" />
+		<edge from-layer="2955" from-port="2" to-layer="2957" to-port="0" />
+		<edge from-layer="2956" from-port="0" to-layer="2957" to-port="1" />
+		<edge from-layer="2957" from-port="2" to-layer="2959" to-port="0" />
+		<edge from-layer="2957" from-port="2" to-layer="3009" to-port="1" />
+		<edge from-layer="2958" from-port="0" to-layer="2959" to-port="1" />
+		<edge from-layer="2959" from-port="2" to-layer="2962" to-port="0" />
+		<edge from-layer="2960" from-port="0" to-layer="2961" to-port="0" />
+		<edge from-layer="2961" from-port="1" to-layer="2962" to-port="1" />
+		<edge from-layer="2962" from-port="2" to-layer="2965" to-port="0" />
+		<edge from-layer="2963" from-port="0" to-layer="2964" to-port="0" />
+		<edge from-layer="2964" from-port="1" to-layer="2965" to-port="1" />
+		<edge from-layer="2965" from-port="2" to-layer="2968" to-port="0" />
+		<edge from-layer="2965" from-port="2" to-layer="2977" to-port="0" />
+		<edge from-layer="2965" from-port="2" to-layer="2991" to-port="0" />
+		<edge from-layer="2966" from-port="0" to-layer="2967" to-port="0" />
+		<edge from-layer="2967" from-port="1" to-layer="2968" to-port="1" />
+		<edge from-layer="2968" from-port="2" to-layer="2970" to-port="0" />
+		<edge from-layer="2969" from-port="0" to-layer="2970" to-port="1" />
+		<edge from-layer="2970" from-port="2" to-layer="2972" to-port="0" />
+		<edge from-layer="2971" from-port="0" to-layer="2972" to-port="1" />
+		<edge from-layer="2972" from-port="2" to-layer="2974" to-port="0" />
+		<edge from-layer="2973" from-port="0" to-layer="2974" to-port="1" />
+		<edge from-layer="2974" from-port="2" to-layer="2987" to-port="0" />
+		<edge from-layer="2975" from-port="0" to-layer="2976" to-port="0" />
+		<edge from-layer="2976" from-port="1" to-layer="2977" to-port="1" />
+		<edge from-layer="2977" from-port="2" to-layer="2979" to-port="0" />
+		<edge from-layer="2978" from-port="0" to-layer="2979" to-port="1" />
+		<edge from-layer="2979" from-port="2" to-layer="2981" to-port="0" />
+		<edge from-layer="2980" from-port="0" to-layer="2981" to-port="1" />
+		<edge from-layer="2981" from-port="2" to-layer="2983" to-port="0" />
+		<edge from-layer="2982" from-port="0" to-layer="2983" to-port="1" />
+		<edge from-layer="2983" from-port="2" to-layer="2986" to-port="0" />
+		<edge from-layer="2984" from-port="0" to-layer="2985" to-port="0" />
+		<edge from-layer="2985" from-port="1" to-layer="2986" to-port="1" />
+		<edge from-layer="2986" from-port="2" to-layer="2987" to-port="1" />
+		<edge from-layer="2987" from-port="2" to-layer="2988" to-port="0" />
+		<edge from-layer="2988" from-port="1" to-layer="2998" to-port="0" />
+		<edge from-layer="2989" from-port="0" to-layer="2990" to-port="0" />
+		<edge from-layer="2990" from-port="1" to-layer="2991" to-port="1" />
+		<edge from-layer="2991" from-port="2" to-layer="2993" to-port="0" />
+		<edge from-layer="2992" from-port="0" to-layer="2993" to-port="1" />
+		<edge from-layer="2993" from-port="2" to-layer="2995" to-port="0" />
+		<edge from-layer="2994" from-port="0" to-layer="2995" to-port="1" />
+		<edge from-layer="2995" from-port="2" to-layer="2997" to-port="0" />
+		<edge from-layer="2996" from-port="0" to-layer="2997" to-port="1" />
+		<edge from-layer="2997" from-port="2" to-layer="2998" to-port="1" />
+		<edge from-layer="2998" from-port="2" to-layer="3000" to-port="0" />
+		<edge from-layer="2999" from-port="0" to-layer="3000" to-port="1" />
+		<edge from-layer="3000" from-port="2" to-layer="3002" to-port="0" />
+		<edge from-layer="3001" from-port="0" to-layer="3002" to-port="1" />
+		<edge from-layer="3002" from-port="2" to-layer="3004" to-port="0" />
+		<edge from-layer="3003" from-port="0" to-layer="3004" to-port="1" />
+		<edge from-layer="3004" from-port="2" to-layer="3007" to-port="0" />
+		<edge from-layer="3005" from-port="0" to-layer="3006" to-port="0" />
+		<edge from-layer="3006" from-port="1" to-layer="3007" to-port="1" />
+		<edge from-layer="3007" from-port="2" to-layer="3008" to-port="1" />
+		<edge from-layer="3008" from-port="2" to-layer="3009" to-port="0" />
+		<edge from-layer="3009" from-port="2" to-layer="3011" to-port="0" />
+		<edge from-layer="3009" from-port="2" to-layer="3061" to-port="1" />
+		<edge from-layer="3010" from-port="0" to-layer="3011" to-port="1" />
+		<edge from-layer="3011" from-port="2" to-layer="3014" to-port="0" />
+		<edge from-layer="3012" from-port="0" to-layer="3013" to-port="0" />
+		<edge from-layer="3013" from-port="1" to-layer="3014" to-port="1" />
+		<edge from-layer="3014" from-port="2" to-layer="3017" to-port="0" />
+		<edge from-layer="3015" from-port="0" to-layer="3016" to-port="0" />
+		<edge from-layer="3016" from-port="1" to-layer="3017" to-port="1" />
+		<edge from-layer="3017" from-port="2" to-layer="3020" to-port="0" />
+		<edge from-layer="3018" from-port="0" to-layer="3019" to-port="0" />
+		<edge from-layer="3019" from-port="1" to-layer="3020" to-port="1" />
+		<edge from-layer="3020" from-port="2" to-layer="3022" to-port="0" />
+		<edge from-layer="3021" from-port="0" to-layer="3022" to-port="1" />
+		<edge from-layer="3022" from-port="2" to-layer="3024" to-port="0" />
+		<edge from-layer="3023" from-port="0" to-layer="3024" to-port="1" />
+		<edge from-layer="3024" from-port="2" to-layer="3026" to-port="0" />
+		<edge from-layer="3025" from-port="0" to-layer="3026" to-port="1" />
+		<edge from-layer="3026" from-port="2" to-layer="3039" to-port="0" />
+		<edge from-layer="3027" from-port="0" to-layer="3028" to-port="0" />
+		<edge from-layer="3028" from-port="1" to-layer="3029" to-port="1" />
+		<edge from-layer="3029" from-port="2" to-layer="3031" to-port="0" />
+		<edge from-layer="3030" from-port="0" to-layer="3031" to-port="1" />
+		<edge from-layer="3031" from-port="2" to-layer="3033" to-port="0" />
+		<edge from-layer="3032" from-port="0" to-layer="3033" to-port="1" />
+		<edge from-layer="3033" from-port="2" to-layer="3035" to-port="0" />
+		<edge from-layer="3034" from-port="0" to-layer="3035" to-port="1" />
+		<edge from-layer="3035" from-port="2" to-layer="3038" to-port="0" />
+		<edge from-layer="3036" from-port="0" to-layer="3037" to-port="0" />
+		<edge from-layer="3037" from-port="1" to-layer="3038" to-port="1" />
+		<edge from-layer="3038" from-port="2" to-layer="3039" to-port="1" />
+		<edge from-layer="3039" from-port="2" to-layer="3040" to-port="0" />
+		<edge from-layer="3040" from-port="1" to-layer="3050" to-port="0" />
+		<edge from-layer="3041" from-port="0" to-layer="3042" to-port="0" />
+		<edge from-layer="3042" from-port="1" to-layer="3043" to-port="1" />
+		<edge from-layer="3043" from-port="2" to-layer="3045" to-port="0" />
+		<edge from-layer="3044" from-port="0" to-layer="3045" to-port="1" />
+		<edge from-layer="3045" from-port="2" to-layer="3047" to-port="0" />
+		<edge from-layer="3046" from-port="0" to-layer="3047" to-port="1" />
+		<edge from-layer="3047" from-port="2" to-layer="3049" to-port="0" />
+		<edge from-layer="3048" from-port="0" to-layer="3049" to-port="1" />
+		<edge from-layer="3049" from-port="2" to-layer="3050" to-port="1" />
+		<edge from-layer="3050" from-port="2" to-layer="3052" to-port="0" />
+		<edge from-layer="3051" from-port="0" to-layer="3052" to-port="1" />
+		<edge from-layer="3052" from-port="2" to-layer="3054" to-port="0" />
+		<edge from-layer="3053" from-port="0" to-layer="3054" to-port="1" />
+		<edge from-layer="3054" from-port="2" to-layer="3056" to-port="0" />
+		<edge from-layer="3055" from-port="0" to-layer="3056" to-port="1" />
+		<edge from-layer="3056" from-port="2" to-layer="3059" to-port="0" />
+		<edge from-layer="3057" from-port="0" to-layer="3058" to-port="0" />
+		<edge from-layer="3058" from-port="1" to-layer="3059" to-port="1" />
+		<edge from-layer="3059" from-port="2" to-layer="3060" to-port="1" />
+		<edge from-layer="3060" from-port="2" to-layer="3061" to-port="0" />
+		<edge from-layer="3061" from-port="2" to-layer="3105" to-port="1" />
+		<edge from-layer="3061" from-port="2" to-layer="3063" to-port="0" />
+		<edge from-layer="3062" from-port="0" to-layer="3063" to-port="1" />
+		<edge from-layer="3063" from-port="2" to-layer="3066" to-port="0" />
+		<edge from-layer="3064" from-port="0" to-layer="3065" to-port="0" />
+		<edge from-layer="3065" from-port="1" to-layer="3066" to-port="1" />
+		<edge from-layer="3066" from-port="2" to-layer="3069" to-port="0" />
+		<edge from-layer="3067" from-port="0" to-layer="3068" to-port="0" />
+		<edge from-layer="3068" from-port="1" to-layer="3069" to-port="1" />
+		<edge from-layer="3069" from-port="2" to-layer="3072" to-port="0" />
+		<edge from-layer="3070" from-port="0" to-layer="3071" to-port="0" />
+		<edge from-layer="3071" from-port="1" to-layer="3072" to-port="1" />
+		<edge from-layer="3072" from-port="2" to-layer="3073" to-port="1" />
+		<edge from-layer="3073" from-port="2" to-layer="3088" to-port="0" />
+		<edge from-layer="3073" from-port="2" to-layer="3098" to-port="0" />
+		<edge from-layer="3073" from-port="2" to-layer="3077" to-port="0" />
+		<edge from-layer="3074" from-port="0" to-layer="3088" to-port="1" />
+		<edge from-layer="3075" from-port="0" to-layer="3086" to-port="0" />
+		<edge from-layer="3076" from-port="0" to-layer="3086" to-port="1" />
+		<edge from-layer="3077" from-port="1" to-layer="3080" to-port="0" />
+		<edge from-layer="3078" from-port="0" to-layer="3080" to-port="1" />
+		<edge from-layer="3079" from-port="0" to-layer="3080" to-port="2" />
+		<edge from-layer="3080" from-port="3" to-layer="3082" to-port="0" />
+		<edge from-layer="3081" from-port="0" to-layer="3082" to-port="1" />
+		<edge from-layer="3082" from-port="2" to-layer="3084" to-port="0" />
+		<edge from-layer="3083" from-port="0" to-layer="3084" to-port="1" />
+		<edge from-layer="3084" from-port="2" to-layer="3092" to-port="2" />
+		<edge from-layer="3084" from-port="2" to-layer="3086" to-port="2" />
+		<edge from-layer="3084" from-port="2" to-layer="3095" to-port="0" />
+		<edge from-layer="3085" from-port="0" to-layer="3086" to-port="3" />
+		<edge from-layer="3086" from-port="4" to-layer="3088" to-port="2" />
+		<edge from-layer="3087" from-port="0" to-layer="3088" to-port="3" />
+		<edge from-layer="3088" from-port="4" to-layer="3100" to-port="0" />
+		<edge from-layer="3089" from-port="0" to-layer="3092" to-port="0" />
+		<edge from-layer="3090" from-port="0" to-layer="3096" to-port="1" />
+		<edge from-layer="3090" from-port="0" to-layer="3092" to-port="1" />
+		<edge from-layer="3091" from-port="0" to-layer="3092" to-port="3" />
+		<edge from-layer="3091" from-port="0" to-layer="3096" to-port="3" />
+		<edge from-layer="3092" from-port="4" to-layer="3098" to-port="1" />
+		<edge from-layer="3093" from-port="0" to-layer="3096" to-port="0" />
+		<edge from-layer="3094" from-port="0" to-layer="3095" to-port="1" />
+		<edge from-layer="3095" from-port="2" to-layer="3096" to-port="2" />
+		<edge from-layer="3096" from-port="4" to-layer="3098" to-port="2" />
+		<edge from-layer="3097" from-port="0" to-layer="3098" to-port="3" />
+		<edge from-layer="3098" from-port="4" to-layer="3099" to-port="0" />
+		<edge from-layer="3099" from-port="1" to-layer="3100" to-port="1" />
+		<edge from-layer="3100" from-port="2" to-layer="3103" to-port="0" />
+		<edge from-layer="3101" from-port="0" to-layer="3102" to-port="0" />
+		<edge from-layer="3102" from-port="1" to-layer="3103" to-port="1" />
+		<edge from-layer="3103" from-port="2" to-layer="3104" to-port="1" />
+		<edge from-layer="3104" from-port="2" to-layer="3105" to-port="0" />
+		<edge from-layer="3105" from-port="2" to-layer="3107" to-port="0" />
+		<edge from-layer="3106" from-port="0" to-layer="3107" to-port="1" />
+		<edge from-layer="3107" from-port="2" to-layer="3109" to-port="0" />
+		<edge from-layer="3108" from-port="0" to-layer="3109" to-port="1" />
+		<edge from-layer="3109" from-port="2" to-layer="3112" to-port="0" />
+		<edge from-layer="3110" from-port="0" to-layer="3111" to-port="0" />
+		<edge from-layer="3111" from-port="1" to-layer="3112" to-port="1" />
+		<edge from-layer="3112" from-port="2" to-layer="3115" to-port="0" />
+		<edge from-layer="3113" from-port="0" to-layer="3114" to-port="0" />
+		<edge from-layer="3114" from-port="1" to-layer="3115" to-port="1" />
+		<edge from-layer="3115" from-port="2" to-layer="3116" to-port="0" />
+		<edge from-layer="3116" from-port="2" to-layer="3117" to-port="0" />
+		<edge from-layer="3117" from-port="2" to-layer="3125" to-port="0" />
+		<edge from-layer="3117" from-port="2" to-layer="3128" to-port="0" />
+		<edge from-layer="3117" from-port="2" to-layer="3120" to-port="0" />
+		<edge from-layer="3118" from-port="0" to-layer="3119" to-port="0" />
+		<edge from-layer="3119" from-port="1" to-layer="3120" to-port="1" />
+		<edge from-layer="3120" from-port="2" to-layer="3123" to-port="0" />
+		<edge from-layer="3121" from-port="0" to-layer="3122" to-port="0" />
+		<edge from-layer="3122" from-port="1" to-layer="3123" to-port="1" />
+		<edge from-layer="3123" from-port="2" to-layer="3173" to-port="0" />
+		<edge from-layer="3124" from-port="0" to-layer="3125" to-port="1" />
+		<edge from-layer="3125" from-port="2" to-layer="3127" to-port="0" />
+		<edge from-layer="3126" from-port="0" to-layer="3127" to-port="1" />
+		<edge from-layer="3127" from-port="2" to-layer="3129" to-port="0" />
+		<edge from-layer="3128" from-port="1" to-layer="3129" to-port="1" />
+		<edge from-layer="3129" from-port="2" to-layer="3132" to-port="0" />
+		<edge from-layer="3130" from-port="0" to-layer="3131" to-port="0" />
+		<edge from-layer="3131" from-port="1" to-layer="3132" to-port="1" />
+		<edge from-layer="3132" from-port="2" to-layer="3135" to-port="0" />
+		<edge from-layer="3133" from-port="0" to-layer="3134" to-port="0" />
+		<edge from-layer="3134" from-port="1" to-layer="3135" to-port="1" />
+		<edge from-layer="3135" from-port="2" to-layer="3136" to-port="0" />
+		<edge from-layer="3136" from-port="1" to-layer="3139" to-port="0" />
+		<edge from-layer="3137" from-port="0" to-layer="3138" to-port="0" />
+		<edge from-layer="3138" from-port="1" to-layer="3139" to-port="1" />
+		<edge from-layer="3139" from-port="2" to-layer="3142" to-port="0" />
+		<edge from-layer="3140" from-port="0" to-layer="3141" to-port="0" />
+		<edge from-layer="3141" from-port="1" to-layer="3142" to-port="1" />
+		<edge from-layer="3142" from-port="2" to-layer="3153" to-port="0" />
+		<edge from-layer="3143" from-port="0" to-layer="3144" to-port="0" />
+		<edge from-layer="3144" from-port="1" to-layer="3145" to-port="1" />
+		<edge from-layer="3145" from-port="2" to-layer="3148" to-port="0" />
+		<edge from-layer="3146" from-port="0" to-layer="3147" to-port="0" />
+		<edge from-layer="3147" from-port="1" to-layer="3148" to-port="1" />
+		<edge from-layer="3148" from-port="2" to-layer="3150" to-port="0" />
+		<edge from-layer="3149" from-port="0" to-layer="3150" to-port="1" />
+		<edge from-layer="3150" from-port="2" to-layer="3152" to-port="0" />
+		<edge from-layer="3151" from-port="0" to-layer="3152" to-port="1" />
+		<edge from-layer="3152" from-port="2" to-layer="3153" to-port="1" />
+		<edge from-layer="3153" from-port="2" to-layer="3155" to-port="0" />
+		<edge from-layer="3153" from-port="2" to-layer="3158" to-port="0" />
+		<edge from-layer="3154" from-port="0" to-layer="3155" to-port="1" />
+		<edge from-layer="3155" from-port="2" to-layer="3157" to-port="0" />
+		<edge from-layer="3156" from-port="0" to-layer="3157" to-port="1" />
+		<edge from-layer="3157" from-port="2" to-layer="3159" to-port="0" />
+		<edge from-layer="3158" from-port="1" to-layer="3159" to-port="1" />
+		<edge from-layer="3159" from-port="2" to-layer="3162" to-port="0" />
+		<edge from-layer="3160" from-port="0" to-layer="3161" to-port="0" />
+		<edge from-layer="3161" from-port="1" to-layer="3162" to-port="1" />
+		<edge from-layer="3162" from-port="2" to-layer="3165" to-port="0" />
+		<edge from-layer="3163" from-port="0" to-layer="3164" to-port="0" />
+		<edge from-layer="3164" from-port="1" to-layer="3165" to-port="1" />
+		<edge from-layer="3165" from-port="2" to-layer="3166" to-port="0" />
+		<edge from-layer="3166" from-port="1" to-layer="3169" to-port="0" />
+		<edge from-layer="3167" from-port="0" to-layer="3168" to-port="0" />
+		<edge from-layer="3168" from-port="1" to-layer="3169" to-port="1" />
+		<edge from-layer="3169" from-port="2" to-layer="3172" to-port="0" />
+		<edge from-layer="3170" from-port="0" to-layer="3171" to-port="0" />
+		<edge from-layer="3171" from-port="1" to-layer="3172" to-port="1" />
+		<edge from-layer="3172" from-port="2" to-layer="3173" to-port="1" />
+		<edge from-layer="3173" from-port="2" to-layer="3354" to-port="1" />
+		<edge from-layer="3173" from-port="2" to-layer="3175" to-port="0" />
+		<edge from-layer="3173" from-port="2" to-layer="3178" to-port="0" />
+		<edge from-layer="3174" from-port="0" to-layer="3175" to-port="1" />
+		<edge from-layer="3175" from-port="2" to-layer="3177" to-port="0" />
+		<edge from-layer="3176" from-port="0" to-layer="3177" to-port="1" />
+		<edge from-layer="3177" from-port="2" to-layer="3179" to-port="0" />
+		<edge from-layer="3178" from-port="1" to-layer="3179" to-port="1" />
+		<edge from-layer="3179" from-port="2" to-layer="3182" to-port="0" />
+		<edge from-layer="3180" from-port="0" to-layer="3181" to-port="0" />
+		<edge from-layer="3181" from-port="1" to-layer="3182" to-port="1" />
+		<edge from-layer="3182" from-port="2" to-layer="3185" to-port="0" />
+		<edge from-layer="3183" from-port="0" to-layer="3184" to-port="0" />
+		<edge from-layer="3184" from-port="1" to-layer="3185" to-port="1" />
+		<edge from-layer="3185" from-port="2" to-layer="3188" to-port="0" />
+		<edge from-layer="3186" from-port="0" to-layer="3187" to-port="0" />
+		<edge from-layer="3187" from-port="1" to-layer="3188" to-port="1" />
+		<edge from-layer="3188" from-port="2" to-layer="3191" to-port="0" />
+		<edge from-layer="3189" from-port="0" to-layer="3190" to-port="0" />
+		<edge from-layer="3190" from-port="1" to-layer="3191" to-port="1" />
+		<edge from-layer="3191" from-port="2" to-layer="3193" to-port="0" />
+		<edge from-layer="3192" from-port="0" to-layer="3193" to-port="1" />
+		<edge from-layer="3193" from-port="2" to-layer="3195" to-port="0" />
+		<edge from-layer="3194" from-port="0" to-layer="3195" to-port="1" />
+		<edge from-layer="3195" from-port="2" to-layer="3247" to-port="1" />
+		<edge from-layer="3195" from-port="2" to-layer="3197" to-port="0" />
+		<edge from-layer="3196" from-port="0" to-layer="3197" to-port="1" />
+		<edge from-layer="3197" from-port="2" to-layer="3200" to-port="0" />
+		<edge from-layer="3198" from-port="0" to-layer="3199" to-port="0" />
+		<edge from-layer="3199" from-port="1" to-layer="3200" to-port="1" />
+		<edge from-layer="3200" from-port="2" to-layer="3203" to-port="0" />
+		<edge from-layer="3201" from-port="0" to-layer="3202" to-port="0" />
+		<edge from-layer="3202" from-port="1" to-layer="3203" to-port="1" />
+		<edge from-layer="3203" from-port="2" to-layer="3206" to-port="0" />
+		<edge from-layer="3203" from-port="2" to-layer="3215" to-port="0" />
+		<edge from-layer="3203" from-port="2" to-layer="3229" to-port="0" />
+		<edge from-layer="3204" from-port="0" to-layer="3205" to-port="0" />
+		<edge from-layer="3205" from-port="1" to-layer="3206" to-port="1" />
+		<edge from-layer="3206" from-port="2" to-layer="3208" to-port="0" />
+		<edge from-layer="3207" from-port="0" to-layer="3208" to-port="1" />
+		<edge from-layer="3208" from-port="2" to-layer="3210" to-port="0" />
+		<edge from-layer="3209" from-port="0" to-layer="3210" to-port="1" />
+		<edge from-layer="3210" from-port="2" to-layer="3212" to-port="0" />
+		<edge from-layer="3211" from-port="0" to-layer="3212" to-port="1" />
+		<edge from-layer="3212" from-port="2" to-layer="3225" to-port="0" />
+		<edge from-layer="3213" from-port="0" to-layer="3214" to-port="0" />
+		<edge from-layer="3214" from-port="1" to-layer="3215" to-port="1" />
+		<edge from-layer="3215" from-port="2" to-layer="3217" to-port="0" />
+		<edge from-layer="3216" from-port="0" to-layer="3217" to-port="1" />
+		<edge from-layer="3217" from-port="2" to-layer="3219" to-port="0" />
+		<edge from-layer="3218" from-port="0" to-layer="3219" to-port="1" />
+		<edge from-layer="3219" from-port="2" to-layer="3221" to-port="0" />
+		<edge from-layer="3220" from-port="0" to-layer="3221" to-port="1" />
+		<edge from-layer="3221" from-port="2" to-layer="3224" to-port="0" />
+		<edge from-layer="3222" from-port="0" to-layer="3223" to-port="0" />
+		<edge from-layer="3223" from-port="1" to-layer="3224" to-port="1" />
+		<edge from-layer="3224" from-port="2" to-layer="3225" to-port="1" />
+		<edge from-layer="3225" from-port="2" to-layer="3226" to-port="0" />
+		<edge from-layer="3226" from-port="1" to-layer="3236" to-port="0" />
+		<edge from-layer="3227" from-port="0" to-layer="3228" to-port="0" />
+		<edge from-layer="3228" from-port="1" to-layer="3229" to-port="1" />
+		<edge from-layer="3229" from-port="2" to-layer="3231" to-port="0" />
+		<edge from-layer="3230" from-port="0" to-layer="3231" to-port="1" />
+		<edge from-layer="3231" from-port="2" to-layer="3233" to-port="0" />
+		<edge from-layer="3232" from-port="0" to-layer="3233" to-port="1" />
+		<edge from-layer="3233" from-port="2" to-layer="3235" to-port="0" />
+		<edge from-layer="3234" from-port="0" to-layer="3235" to-port="1" />
+		<edge from-layer="3235" from-port="2" to-layer="3236" to-port="1" />
+		<edge from-layer="3236" from-port="2" to-layer="3238" to-port="0" />
+		<edge from-layer="3237" from-port="0" to-layer="3238" to-port="1" />
+		<edge from-layer="3238" from-port="2" to-layer="3240" to-port="0" />
+		<edge from-layer="3239" from-port="0" to-layer="3240" to-port="1" />
+		<edge from-layer="3240" from-port="2" to-layer="3242" to-port="0" />
+		<edge from-layer="3241" from-port="0" to-layer="3242" to-port="1" />
+		<edge from-layer="3242" from-port="2" to-layer="3245" to-port="0" />
+		<edge from-layer="3243" from-port="0" to-layer="3244" to-port="0" />
+		<edge from-layer="3244" from-port="1" to-layer="3245" to-port="1" />
+		<edge from-layer="3245" from-port="2" to-layer="3246" to-port="1" />
+		<edge from-layer="3246" from-port="2" to-layer="3247" to-port="0" />
+		<edge from-layer="3247" from-port="2" to-layer="3249" to-port="0" />
+		<edge from-layer="3247" from-port="2" to-layer="3299" to-port="1" />
+		<edge from-layer="3248" from-port="0" to-layer="3249" to-port="1" />
+		<edge from-layer="3249" from-port="2" to-layer="3252" to-port="0" />
+		<edge from-layer="3250" from-port="0" to-layer="3251" to-port="0" />
+		<edge from-layer="3251" from-port="1" to-layer="3252" to-port="1" />
+		<edge from-layer="3252" from-port="2" to-layer="3255" to-port="0" />
+		<edge from-layer="3253" from-port="0" to-layer="3254" to-port="0" />
+		<edge from-layer="3254" from-port="1" to-layer="3255" to-port="1" />
+		<edge from-layer="3255" from-port="2" to-layer="3258" to-port="0" />
+		<edge from-layer="3256" from-port="0" to-layer="3257" to-port="0" />
+		<edge from-layer="3257" from-port="1" to-layer="3258" to-port="1" />
+		<edge from-layer="3258" from-port="2" to-layer="3260" to-port="0" />
+		<edge from-layer="3259" from-port="0" to-layer="3260" to-port="1" />
+		<edge from-layer="3260" from-port="2" to-layer="3262" to-port="0" />
+		<edge from-layer="3261" from-port="0" to-layer="3262" to-port="1" />
+		<edge from-layer="3262" from-port="2" to-layer="3264" to-port="0" />
+		<edge from-layer="3263" from-port="0" to-layer="3264" to-port="1" />
+		<edge from-layer="3264" from-port="2" to-layer="3277" to-port="0" />
+		<edge from-layer="3265" from-port="0" to-layer="3266" to-port="0" />
+		<edge from-layer="3266" from-port="1" to-layer="3267" to-port="1" />
+		<edge from-layer="3267" from-port="2" to-layer="3269" to-port="0" />
+		<edge from-layer="3268" from-port="0" to-layer="3269" to-port="1" />
+		<edge from-layer="3269" from-port="2" to-layer="3271" to-port="0" />
+		<edge from-layer="3270" from-port="0" to-layer="3271" to-port="1" />
+		<edge from-layer="3271" from-port="2" to-layer="3273" to-port="0" />
+		<edge from-layer="3272" from-port="0" to-layer="3273" to-port="1" />
+		<edge from-layer="3273" from-port="2" to-layer="3276" to-port="0" />
+		<edge from-layer="3274" from-port="0" to-layer="3275" to-port="0" />
+		<edge from-layer="3275" from-port="1" to-layer="3276" to-port="1" />
+		<edge from-layer="3276" from-port="2" to-layer="3277" to-port="1" />
+		<edge from-layer="3277" from-port="2" to-layer="3278" to-port="0" />
+		<edge from-layer="3278" from-port="1" to-layer="3288" to-port="0" />
+		<edge from-layer="3279" from-port="0" to-layer="3280" to-port="0" />
+		<edge from-layer="3280" from-port="1" to-layer="3281" to-port="1" />
+		<edge from-layer="3281" from-port="2" to-layer="3283" to-port="0" />
+		<edge from-layer="3282" from-port="0" to-layer="3283" to-port="1" />
+		<edge from-layer="3283" from-port="2" to-layer="3285" to-port="0" />
+		<edge from-layer="3284" from-port="0" to-layer="3285" to-port="1" />
+		<edge from-layer="3285" from-port="2" to-layer="3287" to-port="0" />
+		<edge from-layer="3286" from-port="0" to-layer="3287" to-port="1" />
+		<edge from-layer="3287" from-port="2" to-layer="3288" to-port="1" />
+		<edge from-layer="3288" from-port="2" to-layer="3290" to-port="0" />
+		<edge from-layer="3289" from-port="0" to-layer="3290" to-port="1" />
+		<edge from-layer="3290" from-port="2" to-layer="3292" to-port="0" />
+		<edge from-layer="3291" from-port="0" to-layer="3292" to-port="1" />
+		<edge from-layer="3292" from-port="2" to-layer="3294" to-port="0" />
+		<edge from-layer="3293" from-port="0" to-layer="3294" to-port="1" />
+		<edge from-layer="3294" from-port="2" to-layer="3297" to-port="0" />
+		<edge from-layer="3295" from-port="0" to-layer="3296" to-port="0" />
+		<edge from-layer="3296" from-port="1" to-layer="3297" to-port="1" />
+		<edge from-layer="3297" from-port="2" to-layer="3298" to-port="1" />
+		<edge from-layer="3298" from-port="2" to-layer="3299" to-port="0" />
+		<edge from-layer="3299" from-port="2" to-layer="3343" to-port="1" />
+		<edge from-layer="3299" from-port="2" to-layer="3301" to-port="0" />
+		<edge from-layer="3300" from-port="0" to-layer="3301" to-port="1" />
+		<edge from-layer="3301" from-port="2" to-layer="3304" to-port="0" />
+		<edge from-layer="3302" from-port="0" to-layer="3303" to-port="0" />
+		<edge from-layer="3303" from-port="1" to-layer="3304" to-port="1" />
+		<edge from-layer="3304" from-port="2" to-layer="3307" to-port="0" />
+		<edge from-layer="3305" from-port="0" to-layer="3306" to-port="0" />
+		<edge from-layer="3306" from-port="1" to-layer="3307" to-port="1" />
+		<edge from-layer="3307" from-port="2" to-layer="3310" to-port="0" />
+		<edge from-layer="3308" from-port="0" to-layer="3309" to-port="0" />
+		<edge from-layer="3309" from-port="1" to-layer="3310" to-port="1" />
+		<edge from-layer="3310" from-port="2" to-layer="3311" to-port="1" />
+		<edge from-layer="3311" from-port="2" to-layer="3336" to-port="0" />
+		<edge from-layer="3311" from-port="2" to-layer="3315" to-port="0" />
+		<edge from-layer="3311" from-port="2" to-layer="3326" to-port="0" />
+		<edge from-layer="3312" from-port="0" to-layer="3326" to-port="1" />
+		<edge from-layer="3313" from-port="0" to-layer="3324" to-port="0" />
+		<edge from-layer="3314" from-port="0" to-layer="3324" to-port="1" />
+		<edge from-layer="3315" from-port="1" to-layer="3318" to-port="0" />
+		<edge from-layer="3316" from-port="0" to-layer="3318" to-port="1" />
+		<edge from-layer="3317" from-port="0" to-layer="3318" to-port="2" />
+		<edge from-layer="3318" from-port="3" to-layer="3320" to-port="0" />
+		<edge from-layer="3319" from-port="0" to-layer="3320" to-port="1" />
+		<edge from-layer="3320" from-port="2" to-layer="3322" to-port="0" />
+		<edge from-layer="3321" from-port="0" to-layer="3322" to-port="1" />
+		<edge from-layer="3322" from-port="2" to-layer="3333" to-port="0" />
+		<edge from-layer="3322" from-port="2" to-layer="3324" to-port="2" />
+		<edge from-layer="3322" from-port="2" to-layer="3330" to-port="2" />
+		<edge from-layer="3323" from-port="0" to-layer="3324" to-port="3" />
+		<edge from-layer="3324" from-port="4" to-layer="3326" to-port="2" />
+		<edge from-layer="3325" from-port="0" to-layer="3326" to-port="3" />
+		<edge from-layer="3326" from-port="4" to-layer="3338" to-port="0" />
+		<edge from-layer="3327" from-port="0" to-layer="3330" to-port="0" />
+		<edge from-layer="3328" from-port="0" to-layer="3334" to-port="1" />
+		<edge from-layer="3328" from-port="0" to-layer="3330" to-port="1" />
+		<edge from-layer="3329" from-port="0" to-layer="3330" to-port="3" />
+		<edge from-layer="3329" from-port="0" to-layer="3334" to-port="3" />
+		<edge from-layer="3330" from-port="4" to-layer="3336" to-port="1" />
+		<edge from-layer="3331" from-port="0" to-layer="3334" to-port="0" />
+		<edge from-layer="3332" from-port="0" to-layer="3333" to-port="1" />
+		<edge from-layer="3333" from-port="2" to-layer="3334" to-port="2" />
+		<edge from-layer="3334" from-port="4" to-layer="3336" to-port="2" />
+		<edge from-layer="3335" from-port="0" to-layer="3336" to-port="3" />
+		<edge from-layer="3336" from-port="4" to-layer="3337" to-port="0" />
+		<edge from-layer="3337" from-port="1" to-layer="3338" to-port="1" />
+		<edge from-layer="3338" from-port="2" to-layer="3341" to-port="0" />
+		<edge from-layer="3339" from-port="0" to-layer="3340" to-port="0" />
+		<edge from-layer="3340" from-port="1" to-layer="3341" to-port="1" />
+		<edge from-layer="3341" from-port="2" to-layer="3342" to-port="1" />
+		<edge from-layer="3342" from-port="2" to-layer="3343" to-port="0" />
+		<edge from-layer="3343" from-port="2" to-layer="3345" to-port="0" />
+		<edge from-layer="3344" from-port="0" to-layer="3345" to-port="1" />
+		<edge from-layer="3345" from-port="2" to-layer="3347" to-port="0" />
+		<edge from-layer="3346" from-port="0" to-layer="3347" to-port="1" />
+		<edge from-layer="3347" from-port="2" to-layer="3350" to-port="0" />
+		<edge from-layer="3348" from-port="0" to-layer="3349" to-port="0" />
+		<edge from-layer="3349" from-port="1" to-layer="3350" to-port="1" />
+		<edge from-layer="3350" from-port="2" to-layer="3353" to-port="0" />
+		<edge from-layer="3351" from-port="0" to-layer="3352" to-port="0" />
+		<edge from-layer="3352" from-port="1" to-layer="3353" to-port="1" />
+		<edge from-layer="3353" from-port="2" to-layer="3354" to-port="0" />
+		<edge from-layer="3354" from-port="2" to-layer="3355" to-port="0" />
+		<edge from-layer="3355" from-port="2" to-layer="3358" to-port="0" />
+		<edge from-layer="3355" from-port="2" to-layer="3363" to-port="0" />
+		<edge from-layer="3355" from-port="2" to-layer="3366" to-port="0" />
+		<edge from-layer="3356" from-port="0" to-layer="3357" to-port="0" />
+		<edge from-layer="3357" from-port="1" to-layer="3358" to-port="1" />
+		<edge from-layer="3358" from-port="2" to-layer="3361" to-port="0" />
+		<edge from-layer="3359" from-port="0" to-layer="3360" to-port="0" />
+		<edge from-layer="3360" from-port="1" to-layer="3361" to-port="1" />
+		<edge from-layer="3361" from-port="2" to-layer="3411" to-port="0" />
+		<edge from-layer="3362" from-port="0" to-layer="3363" to-port="1" />
+		<edge from-layer="3363" from-port="2" to-layer="3365" to-port="0" />
+		<edge from-layer="3364" from-port="0" to-layer="3365" to-port="1" />
+		<edge from-layer="3365" from-port="2" to-layer="3367" to-port="0" />
+		<edge from-layer="3366" from-port="1" to-layer="3367" to-port="1" />
+		<edge from-layer="3367" from-port="2" to-layer="3370" to-port="0" />
+		<edge from-layer="3368" from-port="0" to-layer="3369" to-port="0" />
+		<edge from-layer="3369" from-port="1" to-layer="3370" to-port="1" />
+		<edge from-layer="3370" from-port="2" to-layer="3373" to-port="0" />
+		<edge from-layer="3371" from-port="0" to-layer="3372" to-port="0" />
+		<edge from-layer="3372" from-port="1" to-layer="3373" to-port="1" />
+		<edge from-layer="3373" from-port="2" to-layer="3374" to-port="0" />
+		<edge from-layer="3374" from-port="1" to-layer="3377" to-port="0" />
+		<edge from-layer="3375" from-port="0" to-layer="3376" to-port="0" />
+		<edge from-layer="3376" from-port="1" to-layer="3377" to-port="1" />
+		<edge from-layer="3377" from-port="2" to-layer="3380" to-port="0" />
+		<edge from-layer="3378" from-port="0" to-layer="3379" to-port="0" />
+		<edge from-layer="3379" from-port="1" to-layer="3380" to-port="1" />
+		<edge from-layer="3380" from-port="2" to-layer="3391" to-port="0" />
+		<edge from-layer="3381" from-port="0" to-layer="3382" to-port="0" />
+		<edge from-layer="3382" from-port="1" to-layer="3383" to-port="1" />
+		<edge from-layer="3383" from-port="2" to-layer="3386" to-port="0" />
+		<edge from-layer="3384" from-port="0" to-layer="3385" to-port="0" />
+		<edge from-layer="3385" from-port="1" to-layer="3386" to-port="1" />
+		<edge from-layer="3386" from-port="2" to-layer="3388" to-port="0" />
+		<edge from-layer="3387" from-port="0" to-layer="3388" to-port="1" />
+		<edge from-layer="3388" from-port="2" to-layer="3390" to-port="0" />
+		<edge from-layer="3389" from-port="0" to-layer="3390" to-port="1" />
+		<edge from-layer="3390" from-port="2" to-layer="3391" to-port="1" />
+		<edge from-layer="3391" from-port="2" to-layer="3393" to-port="0" />
+		<edge from-layer="3391" from-port="2" to-layer="3396" to-port="0" />
+		<edge from-layer="3392" from-port="0" to-layer="3393" to-port="1" />
+		<edge from-layer="3393" from-port="2" to-layer="3395" to-port="0" />
+		<edge from-layer="3394" from-port="0" to-layer="3395" to-port="1" />
+		<edge from-layer="3395" from-port="2" to-layer="3397" to-port="0" />
+		<edge from-layer="3396" from-port="1" to-layer="3397" to-port="1" />
+		<edge from-layer="3397" from-port="2" to-layer="3400" to-port="0" />
+		<edge from-layer="3398" from-port="0" to-layer="3399" to-port="0" />
+		<edge from-layer="3399" from-port="1" to-layer="3400" to-port="1" />
+		<edge from-layer="3400" from-port="2" to-layer="3403" to-port="0" />
+		<edge from-layer="3401" from-port="0" to-layer="3402" to-port="0" />
+		<edge from-layer="3402" from-port="1" to-layer="3403" to-port="1" />
+		<edge from-layer="3403" from-port="2" to-layer="3404" to-port="0" />
+		<edge from-layer="3404" from-port="1" to-layer="3407" to-port="0" />
+		<edge from-layer="3405" from-port="0" to-layer="3406" to-port="0" />
+		<edge from-layer="3406" from-port="1" to-layer="3407" to-port="1" />
+		<edge from-layer="3407" from-port="2" to-layer="3410" to-port="0" />
+		<edge from-layer="3408" from-port="0" to-layer="3409" to-port="0" />
+		<edge from-layer="3409" from-port="1" to-layer="3410" to-port="1" />
+		<edge from-layer="3410" from-port="2" to-layer="3411" to-port="1" />
+		<edge from-layer="3411" from-port="2" to-layer="3416" to-port="0" />
+		<edge from-layer="3411" from-port="2" to-layer="3592" to-port="1" />
+		<edge from-layer="3411" from-port="2" to-layer="3413" to-port="0" />
+		<edge from-layer="3412" from-port="0" to-layer="3413" to-port="1" />
+		<edge from-layer="3413" from-port="2" to-layer="3415" to-port="0" />
+		<edge from-layer="3414" from-port="0" to-layer="3415" to-port="1" />
+		<edge from-layer="3415" from-port="2" to-layer="3417" to-port="0" />
+		<edge from-layer="3416" from-port="1" to-layer="3417" to-port="1" />
+		<edge from-layer="3417" from-port="2" to-layer="3420" to-port="0" />
+		<edge from-layer="3418" from-port="0" to-layer="3419" to-port="0" />
+		<edge from-layer="3419" from-port="1" to-layer="3420" to-port="1" />
+		<edge from-layer="3420" from-port="2" to-layer="3423" to-port="0" />
+		<edge from-layer="3421" from-port="0" to-layer="3422" to-port="0" />
+		<edge from-layer="3422" from-port="1" to-layer="3423" to-port="1" />
+		<edge from-layer="3423" from-port="2" to-layer="3426" to-port="0" />
+		<edge from-layer="3424" from-port="0" to-layer="3425" to-port="0" />
+		<edge from-layer="3425" from-port="1" to-layer="3426" to-port="1" />
+		<edge from-layer="3426" from-port="2" to-layer="3429" to-port="0" />
+		<edge from-layer="3427" from-port="0" to-layer="3428" to-port="0" />
+		<edge from-layer="3428" from-port="1" to-layer="3429" to-port="1" />
+		<edge from-layer="3429" from-port="2" to-layer="3431" to-port="0" />
+		<edge from-layer="3430" from-port="0" to-layer="3431" to-port="1" />
+		<edge from-layer="3431" from-port="2" to-layer="3433" to-port="0" />
+		<edge from-layer="3432" from-port="0" to-layer="3433" to-port="1" />
+		<edge from-layer="3433" from-port="2" to-layer="3485" to-port="1" />
+		<edge from-layer="3433" from-port="2" to-layer="3435" to-port="0" />
+		<edge from-layer="3434" from-port="0" to-layer="3435" to-port="1" />
+		<edge from-layer="3435" from-port="2" to-layer="3438" to-port="0" />
+		<edge from-layer="3436" from-port="0" to-layer="3437" to-port="0" />
+		<edge from-layer="3437" from-port="1" to-layer="3438" to-port="1" />
+		<edge from-layer="3438" from-port="2" to-layer="3441" to-port="0" />
+		<edge from-layer="3439" from-port="0" to-layer="3440" to-port="0" />
+		<edge from-layer="3440" from-port="1" to-layer="3441" to-port="1" />
+		<edge from-layer="3441" from-port="2" to-layer="3453" to-port="0" />
+		<edge from-layer="3441" from-port="2" to-layer="3467" to-port="0" />
+		<edge from-layer="3441" from-port="2" to-layer="3444" to-port="0" />
+		<edge from-layer="3442" from-port="0" to-layer="3443" to-port="0" />
+		<edge from-layer="3443" from-port="1" to-layer="3444" to-port="1" />
+		<edge from-layer="3444" from-port="2" to-layer="3446" to-port="0" />
+		<edge from-layer="3445" from-port="0" to-layer="3446" to-port="1" />
+		<edge from-layer="3446" from-port="2" to-layer="3448" to-port="0" />
+		<edge from-layer="3447" from-port="0" to-layer="3448" to-port="1" />
+		<edge from-layer="3448" from-port="2" to-layer="3450" to-port="0" />
+		<edge from-layer="3449" from-port="0" to-layer="3450" to-port="1" />
+		<edge from-layer="3450" from-port="2" to-layer="3463" to-port="0" />
+		<edge from-layer="3451" from-port="0" to-layer="3452" to-port="0" />
+		<edge from-layer="3452" from-port="1" to-layer="3453" to-port="1" />
+		<edge from-layer="3453" from-port="2" to-layer="3455" to-port="0" />
+		<edge from-layer="3454" from-port="0" to-layer="3455" to-port="1" />
+		<edge from-layer="3455" from-port="2" to-layer="3457" to-port="0" />
+		<edge from-layer="3456" from-port="0" to-layer="3457" to-port="1" />
+		<edge from-layer="3457" from-port="2" to-layer="3459" to-port="0" />
+		<edge from-layer="3458" from-port="0" to-layer="3459" to-port="1" />
+		<edge from-layer="3459" from-port="2" to-layer="3462" to-port="0" />
+		<edge from-layer="3460" from-port="0" to-layer="3461" to-port="0" />
+		<edge from-layer="3461" from-port="1" to-layer="3462" to-port="1" />
+		<edge from-layer="3462" from-port="2" to-layer="3463" to-port="1" />
+		<edge from-layer="3463" from-port="2" to-layer="3464" to-port="0" />
+		<edge from-layer="3464" from-port="1" to-layer="3474" to-port="0" />
+		<edge from-layer="3465" from-port="0" to-layer="3466" to-port="0" />
+		<edge from-layer="3466" from-port="1" to-layer="3467" to-port="1" />
+		<edge from-layer="3467" from-port="2" to-layer="3469" to-port="0" />
+		<edge from-layer="3468" from-port="0" to-layer="3469" to-port="1" />
+		<edge from-layer="3469" from-port="2" to-layer="3471" to-port="0" />
+		<edge from-layer="3470" from-port="0" to-layer="3471" to-port="1" />
+		<edge from-layer="3471" from-port="2" to-layer="3473" to-port="0" />
+		<edge from-layer="3472" from-port="0" to-layer="3473" to-port="1" />
+		<edge from-layer="3473" from-port="2" to-layer="3474" to-port="1" />
+		<edge from-layer="3474" from-port="2" to-layer="3476" to-port="0" />
+		<edge from-layer="3475" from-port="0" to-layer="3476" to-port="1" />
+		<edge from-layer="3476" from-port="2" to-layer="3478" to-port="0" />
+		<edge from-layer="3477" from-port="0" to-layer="3478" to-port="1" />
+		<edge from-layer="3478" from-port="2" to-layer="3480" to-port="0" />
+		<edge from-layer="3479" from-port="0" to-layer="3480" to-port="1" />
+		<edge from-layer="3480" from-port="2" to-layer="3483" to-port="0" />
+		<edge from-layer="3481" from-port="0" to-layer="3482" to-port="0" />
+		<edge from-layer="3482" from-port="1" to-layer="3483" to-port="1" />
+		<edge from-layer="3483" from-port="2" to-layer="3484" to-port="1" />
+		<edge from-layer="3484" from-port="2" to-layer="3485" to-port="0" />
+		<edge from-layer="3485" from-port="2" to-layer="3487" to-port="0" />
+		<edge from-layer="3485" from-port="2" to-layer="3537" to-port="1" />
+		<edge from-layer="3486" from-port="0" to-layer="3487" to-port="1" />
+		<edge from-layer="3487" from-port="2" to-layer="3490" to-port="0" />
+		<edge from-layer="3488" from-port="0" to-layer="3489" to-port="0" />
+		<edge from-layer="3489" from-port="1" to-layer="3490" to-port="1" />
+		<edge from-layer="3490" from-port="2" to-layer="3493" to-port="0" />
+		<edge from-layer="3491" from-port="0" to-layer="3492" to-port="0" />
+		<edge from-layer="3492" from-port="1" to-layer="3493" to-port="1" />
+		<edge from-layer="3493" from-port="2" to-layer="3496" to-port="0" />
+		<edge from-layer="3494" from-port="0" to-layer="3495" to-port="0" />
+		<edge from-layer="3495" from-port="1" to-layer="3496" to-port="1" />
+		<edge from-layer="3496" from-port="2" to-layer="3498" to-port="0" />
+		<edge from-layer="3497" from-port="0" to-layer="3498" to-port="1" />
+		<edge from-layer="3498" from-port="2" to-layer="3500" to-port="0" />
+		<edge from-layer="3499" from-port="0" to-layer="3500" to-port="1" />
+		<edge from-layer="3500" from-port="2" to-layer="3502" to-port="0" />
+		<edge from-layer="3501" from-port="0" to-layer="3502" to-port="1" />
+		<edge from-layer="3502" from-port="2" to-layer="3515" to-port="0" />
+		<edge from-layer="3503" from-port="0" to-layer="3504" to-port="0" />
+		<edge from-layer="3504" from-port="1" to-layer="3505" to-port="1" />
+		<edge from-layer="3505" from-port="2" to-layer="3507" to-port="0" />
+		<edge from-layer="3506" from-port="0" to-layer="3507" to-port="1" />
+		<edge from-layer="3507" from-port="2" to-layer="3509" to-port="0" />
+		<edge from-layer="3508" from-port="0" to-layer="3509" to-port="1" />
+		<edge from-layer="3509" from-port="2" to-layer="3511" to-port="0" />
+		<edge from-layer="3510" from-port="0" to-layer="3511" to-port="1" />
+		<edge from-layer="3511" from-port="2" to-layer="3514" to-port="0" />
+		<edge from-layer="3512" from-port="0" to-layer="3513" to-port="0" />
+		<edge from-layer="3513" from-port="1" to-layer="3514" to-port="1" />
+		<edge from-layer="3514" from-port="2" to-layer="3515" to-port="1" />
+		<edge from-layer="3515" from-port="2" to-layer="3516" to-port="0" />
+		<edge from-layer="3516" from-port="1" to-layer="3526" to-port="0" />
+		<edge from-layer="3517" from-port="0" to-layer="3518" to-port="0" />
+		<edge from-layer="3518" from-port="1" to-layer="3519" to-port="1" />
+		<edge from-layer="3519" from-port="2" to-layer="3521" to-port="0" />
+		<edge from-layer="3520" from-port="0" to-layer="3521" to-port="1" />
+		<edge from-layer="3521" from-port="2" to-layer="3523" to-port="0" />
+		<edge from-layer="3522" from-port="0" to-layer="3523" to-port="1" />
+		<edge from-layer="3523" from-port="2" to-layer="3525" to-port="0" />
+		<edge from-layer="3524" from-port="0" to-layer="3525" to-port="1" />
+		<edge from-layer="3525" from-port="2" to-layer="3526" to-port="1" />
+		<edge from-layer="3526" from-port="2" to-layer="3528" to-port="0" />
+		<edge from-layer="3527" from-port="0" to-layer="3528" to-port="1" />
+		<edge from-layer="3528" from-port="2" to-layer="3530" to-port="0" />
+		<edge from-layer="3529" from-port="0" to-layer="3530" to-port="1" />
+		<edge from-layer="3530" from-port="2" to-layer="3532" to-port="0" />
+		<edge from-layer="3531" from-port="0" to-layer="3532" to-port="1" />
+		<edge from-layer="3532" from-port="2" to-layer="3535" to-port="0" />
+		<edge from-layer="3533" from-port="0" to-layer="3534" to-port="0" />
+		<edge from-layer="3534" from-port="1" to-layer="3535" to-port="1" />
+		<edge from-layer="3535" from-port="2" to-layer="3536" to-port="1" />
+		<edge from-layer="3536" from-port="2" to-layer="3537" to-port="0" />
+		<edge from-layer="3537" from-port="2" to-layer="3539" to-port="0" />
+		<edge from-layer="3537" from-port="2" to-layer="3581" to-port="1" />
+		<edge from-layer="3538" from-port="0" to-layer="3539" to-port="1" />
+		<edge from-layer="3539" from-port="2" to-layer="3542" to-port="0" />
+		<edge from-layer="3540" from-port="0" to-layer="3541" to-port="0" />
+		<edge from-layer="3541" from-port="1" to-layer="3542" to-port="1" />
+		<edge from-layer="3542" from-port="2" to-layer="3545" to-port="0" />
+		<edge from-layer="3543" from-port="0" to-layer="3544" to-port="0" />
+		<edge from-layer="3544" from-port="1" to-layer="3545" to-port="1" />
+		<edge from-layer="3545" from-port="2" to-layer="3548" to-port="0" />
+		<edge from-layer="3546" from-port="0" to-layer="3547" to-port="0" />
+		<edge from-layer="3547" from-port="1" to-layer="3548" to-port="1" />
+		<edge from-layer="3548" from-port="2" to-layer="3549" to-port="1" />
+		<edge from-layer="3549" from-port="2" to-layer="3564" to-port="0" />
+		<edge from-layer="3549" from-port="2" to-layer="3574" to-port="0" />
+		<edge from-layer="3549" from-port="2" to-layer="3553" to-port="0" />
+		<edge from-layer="3550" from-port="0" to-layer="3564" to-port="1" />
+		<edge from-layer="3551" from-port="0" to-layer="3562" to-port="0" />
+		<edge from-layer="3552" from-port="0" to-layer="3562" to-port="1" />
+		<edge from-layer="3553" from-port="1" to-layer="3556" to-port="0" />
+		<edge from-layer="3554" from-port="0" to-layer="3556" to-port="1" />
+		<edge from-layer="3555" from-port="0" to-layer="3556" to-port="2" />
+		<edge from-layer="3556" from-port="3" to-layer="3558" to-port="0" />
+		<edge from-layer="3557" from-port="0" to-layer="3558" to-port="1" />
+		<edge from-layer="3558" from-port="2" to-layer="3560" to-port="0" />
+		<edge from-layer="3559" from-port="0" to-layer="3560" to-port="1" />
+		<edge from-layer="3560" from-port="2" to-layer="3562" to-port="2" />
+		<edge from-layer="3560" from-port="2" to-layer="3571" to-port="0" />
+		<edge from-layer="3560" from-port="2" to-layer="3568" to-port="2" />
+		<edge from-layer="3561" from-port="0" to-layer="3562" to-port="3" />
+		<edge from-layer="3562" from-port="4" to-layer="3564" to-port="2" />
+		<edge from-layer="3563" from-port="0" to-layer="3564" to-port="3" />
+		<edge from-layer="3564" from-port="4" to-layer="3576" to-port="0" />
+		<edge from-layer="3565" from-port="0" to-layer="3568" to-port="0" />
+		<edge from-layer="3566" from-port="0" to-layer="3572" to-port="1" />
+		<edge from-layer="3566" from-port="0" to-layer="3568" to-port="1" />
+		<edge from-layer="3567" from-port="0" to-layer="3572" to-port="3" />
+		<edge from-layer="3567" from-port="0" to-layer="3568" to-port="3" />
+		<edge from-layer="3568" from-port="4" to-layer="3574" to-port="1" />
+		<edge from-layer="3569" from-port="0" to-layer="3572" to-port="0" />
+		<edge from-layer="3570" from-port="0" to-layer="3571" to-port="1" />
+		<edge from-layer="3571" from-port="2" to-layer="3572" to-port="2" />
+		<edge from-layer="3572" from-port="4" to-layer="3574" to-port="2" />
+		<edge from-layer="3573" from-port="0" to-layer="3574" to-port="3" />
+		<edge from-layer="3574" from-port="4" to-layer="3575" to-port="0" />
+		<edge from-layer="3575" from-port="1" to-layer="3576" to-port="1" />
+		<edge from-layer="3576" from-port="2" to-layer="3579" to-port="0" />
+		<edge from-layer="3577" from-port="0" to-layer="3578" to-port="0" />
+		<edge from-layer="3578" from-port="1" to-layer="3579" to-port="1" />
+		<edge from-layer="3579" from-port="2" to-layer="3580" to-port="1" />
+		<edge from-layer="3580" from-port="2" to-layer="3581" to-port="0" />
+		<edge from-layer="3581" from-port="2" to-layer="3583" to-port="0" />
+		<edge from-layer="3582" from-port="0" to-layer="3583" to-port="1" />
+		<edge from-layer="3583" from-port="2" to-layer="3585" to-port="0" />
+		<edge from-layer="3584" from-port="0" to-layer="3585" to-port="1" />
+		<edge from-layer="3585" from-port="2" to-layer="3588" to-port="0" />
+		<edge from-layer="3586" from-port="0" to-layer="3587" to-port="0" />
+		<edge from-layer="3587" from-port="1" to-layer="3588" to-port="1" />
+		<edge from-layer="3588" from-port="2" to-layer="3591" to-port="0" />
+		<edge from-layer="3589" from-port="0" to-layer="3590" to-port="0" />
+		<edge from-layer="3590" from-port="1" to-layer="3591" to-port="1" />
+		<edge from-layer="3591" from-port="2" to-layer="3592" to-port="0" />
+		<edge from-layer="3592" from-port="2" to-layer="3598" to-port="0" />
+		<edge from-layer="3592" from-port="2" to-layer="3593" to-port="0" />
+		<edge from-layer="3593" from-port="1" to-layer="3594" to-port="0" />
+		<edge from-layer="3594" from-port="1" to-layer="3596" to-port="0" />
+		<edge from-layer="3595" from-port="0" to-layer="3596" to-port="1" />
+		<edge from-layer="3595" from-port="0" to-layer="3598" to-port="2" />
+		<edge from-layer="3596" from-port="2" to-layer="3597" to-port="0" />
+		<edge from-layer="3597" from-port="1" to-layer="3598" to-port="1" />
+		<edge from-layer="3598" from-port="3" to-layer="3601" to-port="0" />
+		<edge from-layer="3599" from-port="0" to-layer="3600" to-port="0" />
+		<edge from-layer="3600" from-port="1" to-layer="3601" to-port="1" />
+		<edge from-layer="3601" from-port="2" to-layer="3604" to-port="0" />
+		<edge from-layer="3602" from-port="0" to-layer="3603" to-port="0" />
+		<edge from-layer="3603" from-port="1" to-layer="3604" to-port="1" />
+		<edge from-layer="3604" from-port="2" to-layer="3605" to-port="0" />
+		<edge from-layer="3605" from-port="2" to-layer="3608" to-port="0" />
+		<edge from-layer="3605" from-port="2" to-layer="3613" to-port="0" />
+		<edge from-layer="3605" from-port="2" to-layer="3616" to-port="0" />
+		<edge from-layer="3606" from-port="0" to-layer="3607" to-port="0" />
+		<edge from-layer="3607" from-port="1" to-layer="3608" to-port="1" />
+		<edge from-layer="3608" from-port="2" to-layer="3611" to-port="0" />
+		<edge from-layer="3609" from-port="0" to-layer="3610" to-port="0" />
+		<edge from-layer="3610" from-port="1" to-layer="3611" to-port="1" />
+		<edge from-layer="3611" from-port="2" to-layer="3661" to-port="0" />
+		<edge from-layer="3612" from-port="0" to-layer="3613" to-port="1" />
+		<edge from-layer="3613" from-port="2" to-layer="3615" to-port="0" />
+		<edge from-layer="3614" from-port="0" to-layer="3615" to-port="1" />
+		<edge from-layer="3615" from-port="2" to-layer="3617" to-port="0" />
+		<edge from-layer="3616" from-port="1" to-layer="3617" to-port="1" />
+		<edge from-layer="3617" from-port="2" to-layer="3620" to-port="0" />
+		<edge from-layer="3618" from-port="0" to-layer="3619" to-port="0" />
+		<edge from-layer="3619" from-port="1" to-layer="3620" to-port="1" />
+		<edge from-layer="3620" from-port="2" to-layer="3623" to-port="0" />
+		<edge from-layer="3621" from-port="0" to-layer="3622" to-port="0" />
+		<edge from-layer="3622" from-port="1" to-layer="3623" to-port="1" />
+		<edge from-layer="3623" from-port="2" to-layer="3624" to-port="0" />
+		<edge from-layer="3624" from-port="1" to-layer="3627" to-port="0" />
+		<edge from-layer="3625" from-port="0" to-layer="3626" to-port="0" />
+		<edge from-layer="3626" from-port="1" to-layer="3627" to-port="1" />
+		<edge from-layer="3627" from-port="2" to-layer="3630" to-port="0" />
+		<edge from-layer="3628" from-port="0" to-layer="3629" to-port="0" />
+		<edge from-layer="3629" from-port="1" to-layer="3630" to-port="1" />
+		<edge from-layer="3630" from-port="2" to-layer="3641" to-port="0" />
+		<edge from-layer="3631" from-port="0" to-layer="3632" to-port="0" />
+		<edge from-layer="3632" from-port="1" to-layer="3633" to-port="1" />
+		<edge from-layer="3633" from-port="2" to-layer="3636" to-port="0" />
+		<edge from-layer="3634" from-port="0" to-layer="3635" to-port="0" />
+		<edge from-layer="3635" from-port="1" to-layer="3636" to-port="1" />
+		<edge from-layer="3636" from-port="2" to-layer="3638" to-port="0" />
+		<edge from-layer="3637" from-port="0" to-layer="3638" to-port="1" />
+		<edge from-layer="3638" from-port="2" to-layer="3640" to-port="0" />
+		<edge from-layer="3639" from-port="0" to-layer="3640" to-port="1" />
+		<edge from-layer="3640" from-port="2" to-layer="3641" to-port="1" />
+		<edge from-layer="3641" from-port="2" to-layer="3643" to-port="0" />
+		<edge from-layer="3641" from-port="2" to-layer="3646" to-port="0" />
+		<edge from-layer="3642" from-port="0" to-layer="3643" to-port="1" />
+		<edge from-layer="3643" from-port="2" to-layer="3645" to-port="0" />
+		<edge from-layer="3644" from-port="0" to-layer="3645" to-port="1" />
+		<edge from-layer="3645" from-port="2" to-layer="3647" to-port="0" />
+		<edge from-layer="3646" from-port="1" to-layer="3647" to-port="1" />
+		<edge from-layer="3647" from-port="2" to-layer="3650" to-port="0" />
+		<edge from-layer="3648" from-port="0" to-layer="3649" to-port="0" />
+		<edge from-layer="3649" from-port="1" to-layer="3650" to-port="1" />
+		<edge from-layer="3650" from-port="2" to-layer="3653" to-port="0" />
+		<edge from-layer="3651" from-port="0" to-layer="3652" to-port="0" />
+		<edge from-layer="3652" from-port="1" to-layer="3653" to-port="1" />
+		<edge from-layer="3653" from-port="2" to-layer="3654" to-port="0" />
+		<edge from-layer="3654" from-port="1" to-layer="3657" to-port="0" />
+		<edge from-layer="3655" from-port="0" to-layer="3656" to-port="0" />
+		<edge from-layer="3656" from-port="1" to-layer="3657" to-port="1" />
+		<edge from-layer="3657" from-port="2" to-layer="3660" to-port="0" />
+		<edge from-layer="3658" from-port="0" to-layer="3659" to-port="0" />
+		<edge from-layer="3659" from-port="1" to-layer="3660" to-port="1" />
+		<edge from-layer="3660" from-port="2" to-layer="3661" to-port="1" />
+		<edge from-layer="3661" from-port="2" to-layer="3666" to-port="0" />
+		<edge from-layer="3661" from-port="2" to-layer="3663" to-port="0" />
+		<edge from-layer="3661" from-port="2" to-layer="3842" to-port="1" />
+		<edge from-layer="3662" from-port="0" to-layer="3663" to-port="1" />
+		<edge from-layer="3663" from-port="2" to-layer="3665" to-port="0" />
+		<edge from-layer="3664" from-port="0" to-layer="3665" to-port="1" />
+		<edge from-layer="3665" from-port="2" to-layer="3667" to-port="0" />
+		<edge from-layer="3666" from-port="1" to-layer="3667" to-port="1" />
+		<edge from-layer="3667" from-port="2" to-layer="3670" to-port="0" />
+		<edge from-layer="3668" from-port="0" to-layer="3669" to-port="0" />
+		<edge from-layer="3669" from-port="1" to-layer="3670" to-port="1" />
+		<edge from-layer="3670" from-port="2" to-layer="3673" to-port="0" />
+		<edge from-layer="3671" from-port="0" to-layer="3672" to-port="0" />
+		<edge from-layer="3672" from-port="1" to-layer="3673" to-port="1" />
+		<edge from-layer="3673" from-port="2" to-layer="3676" to-port="0" />
+		<edge from-layer="3674" from-port="0" to-layer="3675" to-port="0" />
+		<edge from-layer="3675" from-port="1" to-layer="3676" to-port="1" />
+		<edge from-layer="3676" from-port="2" to-layer="3679" to-port="0" />
+		<edge from-layer="3677" from-port="0" to-layer="3678" to-port="0" />
+		<edge from-layer="3678" from-port="1" to-layer="3679" to-port="1" />
+		<edge from-layer="3679" from-port="2" to-layer="3681" to-port="0" />
+		<edge from-layer="3680" from-port="0" to-layer="3681" to-port="1" />
+		<edge from-layer="3681" from-port="2" to-layer="3683" to-port="0" />
+		<edge from-layer="3682" from-port="0" to-layer="3683" to-port="1" />
+		<edge from-layer="3683" from-port="2" to-layer="3735" to-port="1" />
+		<edge from-layer="3683" from-port="2" to-layer="3685" to-port="0" />
+		<edge from-layer="3684" from-port="0" to-layer="3685" to-port="1" />
+		<edge from-layer="3685" from-port="2" to-layer="3688" to-port="0" />
+		<edge from-layer="3686" from-port="0" to-layer="3687" to-port="0" />
+		<edge from-layer="3687" from-port="1" to-layer="3688" to-port="1" />
+		<edge from-layer="3688" from-port="2" to-layer="3691" to-port="0" />
+		<edge from-layer="3689" from-port="0" to-layer="3690" to-port="0" />
+		<edge from-layer="3690" from-port="1" to-layer="3691" to-port="1" />
+		<edge from-layer="3691" from-port="2" to-layer="3717" to-port="0" />
+		<edge from-layer="3691" from-port="2" to-layer="3694" to-port="0" />
+		<edge from-layer="3691" from-port="2" to-layer="3703" to-port="0" />
+		<edge from-layer="3692" from-port="0" to-layer="3693" to-port="0" />
+		<edge from-layer="3693" from-port="1" to-layer="3694" to-port="1" />
+		<edge from-layer="3694" from-port="2" to-layer="3696" to-port="0" />
+		<edge from-layer="3695" from-port="0" to-layer="3696" to-port="1" />
+		<edge from-layer="3696" from-port="2" to-layer="3698" to-port="0" />
+		<edge from-layer="3697" from-port="0" to-layer="3698" to-port="1" />
+		<edge from-layer="3698" from-port="2" to-layer="3700" to-port="0" />
+		<edge from-layer="3699" from-port="0" to-layer="3700" to-port="1" />
+		<edge from-layer="3700" from-port="2" to-layer="3713" to-port="0" />
+		<edge from-layer="3701" from-port="0" to-layer="3702" to-port="0" />
+		<edge from-layer="3702" from-port="1" to-layer="3703" to-port="1" />
+		<edge from-layer="3703" from-port="2" to-layer="3705" to-port="0" />
+		<edge from-layer="3704" from-port="0" to-layer="3705" to-port="1" />
+		<edge from-layer="3705" from-port="2" to-layer="3707" to-port="0" />
+		<edge from-layer="3706" from-port="0" to-layer="3707" to-port="1" />
+		<edge from-layer="3707" from-port="2" to-layer="3709" to-port="0" />
+		<edge from-layer="3708" from-port="0" to-layer="3709" to-port="1" />
+		<edge from-layer="3709" from-port="2" to-layer="3712" to-port="0" />
+		<edge from-layer="3710" from-port="0" to-layer="3711" to-port="0" />
+		<edge from-layer="3711" from-port="1" to-layer="3712" to-port="1" />
+		<edge from-layer="3712" from-port="2" to-layer="3713" to-port="1" />
+		<edge from-layer="3713" from-port="2" to-layer="3714" to-port="0" />
+		<edge from-layer="3714" from-port="1" to-layer="3724" to-port="0" />
+		<edge from-layer="3715" from-port="0" to-layer="3716" to-port="0" />
+		<edge from-layer="3716" from-port="1" to-layer="3717" to-port="1" />
+		<edge from-layer="3717" from-port="2" to-layer="3719" to-port="0" />
+		<edge from-layer="3718" from-port="0" to-layer="3719" to-port="1" />
+		<edge from-layer="3719" from-port="2" to-layer="3721" to-port="0" />
+		<edge from-layer="3720" from-port="0" to-layer="3721" to-port="1" />
+		<edge from-layer="3721" from-port="2" to-layer="3723" to-port="0" />
+		<edge from-layer="3722" from-port="0" to-layer="3723" to-port="1" />
+		<edge from-layer="3723" from-port="2" to-layer="3724" to-port="1" />
+		<edge from-layer="3724" from-port="2" to-layer="3726" to-port="0" />
+		<edge from-layer="3725" from-port="0" to-layer="3726" to-port="1" />
+		<edge from-layer="3726" from-port="2" to-layer="3728" to-port="0" />
+		<edge from-layer="3727" from-port="0" to-layer="3728" to-port="1" />
+		<edge from-layer="3728" from-port="2" to-layer="3730" to-port="0" />
+		<edge from-layer="3729" from-port="0" to-layer="3730" to-port="1" />
+		<edge from-layer="3730" from-port="2" to-layer="3733" to-port="0" />
+		<edge from-layer="3731" from-port="0" to-layer="3732" to-port="0" />
+		<edge from-layer="3732" from-port="1" to-layer="3733" to-port="1" />
+		<edge from-layer="3733" from-port="2" to-layer="3734" to-port="1" />
+		<edge from-layer="3734" from-port="2" to-layer="3735" to-port="0" />
+		<edge from-layer="3735" from-port="2" to-layer="3737" to-port="0" />
+		<edge from-layer="3735" from-port="2" to-layer="3787" to-port="1" />
+		<edge from-layer="3736" from-port="0" to-layer="3737" to-port="1" />
+		<edge from-layer="3737" from-port="2" to-layer="3740" to-port="0" />
+		<edge from-layer="3738" from-port="0" to-layer="3739" to-port="0" />
+		<edge from-layer="3739" from-port="1" to-layer="3740" to-port="1" />
+		<edge from-layer="3740" from-port="2" to-layer="3743" to-port="0" />
+		<edge from-layer="3741" from-port="0" to-layer="3742" to-port="0" />
+		<edge from-layer="3742" from-port="1" to-layer="3743" to-port="1" />
+		<edge from-layer="3743" from-port="2" to-layer="3746" to-port="0" />
+		<edge from-layer="3744" from-port="0" to-layer="3745" to-port="0" />
+		<edge from-layer="3745" from-port="1" to-layer="3746" to-port="1" />
+		<edge from-layer="3746" from-port="2" to-layer="3748" to-port="0" />
+		<edge from-layer="3747" from-port="0" to-layer="3748" to-port="1" />
+		<edge from-layer="3748" from-port="2" to-layer="3750" to-port="0" />
+		<edge from-layer="3749" from-port="0" to-layer="3750" to-port="1" />
+		<edge from-layer="3750" from-port="2" to-layer="3752" to-port="0" />
+		<edge from-layer="3751" from-port="0" to-layer="3752" to-port="1" />
+		<edge from-layer="3752" from-port="2" to-layer="3765" to-port="0" />
+		<edge from-layer="3753" from-port="0" to-layer="3754" to-port="0" />
+		<edge from-layer="3754" from-port="1" to-layer="3755" to-port="1" />
+		<edge from-layer="3755" from-port="2" to-layer="3757" to-port="0" />
+		<edge from-layer="3756" from-port="0" to-layer="3757" to-port="1" />
+		<edge from-layer="3757" from-port="2" to-layer="3759" to-port="0" />
+		<edge from-layer="3758" from-port="0" to-layer="3759" to-port="1" />
+		<edge from-layer="3759" from-port="2" to-layer="3761" to-port="0" />
+		<edge from-layer="3760" from-port="0" to-layer="3761" to-port="1" />
+		<edge from-layer="3761" from-port="2" to-layer="3764" to-port="0" />
+		<edge from-layer="3762" from-port="0" to-layer="3763" to-port="0" />
+		<edge from-layer="3763" from-port="1" to-layer="3764" to-port="1" />
+		<edge from-layer="3764" from-port="2" to-layer="3765" to-port="1" />
+		<edge from-layer="3765" from-port="2" to-layer="3766" to-port="0" />
+		<edge from-layer="3766" from-port="1" to-layer="3776" to-port="0" />
+		<edge from-layer="3767" from-port="0" to-layer="3768" to-port="0" />
+		<edge from-layer="3768" from-port="1" to-layer="3769" to-port="1" />
+		<edge from-layer="3769" from-port="2" to-layer="3771" to-port="0" />
+		<edge from-layer="3770" from-port="0" to-layer="3771" to-port="1" />
+		<edge from-layer="3771" from-port="2" to-layer="3773" to-port="0" />
+		<edge from-layer="3772" from-port="0" to-layer="3773" to-port="1" />
+		<edge from-layer="3773" from-port="2" to-layer="3775" to-port="0" />
+		<edge from-layer="3774" from-port="0" to-layer="3775" to-port="1" />
+		<edge from-layer="3775" from-port="2" to-layer="3776" to-port="1" />
+		<edge from-layer="3776" from-port="2" to-layer="3778" to-port="0" />
+		<edge from-layer="3777" from-port="0" to-layer="3778" to-port="1" />
+		<edge from-layer="3778" from-port="2" to-layer="3780" to-port="0" />
+		<edge from-layer="3779" from-port="0" to-layer="3780" to-port="1" />
+		<edge from-layer="3780" from-port="2" to-layer="3782" to-port="0" />
+		<edge from-layer="3781" from-port="0" to-layer="3782" to-port="1" />
+		<edge from-layer="3782" from-port="2" to-layer="3785" to-port="0" />
+		<edge from-layer="3783" from-port="0" to-layer="3784" to-port="0" />
+		<edge from-layer="3784" from-port="1" to-layer="3785" to-port="1" />
+		<edge from-layer="3785" from-port="2" to-layer="3786" to-port="1" />
+		<edge from-layer="3786" from-port="2" to-layer="3787" to-port="0" />
+		<edge from-layer="3787" from-port="2" to-layer="3789" to-port="0" />
+		<edge from-layer="3787" from-port="2" to-layer="3831" to-port="1" />
+		<edge from-layer="3788" from-port="0" to-layer="3789" to-port="1" />
+		<edge from-layer="3789" from-port="2" to-layer="3792" to-port="0" />
+		<edge from-layer="3790" from-port="0" to-layer="3791" to-port="0" />
+		<edge from-layer="3791" from-port="1" to-layer="3792" to-port="1" />
+		<edge from-layer="3792" from-port="2" to-layer="3795" to-port="0" />
+		<edge from-layer="3793" from-port="0" to-layer="3794" to-port="0" />
+		<edge from-layer="3794" from-port="1" to-layer="3795" to-port="1" />
+		<edge from-layer="3795" from-port="2" to-layer="3798" to-port="0" />
+		<edge from-layer="3796" from-port="0" to-layer="3797" to-port="0" />
+		<edge from-layer="3797" from-port="1" to-layer="3798" to-port="1" />
+		<edge from-layer="3798" from-port="2" to-layer="3799" to-port="1" />
+		<edge from-layer="3799" from-port="2" to-layer="3803" to-port="0" />
+		<edge from-layer="3799" from-port="2" to-layer="3824" to-port="0" />
+		<edge from-layer="3799" from-port="2" to-layer="3814" to-port="0" />
+		<edge from-layer="3800" from-port="0" to-layer="3814" to-port="1" />
+		<edge from-layer="3801" from-port="0" to-layer="3812" to-port="0" />
+		<edge from-layer="3802" from-port="0" to-layer="3812" to-port="1" />
+		<edge from-layer="3803" from-port="1" to-layer="3806" to-port="0" />
+		<edge from-layer="3804" from-port="0" to-layer="3806" to-port="1" />
+		<edge from-layer="3805" from-port="0" to-layer="3806" to-port="2" />
+		<edge from-layer="3806" from-port="3" to-layer="3808" to-port="0" />
+		<edge from-layer="3807" from-port="0" to-layer="3808" to-port="1" />
+		<edge from-layer="3808" from-port="2" to-layer="3810" to-port="0" />
+		<edge from-layer="3809" from-port="0" to-layer="3810" to-port="1" />
+		<edge from-layer="3810" from-port="2" to-layer="3818" to-port="2" />
+		<edge from-layer="3810" from-port="2" to-layer="3821" to-port="0" />
+		<edge from-layer="3810" from-port="2" to-layer="3812" to-port="2" />
+		<edge from-layer="3811" from-port="0" to-layer="3812" to-port="3" />
+		<edge from-layer="3812" from-port="4" to-layer="3814" to-port="2" />
+		<edge from-layer="3813" from-port="0" to-layer="3814" to-port="3" />
+		<edge from-layer="3814" from-port="4" to-layer="3826" to-port="0" />
+		<edge from-layer="3815" from-port="0" to-layer="3818" to-port="0" />
+		<edge from-layer="3816" from-port="0" to-layer="3822" to-port="1" />
+		<edge from-layer="3816" from-port="0" to-layer="3818" to-port="1" />
+		<edge from-layer="3817" from-port="0" to-layer="3822" to-port="3" />
+		<edge from-layer="3817" from-port="0" to-layer="3818" to-port="3" />
+		<edge from-layer="3818" from-port="4" to-layer="3824" to-port="1" />
+		<edge from-layer="3819" from-port="0" to-layer="3822" to-port="0" />
+		<edge from-layer="3820" from-port="0" to-layer="3821" to-port="1" />
+		<edge from-layer="3821" from-port="2" to-layer="3822" to-port="2" />
+		<edge from-layer="3822" from-port="4" to-layer="3824" to-port="2" />
+		<edge from-layer="3823" from-port="0" to-layer="3824" to-port="3" />
+		<edge from-layer="3824" from-port="4" to-layer="3825" to-port="0" />
+		<edge from-layer="3825" from-port="1" to-layer="3826" to-port="1" />
+		<edge from-layer="3826" from-port="2" to-layer="3829" to-port="0" />
+		<edge from-layer="3827" from-port="0" to-layer="3828" to-port="0" />
+		<edge from-layer="3828" from-port="1" to-layer="3829" to-port="1" />
+		<edge from-layer="3829" from-port="2" to-layer="3830" to-port="1" />
+		<edge from-layer="3830" from-port="2" to-layer="3831" to-port="0" />
+		<edge from-layer="3831" from-port="2" to-layer="3833" to-port="0" />
+		<edge from-layer="3832" from-port="0" to-layer="3833" to-port="1" />
+		<edge from-layer="3833" from-port="2" to-layer="3835" to-port="0" />
+		<edge from-layer="3834" from-port="0" to-layer="3835" to-port="1" />
+		<edge from-layer="3835" from-port="2" to-layer="3838" to-port="0" />
+		<edge from-layer="3836" from-port="0" to-layer="3837" to-port="0" />
+		<edge from-layer="3837" from-port="1" to-layer="3838" to-port="1" />
+		<edge from-layer="3838" from-port="2" to-layer="3841" to-port="0" />
+		<edge from-layer="3839" from-port="0" to-layer="3840" to-port="0" />
+		<edge from-layer="3840" from-port="1" to-layer="3841" to-port="1" />
+		<edge from-layer="3841" from-port="2" to-layer="3842" to-port="0" />
+		<edge from-layer="3842" from-port="2" to-layer="3843" to-port="0" />
+		<edge from-layer="3843" from-port="2" to-layer="3851" to-port="0" />
+		<edge from-layer="3843" from-port="2" to-layer="3854" to-port="0" />
+		<edge from-layer="3843" from-port="2" to-layer="3846" to-port="0" />
+		<edge from-layer="3844" from-port="0" to-layer="3845" to-port="0" />
+		<edge from-layer="3845" from-port="1" to-layer="3846" to-port="1" />
+		<edge from-layer="3846" from-port="2" to-layer="3849" to-port="0" />
+		<edge from-layer="3847" from-port="0" to-layer="3848" to-port="0" />
+		<edge from-layer="3848" from-port="1" to-layer="3849" to-port="1" />
+		<edge from-layer="3849" from-port="2" to-layer="3899" to-port="0" />
+		<edge from-layer="3850" from-port="0" to-layer="3851" to-port="1" />
+		<edge from-layer="3851" from-port="2" to-layer="3853" to-port="0" />
+		<edge from-layer="3852" from-port="0" to-layer="3853" to-port="1" />
+		<edge from-layer="3853" from-port="2" to-layer="3855" to-port="0" />
+		<edge from-layer="3854" from-port="1" to-layer="3855" to-port="1" />
+		<edge from-layer="3855" from-port="2" to-layer="3858" to-port="0" />
+		<edge from-layer="3856" from-port="0" to-layer="3857" to-port="0" />
+		<edge from-layer="3857" from-port="1" to-layer="3858" to-port="1" />
+		<edge from-layer="3858" from-port="2" to-layer="3861" to-port="0" />
+		<edge from-layer="3859" from-port="0" to-layer="3860" to-port="0" />
+		<edge from-layer="3860" from-port="1" to-layer="3861" to-port="1" />
+		<edge from-layer="3861" from-port="2" to-layer="3862" to-port="0" />
+		<edge from-layer="3862" from-port="1" to-layer="3865" to-port="0" />
+		<edge from-layer="3863" from-port="0" to-layer="3864" to-port="0" />
+		<edge from-layer="3864" from-port="1" to-layer="3865" to-port="1" />
+		<edge from-layer="3865" from-port="2" to-layer="3868" to-port="0" />
+		<edge from-layer="3866" from-port="0" to-layer="3867" to-port="0" />
+		<edge from-layer="3867" from-port="1" to-layer="3868" to-port="1" />
+		<edge from-layer="3868" from-port="2" to-layer="3879" to-port="0" />
+		<edge from-layer="3869" from-port="0" to-layer="3870" to-port="0" />
+		<edge from-layer="3870" from-port="1" to-layer="3871" to-port="1" />
+		<edge from-layer="3871" from-port="2" to-layer="3874" to-port="0" />
+		<edge from-layer="3872" from-port="0" to-layer="3873" to-port="0" />
+		<edge from-layer="3873" from-port="1" to-layer="3874" to-port="1" />
+		<edge from-layer="3874" from-port="2" to-layer="3876" to-port="0" />
+		<edge from-layer="3875" from-port="0" to-layer="3876" to-port="1" />
+		<edge from-layer="3876" from-port="2" to-layer="3878" to-port="0" />
+		<edge from-layer="3877" from-port="0" to-layer="3878" to-port="1" />
+		<edge from-layer="3878" from-port="2" to-layer="3879" to-port="1" />
+		<edge from-layer="3879" from-port="2" to-layer="3881" to-port="0" />
+		<edge from-layer="3879" from-port="2" to-layer="3884" to-port="0" />
+		<edge from-layer="3880" from-port="0" to-layer="3881" to-port="1" />
+		<edge from-layer="3881" from-port="2" to-layer="3883" to-port="0" />
+		<edge from-layer="3882" from-port="0" to-layer="3883" to-port="1" />
+		<edge from-layer="3883" from-port="2" to-layer="3885" to-port="0" />
+		<edge from-layer="3884" from-port="1" to-layer="3885" to-port="1" />
+		<edge from-layer="3885" from-port="2" to-layer="3888" to-port="0" />
+		<edge from-layer="3886" from-port="0" to-layer="3887" to-port="0" />
+		<edge from-layer="3887" from-port="1" to-layer="3888" to-port="1" />
+		<edge from-layer="3888" from-port="2" to-layer="3891" to-port="0" />
+		<edge from-layer="3889" from-port="0" to-layer="3890" to-port="0" />
+		<edge from-layer="3890" from-port="1" to-layer="3891" to-port="1" />
+		<edge from-layer="3891" from-port="2" to-layer="3892" to-port="0" />
+		<edge from-layer="3892" from-port="1" to-layer="3895" to-port="0" />
+		<edge from-layer="3893" from-port="0" to-layer="3894" to-port="0" />
+		<edge from-layer="3894" from-port="1" to-layer="3895" to-port="1" />
+		<edge from-layer="3895" from-port="2" to-layer="3898" to-port="0" />
+		<edge from-layer="3896" from-port="0" to-layer="3897" to-port="0" />
+		<edge from-layer="3897" from-port="1" to-layer="3898" to-port="1" />
+		<edge from-layer="3898" from-port="2" to-layer="3899" to-port="1" />
+		<edge from-layer="3899" from-port="2" to-layer="3904" to-port="0" />
+		<edge from-layer="3899" from-port="2" to-layer="4080" to-port="1" />
+		<edge from-layer="3899" from-port="2" to-layer="3901" to-port="0" />
+		<edge from-layer="3900" from-port="0" to-layer="3901" to-port="1" />
+		<edge from-layer="3901" from-port="2" to-layer="3903" to-port="0" />
+		<edge from-layer="3902" from-port="0" to-layer="3903" to-port="1" />
+		<edge from-layer="3903" from-port="2" to-layer="3905" to-port="0" />
+		<edge from-layer="3904" from-port="1" to-layer="3905" to-port="1" />
+		<edge from-layer="3905" from-port="2" to-layer="3908" to-port="0" />
+		<edge from-layer="3906" from-port="0" to-layer="3907" to-port="0" />
+		<edge from-layer="3907" from-port="1" to-layer="3908" to-port="1" />
+		<edge from-layer="3908" from-port="2" to-layer="3911" to-port="0" />
+		<edge from-layer="3909" from-port="0" to-layer="3910" to-port="0" />
+		<edge from-layer="3910" from-port="1" to-layer="3911" to-port="1" />
+		<edge from-layer="3911" from-port="2" to-layer="3914" to-port="0" />
+		<edge from-layer="3912" from-port="0" to-layer="3913" to-port="0" />
+		<edge from-layer="3913" from-port="1" to-layer="3914" to-port="1" />
+		<edge from-layer="3914" from-port="2" to-layer="3917" to-port="0" />
+		<edge from-layer="3915" from-port="0" to-layer="3916" to-port="0" />
+		<edge from-layer="3916" from-port="1" to-layer="3917" to-port="1" />
+		<edge from-layer="3917" from-port="2" to-layer="3919" to-port="0" />
+		<edge from-layer="3918" from-port="0" to-layer="3919" to-port="1" />
+		<edge from-layer="3919" from-port="2" to-layer="3921" to-port="0" />
+		<edge from-layer="3920" from-port="0" to-layer="3921" to-port="1" />
+		<edge from-layer="3921" from-port="2" to-layer="3923" to-port="0" />
+		<edge from-layer="3921" from-port="2" to-layer="3973" to-port="1" />
+		<edge from-layer="3922" from-port="0" to-layer="3923" to-port="1" />
+		<edge from-layer="3923" from-port="2" to-layer="3926" to-port="0" />
+		<edge from-layer="3924" from-port="0" to-layer="3925" to-port="0" />
+		<edge from-layer="3925" from-port="1" to-layer="3926" to-port="1" />
+		<edge from-layer="3926" from-port="2" to-layer="3929" to-port="0" />
+		<edge from-layer="3927" from-port="0" to-layer="3928" to-port="0" />
+		<edge from-layer="3928" from-port="1" to-layer="3929" to-port="1" />
+		<edge from-layer="3929" from-port="2" to-layer="3955" to-port="0" />
+		<edge from-layer="3929" from-port="2" to-layer="3932" to-port="0" />
+		<edge from-layer="3929" from-port="2" to-layer="3941" to-port="0" />
+		<edge from-layer="3930" from-port="0" to-layer="3931" to-port="0" />
+		<edge from-layer="3931" from-port="1" to-layer="3932" to-port="1" />
+		<edge from-layer="3932" from-port="2" to-layer="3934" to-port="0" />
+		<edge from-layer="3933" from-port="0" to-layer="3934" to-port="1" />
+		<edge from-layer="3934" from-port="2" to-layer="3936" to-port="0" />
+		<edge from-layer="3935" from-port="0" to-layer="3936" to-port="1" />
+		<edge from-layer="3936" from-port="2" to-layer="3938" to-port="0" />
+		<edge from-layer="3937" from-port="0" to-layer="3938" to-port="1" />
+		<edge from-layer="3938" from-port="2" to-layer="3951" to-port="0" />
+		<edge from-layer="3939" from-port="0" to-layer="3940" to-port="0" />
+		<edge from-layer="3940" from-port="1" to-layer="3941" to-port="1" />
+		<edge from-layer="3941" from-port="2" to-layer="3943" to-port="0" />
+		<edge from-layer="3942" from-port="0" to-layer="3943" to-port="1" />
+		<edge from-layer="3943" from-port="2" to-layer="3945" to-port="0" />
+		<edge from-layer="3944" from-port="0" to-layer="3945" to-port="1" />
+		<edge from-layer="3945" from-port="2" to-layer="3947" to-port="0" />
+		<edge from-layer="3946" from-port="0" to-layer="3947" to-port="1" />
+		<edge from-layer="3947" from-port="2" to-layer="3950" to-port="0" />
+		<edge from-layer="3948" from-port="0" to-layer="3949" to-port="0" />
+		<edge from-layer="3949" from-port="1" to-layer="3950" to-port="1" />
+		<edge from-layer="3950" from-port="2" to-layer="3951" to-port="1" />
+		<edge from-layer="3951" from-port="2" to-layer="3952" to-port="0" />
+		<edge from-layer="3952" from-port="1" to-layer="3962" to-port="0" />
+		<edge from-layer="3953" from-port="0" to-layer="3954" to-port="0" />
+		<edge from-layer="3954" from-port="1" to-layer="3955" to-port="1" />
+		<edge from-layer="3955" from-port="2" to-layer="3957" to-port="0" />
+		<edge from-layer="3956" from-port="0" to-layer="3957" to-port="1" />
+		<edge from-layer="3957" from-port="2" to-layer="3959" to-port="0" />
+		<edge from-layer="3958" from-port="0" to-layer="3959" to-port="1" />
+		<edge from-layer="3959" from-port="2" to-layer="3961" to-port="0" />
+		<edge from-layer="3960" from-port="0" to-layer="3961" to-port="1" />
+		<edge from-layer="3961" from-port="2" to-layer="3962" to-port="1" />
+		<edge from-layer="3962" from-port="2" to-layer="3964" to-port="0" />
+		<edge from-layer="3963" from-port="0" to-layer="3964" to-port="1" />
+		<edge from-layer="3964" from-port="2" to-layer="3966" to-port="0" />
+		<edge from-layer="3965" from-port="0" to-layer="3966" to-port="1" />
+		<edge from-layer="3966" from-port="2" to-layer="3968" to-port="0" />
+		<edge from-layer="3967" from-port="0" to-layer="3968" to-port="1" />
+		<edge from-layer="3968" from-port="2" to-layer="3971" to-port="0" />
+		<edge from-layer="3969" from-port="0" to-layer="3970" to-port="0" />
+		<edge from-layer="3970" from-port="1" to-layer="3971" to-port="1" />
+		<edge from-layer="3971" from-port="2" to-layer="3972" to-port="1" />
+		<edge from-layer="3972" from-port="2" to-layer="3973" to-port="0" />
+		<edge from-layer="3973" from-port="2" to-layer="4025" to-port="1" />
+		<edge from-layer="3973" from-port="2" to-layer="3975" to-port="0" />
+		<edge from-layer="3974" from-port="0" to-layer="3975" to-port="1" />
+		<edge from-layer="3975" from-port="2" to-layer="3978" to-port="0" />
+		<edge from-layer="3976" from-port="0" to-layer="3977" to-port="0" />
+		<edge from-layer="3977" from-port="1" to-layer="3978" to-port="1" />
+		<edge from-layer="3978" from-port="2" to-layer="3981" to-port="0" />
+		<edge from-layer="3979" from-port="0" to-layer="3980" to-port="0" />
+		<edge from-layer="3980" from-port="1" to-layer="3981" to-port="1" />
+		<edge from-layer="3981" from-port="2" to-layer="3984" to-port="0" />
+		<edge from-layer="3982" from-port="0" to-layer="3983" to-port="0" />
+		<edge from-layer="3983" from-port="1" to-layer="3984" to-port="1" />
+		<edge from-layer="3984" from-port="2" to-layer="3986" to-port="0" />
+		<edge from-layer="3985" from-port="0" to-layer="3986" to-port="1" />
+		<edge from-layer="3986" from-port="2" to-layer="3988" to-port="0" />
+		<edge from-layer="3987" from-port="0" to-layer="3988" to-port="1" />
+		<edge from-layer="3988" from-port="2" to-layer="3990" to-port="0" />
+		<edge from-layer="3989" from-port="0" to-layer="3990" to-port="1" />
+		<edge from-layer="3990" from-port="2" to-layer="4003" to-port="0" />
+		<edge from-layer="3991" from-port="0" to-layer="3992" to-port="0" />
+		<edge from-layer="3992" from-port="1" to-layer="3993" to-port="1" />
+		<edge from-layer="3993" from-port="2" to-layer="3995" to-port="0" />
+		<edge from-layer="3994" from-port="0" to-layer="3995" to-port="1" />
+		<edge from-layer="3995" from-port="2" to-layer="3997" to-port="0" />
+		<edge from-layer="3996" from-port="0" to-layer="3997" to-port="1" />
+		<edge from-layer="3997" from-port="2" to-layer="3999" to-port="0" />
+		<edge from-layer="3998" from-port="0" to-layer="3999" to-port="1" />
+		<edge from-layer="3999" from-port="2" to-layer="4002" to-port="0" />
+		<edge from-layer="4000" from-port="0" to-layer="4001" to-port="0" />
+		<edge from-layer="4001" from-port="1" to-layer="4002" to-port="1" />
+		<edge from-layer="4002" from-port="2" to-layer="4003" to-port="1" />
+		<edge from-layer="4003" from-port="2" to-layer="4004" to-port="0" />
+		<edge from-layer="4004" from-port="1" to-layer="4014" to-port="0" />
+		<edge from-layer="4005" from-port="0" to-layer="4006" to-port="0" />
+		<edge from-layer="4006" from-port="1" to-layer="4007" to-port="1" />
+		<edge from-layer="4007" from-port="2" to-layer="4009" to-port="0" />
+		<edge from-layer="4008" from-port="0" to-layer="4009" to-port="1" />
+		<edge from-layer="4009" from-port="2" to-layer="4011" to-port="0" />
+		<edge from-layer="4010" from-port="0" to-layer="4011" to-port="1" />
+		<edge from-layer="4011" from-port="2" to-layer="4013" to-port="0" />
+		<edge from-layer="4012" from-port="0" to-layer="4013" to-port="1" />
+		<edge from-layer="4013" from-port="2" to-layer="4014" to-port="1" />
+		<edge from-layer="4014" from-port="2" to-layer="4016" to-port="0" />
+		<edge from-layer="4015" from-port="0" to-layer="4016" to-port="1" />
+		<edge from-layer="4016" from-port="2" to-layer="4018" to-port="0" />
+		<edge from-layer="4017" from-port="0" to-layer="4018" to-port="1" />
+		<edge from-layer="4018" from-port="2" to-layer="4020" to-port="0" />
+		<edge from-layer="4019" from-port="0" to-layer="4020" to-port="1" />
+		<edge from-layer="4020" from-port="2" to-layer="4023" to-port="0" />
+		<edge from-layer="4021" from-port="0" to-layer="4022" to-port="0" />
+		<edge from-layer="4022" from-port="1" to-layer="4023" to-port="1" />
+		<edge from-layer="4023" from-port="2" to-layer="4024" to-port="1" />
+		<edge from-layer="4024" from-port="2" to-layer="4025" to-port="0" />
+		<edge from-layer="4025" from-port="2" to-layer="4069" to-port="1" />
+		<edge from-layer="4025" from-port="2" to-layer="4027" to-port="0" />
+		<edge from-layer="4026" from-port="0" to-layer="4027" to-port="1" />
+		<edge from-layer="4027" from-port="2" to-layer="4030" to-port="0" />
+		<edge from-layer="4028" from-port="0" to-layer="4029" to-port="0" />
+		<edge from-layer="4029" from-port="1" to-layer="4030" to-port="1" />
+		<edge from-layer="4030" from-port="2" to-layer="4033" to-port="0" />
+		<edge from-layer="4031" from-port="0" to-layer="4032" to-port="0" />
+		<edge from-layer="4032" from-port="1" to-layer="4033" to-port="1" />
+		<edge from-layer="4033" from-port="2" to-layer="4036" to-port="0" />
+		<edge from-layer="4034" from-port="0" to-layer="4035" to-port="0" />
+		<edge from-layer="4035" from-port="1" to-layer="4036" to-port="1" />
+		<edge from-layer="4036" from-port="2" to-layer="4037" to-port="1" />
+		<edge from-layer="4037" from-port="2" to-layer="4041" to-port="0" />
+		<edge from-layer="4037" from-port="2" to-layer="4052" to-port="0" />
+		<edge from-layer="4037" from-port="2" to-layer="4062" to-port="0" />
+		<edge from-layer="4038" from-port="0" to-layer="4052" to-port="1" />
+		<edge from-layer="4039" from-port="0" to-layer="4050" to-port="0" />
+		<edge from-layer="4040" from-port="0" to-layer="4050" to-port="1" />
+		<edge from-layer="4041" from-port="1" to-layer="4044" to-port="0" />
+		<edge from-layer="4042" from-port="0" to-layer="4044" to-port="1" />
+		<edge from-layer="4043" from-port="0" to-layer="4044" to-port="2" />
+		<edge from-layer="4044" from-port="3" to-layer="4046" to-port="0" />
+		<edge from-layer="4045" from-port="0" to-layer="4046" to-port="1" />
+		<edge from-layer="4046" from-port="2" to-layer="4048" to-port="0" />
+		<edge from-layer="4047" from-port="0" to-layer="4048" to-port="1" />
+		<edge from-layer="4048" from-port="2" to-layer="4050" to-port="2" />
+		<edge from-layer="4048" from-port="2" to-layer="4056" to-port="2" />
+		<edge from-layer="4048" from-port="2" to-layer="4059" to-port="0" />
+		<edge from-layer="4049" from-port="0" to-layer="4050" to-port="3" />
+		<edge from-layer="4050" from-port="4" to-layer="4052" to-port="2" />
+		<edge from-layer="4051" from-port="0" to-layer="4052" to-port="3" />
+		<edge from-layer="4052" from-port="4" to-layer="4064" to-port="0" />
+		<edge from-layer="4053" from-port="0" to-layer="4056" to-port="0" />
+		<edge from-layer="4054" from-port="0" to-layer="4056" to-port="1" />
+		<edge from-layer="4054" from-port="0" to-layer="4060" to-port="1" />
+		<edge from-layer="4055" from-port="0" to-layer="4060" to-port="3" />
+		<edge from-layer="4055" from-port="0" to-layer="4056" to-port="3" />
+		<edge from-layer="4056" from-port="4" to-layer="4062" to-port="1" />
+		<edge from-layer="4057" from-port="0" to-layer="4060" to-port="0" />
+		<edge from-layer="4058" from-port="0" to-layer="4059" to-port="1" />
+		<edge from-layer="4059" from-port="2" to-layer="4060" to-port="2" />
+		<edge from-layer="4060" from-port="4" to-layer="4062" to-port="2" />
+		<edge from-layer="4061" from-port="0" to-layer="4062" to-port="3" />
+		<edge from-layer="4062" from-port="4" to-layer="4063" to-port="0" />
+		<edge from-layer="4063" from-port="1" to-layer="4064" to-port="1" />
+		<edge from-layer="4064" from-port="2" to-layer="4067" to-port="0" />
+		<edge from-layer="4065" from-port="0" to-layer="4066" to-port="0" />
+		<edge from-layer="4066" from-port="1" to-layer="4067" to-port="1" />
+		<edge from-layer="4067" from-port="2" to-layer="4068" to-port="1" />
+		<edge from-layer="4068" from-port="2" to-layer="4069" to-port="0" />
+		<edge from-layer="4069" from-port="2" to-layer="4071" to-port="0" />
+		<edge from-layer="4070" from-port="0" to-layer="4071" to-port="1" />
+		<edge from-layer="4071" from-port="2" to-layer="4073" to-port="0" />
+		<edge from-layer="4072" from-port="0" to-layer="4073" to-port="1" />
+		<edge from-layer="4073" from-port="2" to-layer="4076" to-port="0" />
+		<edge from-layer="4074" from-port="0" to-layer="4075" to-port="0" />
+		<edge from-layer="4075" from-port="1" to-layer="4076" to-port="1" />
+		<edge from-layer="4076" from-port="2" to-layer="4079" to-port="0" />
+		<edge from-layer="4077" from-port="0" to-layer="4078" to-port="0" />
+		<edge from-layer="4078" from-port="1" to-layer="4079" to-port="1" />
+		<edge from-layer="4079" from-port="2" to-layer="4080" to-port="0" />
+		<edge from-layer="4080" from-port="2" to-layer="4081" to-port="0" />
+		<edge from-layer="4081" from-port="2" to-layer="4084" to-port="0" />
+		<edge from-layer="4081" from-port="2" to-layer="4089" to-port="0" />
+		<edge from-layer="4081" from-port="2" to-layer="4092" to-port="0" />
+		<edge from-layer="4082" from-port="0" to-layer="4083" to-port="0" />
+		<edge from-layer="4083" from-port="1" to-layer="4084" to-port="1" />
+		<edge from-layer="4084" from-port="2" to-layer="4087" to-port="0" />
+		<edge from-layer="4085" from-port="0" to-layer="4086" to-port="0" />
+		<edge from-layer="4086" from-port="1" to-layer="4087" to-port="1" />
+		<edge from-layer="4087" from-port="2" to-layer="4137" to-port="0" />
+		<edge from-layer="4088" from-port="0" to-layer="4089" to-port="1" />
+		<edge from-layer="4089" from-port="2" to-layer="4091" to-port="0" />
+		<edge from-layer="4090" from-port="0" to-layer="4091" to-port="1" />
+		<edge from-layer="4091" from-port="2" to-layer="4093" to-port="0" />
+		<edge from-layer="4092" from-port="1" to-layer="4093" to-port="1" />
+		<edge from-layer="4093" from-port="2" to-layer="4096" to-port="0" />
+		<edge from-layer="4094" from-port="0" to-layer="4095" to-port="0" />
+		<edge from-layer="4095" from-port="1" to-layer="4096" to-port="1" />
+		<edge from-layer="4096" from-port="2" to-layer="4099" to-port="0" />
+		<edge from-layer="4097" from-port="0" to-layer="4098" to-port="0" />
+		<edge from-layer="4098" from-port="1" to-layer="4099" to-port="1" />
+		<edge from-layer="4099" from-port="2" to-layer="4100" to-port="0" />
+		<edge from-layer="4100" from-port="1" to-layer="4103" to-port="0" />
+		<edge from-layer="4101" from-port="0" to-layer="4102" to-port="0" />
+		<edge from-layer="4102" from-port="1" to-layer="4103" to-port="1" />
+		<edge from-layer="4103" from-port="2" to-layer="4106" to-port="0" />
+		<edge from-layer="4104" from-port="0" to-layer="4105" to-port="0" />
+		<edge from-layer="4105" from-port="1" to-layer="4106" to-port="1" />
+		<edge from-layer="4106" from-port="2" to-layer="4117" to-port="0" />
+		<edge from-layer="4107" from-port="0" to-layer="4108" to-port="0" />
+		<edge from-layer="4108" from-port="1" to-layer="4109" to-port="1" />
+		<edge from-layer="4109" from-port="2" to-layer="4112" to-port="0" />
+		<edge from-layer="4110" from-port="0" to-layer="4111" to-port="0" />
+		<edge from-layer="4111" from-port="1" to-layer="4112" to-port="1" />
+		<edge from-layer="4112" from-port="2" to-layer="4114" to-port="0" />
+		<edge from-layer="4113" from-port="0" to-layer="4114" to-port="1" />
+		<edge from-layer="4114" from-port="2" to-layer="4116" to-port="0" />
+		<edge from-layer="4115" from-port="0" to-layer="4116" to-port="1" />
+		<edge from-layer="4116" from-port="2" to-layer="4117" to-port="1" />
+		<edge from-layer="4117" from-port="2" to-layer="4119" to-port="0" />
+		<edge from-layer="4117" from-port="2" to-layer="4122" to-port="0" />
+		<edge from-layer="4118" from-port="0" to-layer="4119" to-port="1" />
+		<edge from-layer="4119" from-port="2" to-layer="4121" to-port="0" />
+		<edge from-layer="4120" from-port="0" to-layer="4121" to-port="1" />
+		<edge from-layer="4121" from-port="2" to-layer="4123" to-port="0" />
+		<edge from-layer="4122" from-port="1" to-layer="4123" to-port="1" />
+		<edge from-layer="4123" from-port="2" to-layer="4126" to-port="0" />
+		<edge from-layer="4124" from-port="0" to-layer="4125" to-port="0" />
+		<edge from-layer="4125" from-port="1" to-layer="4126" to-port="1" />
+		<edge from-layer="4126" from-port="2" to-layer="4129" to-port="0" />
+		<edge from-layer="4127" from-port="0" to-layer="4128" to-port="0" />
+		<edge from-layer="4128" from-port="1" to-layer="4129" to-port="1" />
+		<edge from-layer="4129" from-port="2" to-layer="4130" to-port="0" />
+		<edge from-layer="4130" from-port="1" to-layer="4133" to-port="0" />
+		<edge from-layer="4131" from-port="0" to-layer="4132" to-port="0" />
+		<edge from-layer="4132" from-port="1" to-layer="4133" to-port="1" />
+		<edge from-layer="4133" from-port="2" to-layer="4136" to-port="0" />
+		<edge from-layer="4134" from-port="0" to-layer="4135" to-port="0" />
+		<edge from-layer="4135" from-port="1" to-layer="4136" to-port="1" />
+		<edge from-layer="4136" from-port="2" to-layer="4137" to-port="1" />
+		<edge from-layer="4137" from-port="2" to-layer="4142" to-port="0" />
+		<edge from-layer="4137" from-port="2" to-layer="4139" to-port="0" />
+		<edge from-layer="4137" from-port="2" to-layer="4318" to-port="1" />
+		<edge from-layer="4138" from-port="0" to-layer="4139" to-port="1" />
+		<edge from-layer="4139" from-port="2" to-layer="4141" to-port="0" />
+		<edge from-layer="4140" from-port="0" to-layer="4141" to-port="1" />
+		<edge from-layer="4141" from-port="2" to-layer="4143" to-port="0" />
+		<edge from-layer="4142" from-port="1" to-layer="4143" to-port="1" />
+		<edge from-layer="4143" from-port="2" to-layer="4146" to-port="0" />
+		<edge from-layer="4144" from-port="0" to-layer="4145" to-port="0" />
+		<edge from-layer="4145" from-port="1" to-layer="4146" to-port="1" />
+		<edge from-layer="4146" from-port="2" to-layer="4149" to-port="0" />
+		<edge from-layer="4147" from-port="0" to-layer="4148" to-port="0" />
+		<edge from-layer="4148" from-port="1" to-layer="4149" to-port="1" />
+		<edge from-layer="4149" from-port="2" to-layer="4152" to-port="0" />
+		<edge from-layer="4150" from-port="0" to-layer="4151" to-port="0" />
+		<edge from-layer="4151" from-port="1" to-layer="4152" to-port="1" />
+		<edge from-layer="4152" from-port="2" to-layer="4155" to-port="0" />
+		<edge from-layer="4153" from-port="0" to-layer="4154" to-port="0" />
+		<edge from-layer="4154" from-port="1" to-layer="4155" to-port="1" />
+		<edge from-layer="4155" from-port="2" to-layer="4157" to-port="0" />
+		<edge from-layer="4156" from-port="0" to-layer="4157" to-port="1" />
+		<edge from-layer="4157" from-port="2" to-layer="4159" to-port="0" />
+		<edge from-layer="4158" from-port="0" to-layer="4159" to-port="1" />
+		<edge from-layer="4159" from-port="2" to-layer="4161" to-port="0" />
+		<edge from-layer="4159" from-port="2" to-layer="4211" to-port="1" />
+		<edge from-layer="4160" from-port="0" to-layer="4161" to-port="1" />
+		<edge from-layer="4161" from-port="2" to-layer="4164" to-port="0" />
+		<edge from-layer="4162" from-port="0" to-layer="4163" to-port="0" />
+		<edge from-layer="4163" from-port="1" to-layer="4164" to-port="1" />
+		<edge from-layer="4164" from-port="2" to-layer="4167" to-port="0" />
+		<edge from-layer="4165" from-port="0" to-layer="4166" to-port="0" />
+		<edge from-layer="4166" from-port="1" to-layer="4167" to-port="1" />
+		<edge from-layer="4167" from-port="2" to-layer="4170" to-port="0" />
+		<edge from-layer="4167" from-port="2" to-layer="4193" to-port="0" />
+		<edge from-layer="4167" from-port="2" to-layer="4179" to-port="0" />
+		<edge from-layer="4168" from-port="0" to-layer="4169" to-port="0" />
+		<edge from-layer="4169" from-port="1" to-layer="4170" to-port="1" />
+		<edge from-layer="4170" from-port="2" to-layer="4172" to-port="0" />
+		<edge from-layer="4171" from-port="0" to-layer="4172" to-port="1" />
+		<edge from-layer="4172" from-port="2" to-layer="4174" to-port="0" />
+		<edge from-layer="4173" from-port="0" to-layer="4174" to-port="1" />
+		<edge from-layer="4174" from-port="2" to-layer="4176" to-port="0" />
+		<edge from-layer="4175" from-port="0" to-layer="4176" to-port="1" />
+		<edge from-layer="4176" from-port="2" to-layer="4189" to-port="0" />
+		<edge from-layer="4177" from-port="0" to-layer="4178" to-port="0" />
+		<edge from-layer="4178" from-port="1" to-layer="4179" to-port="1" />
+		<edge from-layer="4179" from-port="2" to-layer="4181" to-port="0" />
+		<edge from-layer="4180" from-port="0" to-layer="4181" to-port="1" />
+		<edge from-layer="4181" from-port="2" to-layer="4183" to-port="0" />
+		<edge from-layer="4182" from-port="0" to-layer="4183" to-port="1" />
+		<edge from-layer="4183" from-port="2" to-layer="4185" to-port="0" />
+		<edge from-layer="4184" from-port="0" to-layer="4185" to-port="1" />
+		<edge from-layer="4185" from-port="2" to-layer="4188" to-port="0" />
+		<edge from-layer="4186" from-port="0" to-layer="4187" to-port="0" />
+		<edge from-layer="4187" from-port="1" to-layer="4188" to-port="1" />
+		<edge from-layer="4188" from-port="2" to-layer="4189" to-port="1" />
+		<edge from-layer="4189" from-port="2" to-layer="4190" to-port="0" />
+		<edge from-layer="4190" from-port="1" to-layer="4200" to-port="0" />
+		<edge from-layer="4191" from-port="0" to-layer="4192" to-port="0" />
+		<edge from-layer="4192" from-port="1" to-layer="4193" to-port="1" />
+		<edge from-layer="4193" from-port="2" to-layer="4195" to-port="0" />
+		<edge from-layer="4194" from-port="0" to-layer="4195" to-port="1" />
+		<edge from-layer="4195" from-port="2" to-layer="4197" to-port="0" />
+		<edge from-layer="4196" from-port="0" to-layer="4197" to-port="1" />
+		<edge from-layer="4197" from-port="2" to-layer="4199" to-port="0" />
+		<edge from-layer="4198" from-port="0" to-layer="4199" to-port="1" />
+		<edge from-layer="4199" from-port="2" to-layer="4200" to-port="1" />
+		<edge from-layer="4200" from-port="2" to-layer="4202" to-port="0" />
+		<edge from-layer="4201" from-port="0" to-layer="4202" to-port="1" />
+		<edge from-layer="4202" from-port="2" to-layer="4204" to-port="0" />
+		<edge from-layer="4203" from-port="0" to-layer="4204" to-port="1" />
+		<edge from-layer="4204" from-port="2" to-layer="4206" to-port="0" />
+		<edge from-layer="4205" from-port="0" to-layer="4206" to-port="1" />
+		<edge from-layer="4206" from-port="2" to-layer="4209" to-port="0" />
+		<edge from-layer="4207" from-port="0" to-layer="4208" to-port="0" />
+		<edge from-layer="4208" from-port="1" to-layer="4209" to-port="1" />
+		<edge from-layer="4209" from-port="2" to-layer="4210" to-port="1" />
+		<edge from-layer="4210" from-port="2" to-layer="4211" to-port="0" />
+		<edge from-layer="4211" from-port="2" to-layer="4213" to-port="0" />
+		<edge from-layer="4211" from-port="2" to-layer="4263" to-port="1" />
+		<edge from-layer="4212" from-port="0" to-layer="4213" to-port="1" />
+		<edge from-layer="4213" from-port="2" to-layer="4216" to-port="0" />
+		<edge from-layer="4214" from-port="0" to-layer="4215" to-port="0" />
+		<edge from-layer="4215" from-port="1" to-layer="4216" to-port="1" />
+		<edge from-layer="4216" from-port="2" to-layer="4219" to-port="0" />
+		<edge from-layer="4217" from-port="0" to-layer="4218" to-port="0" />
+		<edge from-layer="4218" from-port="1" to-layer="4219" to-port="1" />
+		<edge from-layer="4219" from-port="2" to-layer="4222" to-port="0" />
+		<edge from-layer="4220" from-port="0" to-layer="4221" to-port="0" />
+		<edge from-layer="4221" from-port="1" to-layer="4222" to-port="1" />
+		<edge from-layer="4222" from-port="2" to-layer="4224" to-port="0" />
+		<edge from-layer="4223" from-port="0" to-layer="4224" to-port="1" />
+		<edge from-layer="4224" from-port="2" to-layer="4226" to-port="0" />
+		<edge from-layer="4225" from-port="0" to-layer="4226" to-port="1" />
+		<edge from-layer="4226" from-port="2" to-layer="4228" to-port="0" />
+		<edge from-layer="4227" from-port="0" to-layer="4228" to-port="1" />
+		<edge from-layer="4228" from-port="2" to-layer="4241" to-port="0" />
+		<edge from-layer="4229" from-port="0" to-layer="4230" to-port="0" />
+		<edge from-layer="4230" from-port="1" to-layer="4231" to-port="1" />
+		<edge from-layer="4231" from-port="2" to-layer="4233" to-port="0" />
+		<edge from-layer="4232" from-port="0" to-layer="4233" to-port="1" />
+		<edge from-layer="4233" from-port="2" to-layer="4235" to-port="0" />
+		<edge from-layer="4234" from-port="0" to-layer="4235" to-port="1" />
+		<edge from-layer="4235" from-port="2" to-layer="4237" to-port="0" />
+		<edge from-layer="4236" from-port="0" to-layer="4237" to-port="1" />
+		<edge from-layer="4237" from-port="2" to-layer="4240" to-port="0" />
+		<edge from-layer="4238" from-port="0" to-layer="4239" to-port="0" />
+		<edge from-layer="4239" from-port="1" to-layer="4240" to-port="1" />
+		<edge from-layer="4240" from-port="2" to-layer="4241" to-port="1" />
+		<edge from-layer="4241" from-port="2" to-layer="4242" to-port="0" />
+		<edge from-layer="4242" from-port="1" to-layer="4252" to-port="0" />
+		<edge from-layer="4243" from-port="0" to-layer="4244" to-port="0" />
+		<edge from-layer="4244" from-port="1" to-layer="4245" to-port="1" />
+		<edge from-layer="4245" from-port="2" to-layer="4247" to-port="0" />
+		<edge from-layer="4246" from-port="0" to-layer="4247" to-port="1" />
+		<edge from-layer="4247" from-port="2" to-layer="4249" to-port="0" />
+		<edge from-layer="4248" from-port="0" to-layer="4249" to-port="1" />
+		<edge from-layer="4249" from-port="2" to-layer="4251" to-port="0" />
+		<edge from-layer="4250" from-port="0" to-layer="4251" to-port="1" />
+		<edge from-layer="4251" from-port="2" to-layer="4252" to-port="1" />
+		<edge from-layer="4252" from-port="2" to-layer="4254" to-port="0" />
+		<edge from-layer="4253" from-port="0" to-layer="4254" to-port="1" />
+		<edge from-layer="4254" from-port="2" to-layer="4256" to-port="0" />
+		<edge from-layer="4255" from-port="0" to-layer="4256" to-port="1" />
+		<edge from-layer="4256" from-port="2" to-layer="4258" to-port="0" />
+		<edge from-layer="4257" from-port="0" to-layer="4258" to-port="1" />
+		<edge from-layer="4258" from-port="2" to-layer="4261" to-port="0" />
+		<edge from-layer="4259" from-port="0" to-layer="4260" to-port="0" />
+		<edge from-layer="4260" from-port="1" to-layer="4261" to-port="1" />
+		<edge from-layer="4261" from-port="2" to-layer="4262" to-port="1" />
+		<edge from-layer="4262" from-port="2" to-layer="4263" to-port="0" />
+		<edge from-layer="4263" from-port="2" to-layer="4265" to-port="0" />
+		<edge from-layer="4263" from-port="2" to-layer="4307" to-port="1" />
+		<edge from-layer="4264" from-port="0" to-layer="4265" to-port="1" />
+		<edge from-layer="4265" from-port="2" to-layer="4268" to-port="0" />
+		<edge from-layer="4266" from-port="0" to-layer="4267" to-port="0" />
+		<edge from-layer="4267" from-port="1" to-layer="4268" to-port="1" />
+		<edge from-layer="4268" from-port="2" to-layer="4271" to-port="0" />
+		<edge from-layer="4269" from-port="0" to-layer="4270" to-port="0" />
+		<edge from-layer="4270" from-port="1" to-layer="4271" to-port="1" />
+		<edge from-layer="4271" from-port="2" to-layer="4274" to-port="0" />
+		<edge from-layer="4272" from-port="0" to-layer="4273" to-port="0" />
+		<edge from-layer="4273" from-port="1" to-layer="4274" to-port="1" />
+		<edge from-layer="4274" from-port="2" to-layer="4275" to-port="1" />
+		<edge from-layer="4275" from-port="2" to-layer="4279" to-port="0" />
+		<edge from-layer="4275" from-port="2" to-layer="4290" to-port="0" />
+		<edge from-layer="4275" from-port="2" to-layer="4300" to-port="0" />
+		<edge from-layer="4276" from-port="0" to-layer="4290" to-port="1" />
+		<edge from-layer="4277" from-port="0" to-layer="4288" to-port="0" />
+		<edge from-layer="4278" from-port="0" to-layer="4288" to-port="1" />
+		<edge from-layer="4279" from-port="1" to-layer="4282" to-port="0" />
+		<edge from-layer="4280" from-port="0" to-layer="4282" to-port="1" />
+		<edge from-layer="4281" from-port="0" to-layer="4282" to-port="2" />
+		<edge from-layer="4282" from-port="3" to-layer="4284" to-port="0" />
+		<edge from-layer="4283" from-port="0" to-layer="4284" to-port="1" />
+		<edge from-layer="4284" from-port="2" to-layer="4286" to-port="0" />
+		<edge from-layer="4285" from-port="0" to-layer="4286" to-port="1" />
+		<edge from-layer="4286" from-port="2" to-layer="4294" to-port="2" />
+		<edge from-layer="4286" from-port="2" to-layer="4288" to-port="2" />
+		<edge from-layer="4286" from-port="2" to-layer="4297" to-port="0" />
+		<edge from-layer="4287" from-port="0" to-layer="4288" to-port="3" />
+		<edge from-layer="4288" from-port="4" to-layer="4290" to-port="2" />
+		<edge from-layer="4289" from-port="0" to-layer="4290" to-port="3" />
+		<edge from-layer="4290" from-port="4" to-layer="4302" to-port="0" />
+		<edge from-layer="4291" from-port="0" to-layer="4294" to-port="0" />
+		<edge from-layer="4292" from-port="0" to-layer="4294" to-port="1" />
+		<edge from-layer="4292" from-port="0" to-layer="4298" to-port="1" />
+		<edge from-layer="4293" from-port="0" to-layer="4294" to-port="3" />
+		<edge from-layer="4293" from-port="0" to-layer="4298" to-port="3" />
+		<edge from-layer="4294" from-port="4" to-layer="4300" to-port="1" />
+		<edge from-layer="4295" from-port="0" to-layer="4298" to-port="0" />
+		<edge from-layer="4296" from-port="0" to-layer="4297" to-port="1" />
+		<edge from-layer="4297" from-port="2" to-layer="4298" to-port="2" />
+		<edge from-layer="4298" from-port="4" to-layer="4300" to-port="2" />
+		<edge from-layer="4299" from-port="0" to-layer="4300" to-port="3" />
+		<edge from-layer="4300" from-port="4" to-layer="4301" to-port="0" />
+		<edge from-layer="4301" from-port="1" to-layer="4302" to-port="1" />
+		<edge from-layer="4302" from-port="2" to-layer="4305" to-port="0" />
+		<edge from-layer="4303" from-port="0" to-layer="4304" to-port="0" />
+		<edge from-layer="4304" from-port="1" to-layer="4305" to-port="1" />
+		<edge from-layer="4305" from-port="2" to-layer="4306" to-port="1" />
+		<edge from-layer="4306" from-port="2" to-layer="4307" to-port="0" />
+		<edge from-layer="4307" from-port="2" to-layer="4309" to-port="0" />
+		<edge from-layer="4308" from-port="0" to-layer="4309" to-port="1" />
+		<edge from-layer="4309" from-port="2" to-layer="4311" to-port="0" />
+		<edge from-layer="4310" from-port="0" to-layer="4311" to-port="1" />
+		<edge from-layer="4311" from-port="2" to-layer="4314" to-port="0" />
+		<edge from-layer="4312" from-port="0" to-layer="4313" to-port="0" />
+		<edge from-layer="4313" from-port="1" to-layer="4314" to-port="1" />
+		<edge from-layer="4314" from-port="2" to-layer="4317" to-port="0" />
+		<edge from-layer="4315" from-port="0" to-layer="4316" to-port="0" />
+		<edge from-layer="4316" from-port="1" to-layer="4317" to-port="1" />
+		<edge from-layer="4317" from-port="2" to-layer="4318" to-port="0" />
+		<edge from-layer="4318" from-port="2" to-layer="4320" to-port="0" />
+		<edge from-layer="4318" from-port="2" to-layer="4323" to-port="0" />
+		<edge from-layer="4319" from-port="0" to-layer="4320" to-port="1" />
+		<edge from-layer="4320" from-port="2" to-layer="4322" to-port="0" />
+		<edge from-layer="4321" from-port="0" to-layer="4322" to-port="1" />
+		<edge from-layer="4322" from-port="2" to-layer="4324" to-port="0" />
+		<edge from-layer="4323" from-port="1" to-layer="4324" to-port="1" />
+		<edge from-layer="4324" from-port="2" to-layer="4327" to-port="0" />
+		<edge from-layer="4325" from-port="0" to-layer="4326" to-port="0" />
+		<edge from-layer="4326" from-port="1" to-layer="4327" to-port="1" />
+		<edge from-layer="4327" from-port="2" to-layer="4330" to-port="0" />
+		<edge from-layer="4328" from-port="0" to-layer="4329" to-port="0" />
+		<edge from-layer="4329" from-port="1" to-layer="4330" to-port="1" />
+		<edge from-layer="4330" from-port="2" to-layer="4331" to-port="0" />
+		<edge from-layer="4331" from-port="1" to-layer="4334" to-port="0" />
+		<edge from-layer="4332" from-port="0" to-layer="4333" to-port="0" />
+		<edge from-layer="4333" from-port="1" to-layer="4334" to-port="1" />
+		<edge from-layer="4334" from-port="2" to-layer="4337" to-port="0" />
+		<edge from-layer="4335" from-port="0" to-layer="4336" to-port="0" />
+		<edge from-layer="4336" from-port="1" to-layer="4337" to-port="1" />
+		<edge from-layer="4337" from-port="2" to-layer="4338" to-port="0" />
 	</edges>
-	<meta_data>
-		<MO_version value="2022.1.0-7019-cdb9bec7210-releases/2022/1"/>
-		<Runtime_version value="2022.1.0-7019-cdb9bec7210-releases/2022/1"/>
-		<legacy_path value="False"/>
-		<cli_parameters>
-			<caffe_parser_path value="DIR"/>
-			<compress_fp16 value="False"/>
-			<data_type value="float"/>
-			<disable_nhwc_to_nchw value="False"/>
-			<disable_omitting_optional value="False"/>
-			<disable_resnet_optimization value="False"/>
-			<disable_weights_compression value="False"/>
-			<enable_concat_optimization value="False"/>
-			<enable_flattening_nested_params value="False"/>
-			<enable_ssd_gluoncv value="False"/>
-			<extensions value="DIR"/>
-			<framework value="onnx"/>
-			<freeze_placeholder_with_value value="{}"/>
-			<input_model value="DIR/unet.onnx"/>
-			<input_model_is_text value="False"/>
-			<k value="DIR/CustomLayersMapping.xml"/>
-			<layout value="()"/>
-			<layout_values value="{}"/>
-			<legacy_mxnet_model value="False"/>
-			<log_level value="ERROR"/>
-			<mean_scale_values value="{}"/>
-			<mean_values value="()"/>
-			<model_name value="unet"/>
-			<output_dir value="DIR"/>
-			<placeholder_data_types value="{}"/>
-			<progress value="False"/>
-			<remove_memory value="False"/>
-			<remove_output_softmax value="False"/>
-			<reverse_input_channels value="False"/>
-			<save_params_from_nd value="False"/>
-			<scale_values value="()"/>
-			<silent value="False"/>
-			<source_layout value="()"/>
-			<static_shape value="False"/>
-			<stream_output value="False"/>
-			<target_layout value="()"/>
-			<transform value=""/>
-			<use_legacy_frontend value="False"/>
-			<use_new_frontend value="False"/>
-			<unset unset_cli_parameters="batch, counts, disable_fusing, finegrain_fusing, input, input_checkpoint, input_meta_graph, input_proto, input_shape, input_symbol, mean_file, mean_file_offsets, nd_prefix_name, output, placeholder_shapes, pretrained_model_name, saved_model_dir, saved_model_tags, scale, tensorboard_logdir, tensorflow_custom_layer_libraries, tensorflow_custom_operations_config_update, tensorflow_object_detection_api_pipeline_config, tensorflow_use_custom_operations_config, transformations_config"/>
-		</cli_parameters>
-	</meta_data>
+	<rt_info>
+		<MO_version value="2022.3.0-9052-9752fafe8eb-releases/2022/3" />
+		<Runtime_version value="2022.3.0-9052-9752fafe8eb-releases/2022/3" />
+		<conversion_parameters>
+			<compress_to_fp16 value="True" />
+			<input_model value="DIR\unet.onnx" />
+		</conversion_parameters>
+		<legacy_frontend value="False" />
+	</rt_info>
 </net>