aotrih committed
Commit e8f351b · Parent: 8a300b7

initial commit

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitignore +1 -0
  2. LICENSE +0 -0
  3. LICENSE_NOTICE.txt +7 -0
  4. README.md +24 -5
  5. config.json +112 -0
  6. distil-whisper_distil-large-v3/AudioEncoder.mlmodelc/analytics/coremldata.bin +3 -0
  7. distil-whisper_distil-large-v3/AudioEncoder.mlmodelc/coremldata.bin +3 -0
  8. distil-whisper_distil-large-v3/AudioEncoder.mlmodelc/metadata.json +90 -0
  9. distil-whisper_distil-large-v3/AudioEncoder.mlmodelc/model.mil +0 -0
  10. distil-whisper_distil-large-v3/AudioEncoder.mlmodelc/weights/weight.bin +3 -0
  11. distil-whisper_distil-large-v3/LICENSE_NOTICE.txt +7 -0
  12. distil-whisper_distil-large-v3/MelSpectrogram.mlmodelc/analytics/coremldata.bin +3 -0
  13. distil-whisper_distil-large-v3/MelSpectrogram.mlmodelc/coremldata.bin +3 -0
  14. distil-whisper_distil-large-v3/MelSpectrogram.mlmodelc/metadata.json +74 -0
  15. distil-whisper_distil-large-v3/MelSpectrogram.mlmodelc/model.mil +66 -0
  16. distil-whisper_distil-large-v3/MelSpectrogram.mlmodelc/weights/weight.bin +3 -0
  17. distil-whisper_distil-large-v3/TextDecoder.mlmodelc/analytics/coremldata.bin +3 -0
  18. distil-whisper_distil-large-v3/TextDecoder.mlmodelc/coremldata.bin +3 -0
  19. distil-whisper_distil-large-v3/TextDecoder.mlmodelc/metadata.json +183 -0
  20. distil-whisper_distil-large-v3/TextDecoder.mlmodelc/model.mil +529 -0
  21. distil-whisper_distil-large-v3/TextDecoder.mlmodelc/weights/weight.bin +3 -0
  22. distil-whisper_distil-large-v3/config.json +1 -0
  23. distil-whisper_distil-large-v3/generation_config.json +1 -0
  24. distil-whisper_distil-large-v3_turbo/AudioEncoder.mlmodelc/analytics/coremldata.bin +3 -0
  25. distil-whisper_distil-large-v3_turbo/AudioEncoder.mlmodelc/coremldata.bin +3 -0
  26. distil-whisper_distil-large-v3_turbo/AudioEncoder.mlmodelc/metadata.json +91 -0
  27. distil-whisper_distil-large-v3_turbo/AudioEncoder.mlmodelc/model.mil +0 -0
  28. distil-whisper_distil-large-v3_turbo/AudioEncoder.mlmodelc/weights/weight.bin +3 -0
  29. distil-whisper_distil-large-v3_turbo/LICENSE_NOTICE.txt +7 -0
  30. distil-whisper_distil-large-v3_turbo/MelSpectrogram.mlmodelc/analytics/coremldata.bin +3 -0
  31. distil-whisper_distil-large-v3_turbo/MelSpectrogram.mlmodelc/coremldata.bin +3 -0
  32. distil-whisper_distil-large-v3_turbo/MelSpectrogram.mlmodelc/metadata.json +74 -0
  33. distil-whisper_distil-large-v3_turbo/MelSpectrogram.mlmodelc/model.mil +66 -0
  34. distil-whisper_distil-large-v3_turbo/MelSpectrogram.mlmodelc/weights/weight.bin +3 -0
  35. distil-whisper_distil-large-v3_turbo/TextDecoder.mlmodelc/analytics/coremldata.bin +3 -0
  36. distil-whisper_distil-large-v3_turbo/TextDecoder.mlmodelc/coremldata.bin +3 -0
  37. distil-whisper_distil-large-v3_turbo/TextDecoder.mlmodelc/metadata.json +183 -0
  38. distil-whisper_distil-large-v3_turbo/TextDecoder.mlmodelc/model.mil +529 -0
  39. distil-whisper_distil-large-v3_turbo/TextDecoder.mlmodelc/weights/weight.bin +3 -0
  40. distil-whisper_distil-large-v3_turbo/config.json +1 -0
  41. distil-whisper_distil-large-v3_turbo/generation_config.json +1 -0
  42. openai_whisper-base.en/AudioEncoder.mlmodelc/analytics/coremldata.bin +3 -0
  43. openai_whisper-base.en/AudioEncoder.mlmodelc/coremldata.bin +3 -0
  44. openai_whisper-base.en/AudioEncoder.mlmodelc/metadata.json +91 -0
  45. openai_whisper-base.en/AudioEncoder.mlmodelc/model.mil +0 -0
  46. openai_whisper-base.en/AudioEncoder.mlmodelc/weights/weight.bin +3 -0
  47. openai_whisper-base.en/LICENSE_NOTICE.txt +7 -0
  48. openai_whisper-base.en/MelSpectrogram.mlmodelc/analytics/coremldata.bin +3 -0
  49. openai_whisper-base.en/MelSpectrogram.mlmodelc/coremldata.bin +3 -0
  50. openai_whisper-base.en/MelSpectrogram.mlmodelc/metadata.json +74 -0
.gitignore ADDED
@@ -0,0 +1 @@
+ .DS_Store
LICENSE DELETED
File without changes
LICENSE_NOTICE.txt ADDED
@@ -0,0 +1,7 @@
+ Argmax proprietary and confidential. Under NDA.
+
+ Copyright 2024 Argmax, Inc. All rights reserved.
+
+ Unauthorized access, copying, use, distribution, and/or commercialization of this file, via any medium or means, is strictly prohibited.
+
+ Please contact Argmax for licensing information at [email protected].
README.md CHANGED
@@ -1,5 +1,24 @@
- ---
- license: other
- license_name: argmax-fmod-license
- license_link: LICENSE
- ---
+ ---
+ license: other
+ license_name: argmax-fmod-license
+ license_link: https://huggingface.co/argmaxinc/whisperkit-pro/blob/main/LICENSE_NOTICE.txt
+ pretty_name: "WhisperKit"
+ viewer: false
+ library_name: whisperkit
+ tags:
+   - whisper
+   - whisperkit
+   - coreml
+   - asr
+   - quantized
+   - automatic-speech-recognition
+ extra_gated_heading: "WhisperKit Pro is now in early access!"
+ extra_gated_description: "WhisperKit Pro is the commercial tier of [WhisperKit](https://github.com/argmaxinc/WhisperKit). Please submit your information below to request early access or directly send an email to [[email protected]](mailto:[email protected])"
+ extra_gated_fields:
+   Company: text
+   Work email: text
+   I acknowledge the license notice: checkbox
+ extra_gated_button_content: "Submit"
+ ---
+
+ # WhisperKit Pro
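Editor's note: the `extra_gated_*` front matter added above makes this a gated Hugging Face repo, so files can only be fetched after access is granted. As a hedged illustration (not part of this commit), an approved user could download one model folder with the standard `huggingface_hub` API; the repo id and folder name are taken from this diff, the token is a placeholder:

    # Sketch: fetch one model folder from the gated repo after access approval.
    # Requires `pip install huggingface_hub` and a token with granted access.
    from huggingface_hub import snapshot_download

    local_dir = snapshot_download(
        repo_id="argmaxinc/whisperkit-pro",
        allow_patterns=["distil-whisper_distil-large-v3/*"],  # folder from this diff
        token="hf_...",  # placeholder access token
    )
    print(local_dir)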
config.json ADDED
@@ -0,0 +1,112 @@
+ {
+   "name": "whisperkit-coreml",
+   "version": "0.2",
+   "device_support": [
+     {
+       "identifiers": ["iPhone11", "iPhone12", "Watch7", "Watch8"],
+       "models": {
+         "default": "openai_whisper-tiny",
+         "supported": [
+           "openai_whisper-tiny",
+           "openai_whisper-tiny.en",
+           "openai_whisper-base",
+           "openai_whisper-base.en"
+         ]
+       }
+     },
+     {
+       "identifiers": ["iPhone13", "iPad13,18", "iPad13,1"],
+       "models": {
+         "default": "openai_whisper-base",
+         "supported": [
+           "openai_whisper-tiny",
+           "openai_whisper-tiny.en",
+           "openai_whisper-base",
+           "openai_whisper-base.en",
+           "openai_whisper-small",
+           "openai_whisper-small.en"
+         ]
+       }
+     },
+     {
+       "identifiers": [
+         "iPhone14",
+         "iPhone15",
+         "iPhone16",
+         "iPhone17",
+         "iPad14,1",
+         "iPad14,2"
+       ],
+       "models": {
+         "default": "openai_whisper-base",
+         "supported": [
+           "openai_whisper-tiny",
+           "openai_whisper-tiny.en",
+           "openai_whisper-base",
+           "openai_whisper-base.en",
+           "openai_whisper-small",
+           "openai_whisper-small.en"
+         ]
+       }
+     },
+     {
+       "identifiers": [
+         "Mac13",
+         "iMac21",
+         "MacBookAir10,1",
+         "MacBookPro17",
+         "MacBookPro18",
+         "Macmini9",
+         "iPad13,16",
+         "iPad13,4",
+         "iPad13,8"
+       ],
+       "models": {
+         "default": "openai_whisper-large-v3-v20240930",
+         "supported": [
+           "openai_whisper-tiny",
+           "openai_whisper-tiny.en",
+           "openai_whisper-base",
+           "openai_whisper-base.en",
+           "openai_whisper-small",
+           "openai_whisper-small.en",
+           "distil-whisper_distil-large-v3",
+           "openai_whisper-large-v3-v20240930"
+         ]
+       }
+     },
+     {
+       "identifiers": [
+         "Mac14",
+         "Mac15",
+         "Mac16",
+         "iPad14,3",
+         "iPad14,4",
+         "iPad14,5",
+         "iPad14,6",
+         "iPad14,8",
+         "iPad14,9",
+         "iPad14,10",
+         "iPad14,11",
+         "iPad16"
+       ],
+       "models": {
+         "default": "openai_whisper-large-v3-v20240930",
+         "supported": [
+           "openai_whisper-tiny",
+           "openai_whisper-tiny.en",
+           "openai_whisper-base",
+           "openai_whisper-base.en",
+           "openai_whisper-small",
+           "openai_whisper-small.en",
+           "distil-whisper_distil-large-v3",
+           "distil-whisper_distil-large-v3_turbo",
+           "openai_whisper-large-v3-v20240930",
+           "openai_whisper-large-v3-v20240930_turbo"
+         ]
+       }
+     }
+   ],
+   "model_checksums": {
+   }
+ }
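Editor's note: the `device_support` array maps device identifier families to a default model and a supported set. A minimal sketch of how a client might resolve the default model for a device; the match-by-prefix rule is an assumption inferred from the identifier format (e.g. "iPhone16" covering "iPhone16,1"), not confirmed by this commit:

    import json

    def default_model(config_path: str, device_identifier: str) -> str | None:
        # Return the default model of the first device_support entry whose
        # identifier is a prefix of the runtime device identifier.
        with open(config_path) as f:
            config = json.load(f)
        for entry in config["device_support"]:
            if any(device_identifier.startswith(p) for p in entry["identifiers"]):
                return entry["models"]["default"]
        return None

    print(default_model("config.json", "iPhone16,1"))  # -> "openai_whisper-base"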
distil-whisper_distil-large-v3/AudioEncoder.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d1e272b852871322d9a085dfa95707110046e749f525a0db0d8f277709fcde3f
+ size 243
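Editor's note: the binary files in this commit are stored as Git LFS pointers like the one above: spec version, SHA-256 of the real payload, and payload size in bytes. A small standard-library sketch for checking a downloaded file against its pointer (the path is illustrative):

    import hashlib

    def verify_lfs(payload_path: str, expected_sha256: str, expected_size: int) -> bool:
        # Stream the file so multi-GB weight blobs need not fit in memory.
        h = hashlib.sha256()
        size = 0
        with open(payload_path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                h.update(chunk)
                size += len(chunk)
        return h.hexdigest() == expected_sha256 and size == expected_size

    # Values taken from the pointer above:
    print(verify_lfs(
        "distil-whisper_distil-large-v3/AudioEncoder.mlmodelc/analytics/coremldata.bin",
        "d1e272b852871322d9a085dfa95707110046e749f525a0db0d8f277709fcde3f",
        243,
    ))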
distil-whisper_distil-large-v3/AudioEncoder.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:49b36ed70ce2688383176bec4bd74a1a7716b505ddffd95d5f8eda4063ce6ae3
+ size 434
distil-whisper_distil-large-v3/AudioEncoder.mlmodelc/metadata.json ADDED
@@ -0,0 +1,90 @@
+ [
+   {
+     "metadataOutputVersion" : "3.0",
+     "storagePrecision" : "Float16",
+     "outputSchema" : [
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 1 × 1280 × 1 × 1500)",
+         "shortDescription" : "",
+         "shape" : "[1, 1280, 1, 1500]",
+         "name" : "encoder_output_embeds",
+         "type" : "MultiArray"
+       },
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 2 × 1280 × 1 × 1536)",
+         "shortDescription" : "",
+         "shape" : "[2, 1280, 1, 1536]",
+         "name" : "encoder_attn_key_cache",
+         "type" : "MultiArray"
+       },
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 2 × 1280 × 1 × 1536)",
+         "shortDescription" : "",
+         "shape" : "[2, 1280, 1, 1536]",
+         "name" : "encoder_attn_value_cache",
+         "type" : "MultiArray"
+       }
+     ],
+     "modelParameters" : [
+
+     ],
+     "specificationVersion" : 9,
+     "mlProgramOperationTypeHistogram" : {
+       "Ios18.reshape" : 128,
+       "Ios18.batchNorm" : 65,
+       "Ios18.softmax" : 32,
+       "Pad" : 2,
+       "Ios18.concat" : 2,
+       "Ios18.gelu" : 34,
+       "Ios18.layerNorm" : 65,
+       "Ios18.matmul" : 64,
+       "Ios18.conv" : 198,
+       "Ios18.mul" : 32,
+       "Ios18.add" : 65
+     },
+     "computePrecision" : "Mixed (Float16, Int32)",
+     "isUpdatable" : "0",
+     "stateSchema" : [
+
+     ],
+     "availability" : {
+       "macOS" : "15.0",
+       "tvOS" : "18.0",
+       "visionOS" : "2.0",
+       "watchOS" : "11.0",
+       "iOS" : "18.0",
+       "macCatalyst" : "18.0"
+     },
+     "modelType" : {
+       "name" : "MLModelType_mlProgram"
+     },
+     "userDefinedMetadata" : {
+       "com.github.apple.coremltools.source_dialect" : "TorchScript",
+       "com.github.apple.coremltools.version" : "8.0",
+       "com.github.apple.coremltools.source" : "torch==2.5.1"
+     },
+     "inputSchema" : [
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 1 × 128 × 1 × 3000)",
+         "shortDescription" : "",
+         "shape" : "[1, 128, 1, 3000]",
+         "name" : "melspectrogram_features",
+         "type" : "MultiArray"
+       }
+     ],
+     "generatedClassName" : "AudioEncoderStateful",
+     "method" : "predict"
+   }
+ ]
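Editor's note: per the schema above, the audio encoder consumes `melspectrogram_features` of shape [1, 128, 1, 3000] in Float16 and emits the encoder embeddings plus precomputed cross-attention key/value caches. A minimal macOS-side sketch, assuming coremltools >= 8 (whose `CompiledMLModel` loads compiled `.mlmodelc` bundles directly):

    import numpy as np
    import coremltools as ct

    # Load the compiled Core ML bundle without recompilation.
    encoder = ct.models.CompiledMLModel(
        "distil-whisper_distil-large-v3/AudioEncoder.mlmodelc"
    )

    # Dummy mel features with the exact input shape from metadata.json.
    mel = np.zeros((1, 128, 1, 3000), dtype=np.float16)
    outputs = encoder.predict({"melspectrogram_features": mel})

    print(outputs["encoder_output_embeds"].shape)   # (1, 1280, 1, 1500)
    print(outputs["encoder_attn_key_cache"].shape)  # (2, 1280, 1, 1536)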
distil-whisper_distil-large-v3/AudioEncoder.mlmodelc/model.mil ADDED
The diff for this file is too large to render. See raw diff
 
distil-whisper_distil-large-v3/AudioEncoder.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b43a5d9e21e95067e0af8cf4b8fcbd16cc8e6f99993084f5e67cdf81bde16e79
+ size 1287087104
distil-whisper_distil-large-v3/LICENSE_NOTICE.txt ADDED
@@ -0,0 +1,7 @@
+ Argmax proprietary and confidential. Under NDA.
+
+ Copyright 2024 Argmax, Inc. All rights reserved.
+
+ Unauthorized access, copying, use, distribution, and/or commercialization of this file, via any medium or means, is strictly prohibited.
+
+ Please contact Argmax for licensing information at [email protected].
distil-whisper_distil-large-v3/MelSpectrogram.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0980462db89a546e1e90888ea38e0a5ddf1f1fec84608802cdbb12f8a5cc7215
+ size 243
distil-whisper_distil-large-v3/MelSpectrogram.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:451a5796f1dafb1969fef7bac32cd7fcf51fc763d1e1826ee6211dd046ede15a
+ size 329
distil-whisper_distil-large-v3/MelSpectrogram.mlmodelc/metadata.json ADDED
@@ -0,0 +1,74 @@
+ [
+   {
+     "metadataOutputVersion" : "3.0",
+     "storagePrecision" : "Float16",
+     "outputSchema" : [
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 1 × 128 × 1 × 3000)",
+         "shortDescription" : "",
+         "shape" : "[1, 128, 1, 3000]",
+         "name" : "melspectrogram_features",
+         "type" : "MultiArray"
+       }
+     ],
+     "modelParameters" : [
+
+     ],
+     "specificationVersion" : 9,
+     "mlProgramOperationTypeHistogram" : {
+       "Ios18.mul" : 2,
+       "Ios18.square" : 2,
+       "Ios18.conv" : 2,
+       "Ios18.matmul" : 1,
+       "Ios18.expandDims" : 4,
+       "Ios18.sub" : 1,
+       "Ios18.log" : 1,
+       "Ios18.add" : 3,
+       "Ios18.sliceByIndex" : 1,
+       "Ios18.maximum" : 1,
+       "Ios18.squeeze" : 2,
+       "Ios18.reshape" : 2,
+       "Ios16.reduceMax" : 1,
+       "Identity" : 1,
+       "Pad" : 1
+     },
+     "computePrecision" : "Mixed (Float16, Float32, Int32)",
+     "isUpdatable" : "0",
+     "stateSchema" : [
+
+     ],
+     "availability" : {
+       "macOS" : "15.0",
+       "tvOS" : "18.0",
+       "visionOS" : "2.0",
+       "watchOS" : "11.0",
+       "iOS" : "18.0",
+       "macCatalyst" : "18.0"
+     },
+     "modelType" : {
+       "name" : "MLModelType_mlProgram"
+     },
+     "userDefinedMetadata" : {
+       "com.github.apple.coremltools.source_dialect" : "TorchScript",
+       "com.github.apple.coremltools.source" : "torch==2.5.1",
+       "com.github.apple.coremltools.version" : "8.0"
+     },
+     "inputSchema" : [
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 480000)",
+         "shortDescription" : "",
+         "shape" : "[480000]",
+         "name" : "audio",
+         "type" : "MultiArray"
+       }
+     ],
+     "generatedClassName" : "MelSpectrogram",
+     "method" : "predict"
+   }
+ ]
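Editor's note: the `audio` input is a flat [480000] Float16 vector, i.e. one 30-second window at Whisper's 16 kHz sample rate (16000 × 30 = 480000). A small sketch of the usual pad-or-trim step a caller would apply before invoking this model (the constant names are illustrative, not from this repo):

    import numpy as np

    SAMPLE_RATE = 16000           # Whisper operates on 16 kHz mono audio
    N_SAMPLES = SAMPLE_RATE * 30  # 480000, the model's fixed input length

    def pad_or_trim(audio: np.ndarray) -> np.ndarray:
        # Zero-pad short clips and truncate long ones to exactly 30 s.
        if audio.shape[0] >= N_SAMPLES:
            return audio[:N_SAMPLES].astype(np.float16)
        out = np.zeros(N_SAMPLES, dtype=np.float16)
        out[: audio.shape[0]] = audio
        return out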
distil-whisper_distil-large-v3/MelSpectrogram.mlmodelc/model.mil ADDED
@@ -0,0 +1,66 @@
+ program(1.3)
+ [buildInfo = dict<string, string>({{"coremlc-component-MIL", "3401.3.1"}, {"coremlc-version", "3401.4.1"}, {"coremltools-component-torch", "2.5.1"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.0"}})]
+ {
+     func main<ios18>(tensor<fp16, [480000]> audio) {
+         tensor<int32, [3]> var_10 = const()[name = string("op_10"), val = tensor<int32, [3]>([1, 1, 480000])];
+         tensor<fp16, [1, 1, 480000]> input_1_cast_fp16 = reshape(shape = var_10, x = audio)[name = string("input_1_cast_fp16")];
+         tensor<int32, [6]> input_3_pad_0 = const()[name = string("input_3_pad_0"), val = tensor<int32, [6]>([0, 0, 0, 0, 200, 200])];
+         string input_3_mode_0 = const()[name = string("input_3_mode_0"), val = string("reflect")];
+         fp16 const_1_to_fp16 = const()[name = string("const_1_to_fp16"), val = fp16(0x0p+0)];
+         tensor<fp16, [1, 1, 480400]> input_3_cast_fp16 = pad(constant_val = const_1_to_fp16, mode = input_3_mode_0, pad = input_3_pad_0, x = input_1_cast_fp16)[name = string("input_3_cast_fp16")];
+         tensor<int32, [1]> var_22 = const()[name = string("op_22"), val = tensor<int32, [1]>([480400])];
+         tensor<fp16, [480400]> input_cast_fp16 = reshape(shape = var_22, x = input_3_cast_fp16)[name = string("input_cast_fp16")];
+         tensor<int32, [1]> expand_dims_0_axes_0 = const()[name = string("expand_dims_0_axes_0"), val = tensor<int32, [1]>([0])];
+         tensor<fp16, [1, 480400]> expand_dims_0_cast_fp16 = expand_dims(axes = expand_dims_0_axes_0, x = input_cast_fp16)[name = string("expand_dims_0_cast_fp16")];
+         tensor<int32, [1]> expand_dims_3 = const()[name = string("expand_dims_3"), val = tensor<int32, [1]>([160])];
+         tensor<int32, [1]> expand_dims_4_axes_0 = const()[name = string("expand_dims_4_axes_0"), val = tensor<int32, [1]>([1])];
+         tensor<fp16, [1, 1, 480400]> expand_dims_4_cast_fp16 = expand_dims(axes = expand_dims_4_axes_0, x = expand_dims_0_cast_fp16)[name = string("expand_dims_4_cast_fp16")];
+         string conv_0_pad_type_0 = const()[name = string("conv_0_pad_type_0"), val = string("valid")];
+         tensor<int32, [2]> conv_0_pad_0 = const()[name = string("conv_0_pad_0"), val = tensor<int32, [2]>([0, 0])];
+         tensor<int32, [1]> conv_0_dilations_0 = const()[name = string("conv_0_dilations_0"), val = tensor<int32, [1]>([1])];
+         int32 conv_0_groups_0 = const()[name = string("conv_0_groups_0"), val = int32(1)];
+         tensor<fp16, [201, 1, 400]> expand_dims_1_to_fp16 = const()[name = string("expand_dims_1_to_fp16"), val = tensor<fp16, [201, 1, 400]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(64)))];
+         tensor<fp16, [1, 201, 3001]> conv_0_cast_fp16 = conv(dilations = conv_0_dilations_0, groups = conv_0_groups_0, pad = conv_0_pad_0, pad_type = conv_0_pad_type_0, strides = expand_dims_3, weight = expand_dims_1_to_fp16, x = expand_dims_4_cast_fp16)[name = string("conv_0_cast_fp16")];
+         string conv_1_pad_type_0 = const()[name = string("conv_1_pad_type_0"), val = string("valid")];
+         tensor<int32, [2]> conv_1_pad_0 = const()[name = string("conv_1_pad_0"), val = tensor<int32, [2]>([0, 0])];
+         tensor<int32, [1]> conv_1_dilations_0 = const()[name = string("conv_1_dilations_0"), val = tensor<int32, [1]>([1])];
+         int32 conv_1_groups_0 = const()[name = string("conv_1_groups_0"), val = int32(1)];
+         tensor<fp16, [201, 1, 400]> expand_dims_2_to_fp16 = const()[name = string("expand_dims_2_to_fp16"), val = tensor<fp16, [201, 1, 400]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(160960)))];
+         tensor<fp16, [1, 201, 3001]> conv_1_cast_fp16 = conv(dilations = conv_1_dilations_0, groups = conv_1_groups_0, pad = conv_1_pad_0, pad_type = conv_1_pad_type_0, strides = expand_dims_3, weight = expand_dims_2_to_fp16, x = expand_dims_4_cast_fp16)[name = string("conv_1_cast_fp16")];
+         tensor<int32, [1]> squeeze_0_axes_0 = const()[name = string("squeeze_0_axes_0"), val = tensor<int32, [1]>([0])];
+         tensor<fp16, [201, 3001]> squeeze_0_cast_fp16 = squeeze(axes = squeeze_0_axes_0, x = conv_0_cast_fp16)[name = string("squeeze_0_cast_fp16")];
+         tensor<int32, [1]> squeeze_1_axes_0 = const()[name = string("squeeze_1_axes_0"), val = tensor<int32, [1]>([0])];
+         tensor<fp16, [201, 3001]> squeeze_1_cast_fp16 = squeeze(axes = squeeze_1_axes_0, x = conv_1_cast_fp16)[name = string("squeeze_1_cast_fp16")];
+         tensor<fp16, [201, 3001]> square_0_cast_fp16 = square(x = squeeze_0_cast_fp16)[name = string("square_0_cast_fp16")];
+         tensor<fp16, [201, 3001]> square_1_cast_fp16 = square(x = squeeze_1_cast_fp16)[name = string("square_1_cast_fp16")];
+         tensor<fp16, [201, 3001]> add_1_cast_fp16 = add(x = square_0_cast_fp16, y = square_1_cast_fp16)[name = string("add_1_cast_fp16")];
+         tensor<fp16, [201, 3001]> magnitudes_1_cast_fp16 = identity(x = add_1_cast_fp16)[name = string("magnitudes_1_cast_fp16")];
+         tensor<int32, [2]> magnitudes_begin_0 = const()[name = string("magnitudes_begin_0"), val = tensor<int32, [2]>([0, 0])];
+         tensor<int32, [2]> magnitudes_end_0 = const()[name = string("magnitudes_end_0"), val = tensor<int32, [2]>([201, 3000])];
+         tensor<bool, [2]> magnitudes_end_mask_0 = const()[name = string("magnitudes_end_mask_0"), val = tensor<bool, [2]>([true, false])];
+         tensor<fp16, [201, 3000]> magnitudes_cast_fp16 = slice_by_index(begin = magnitudes_begin_0, end = magnitudes_end_0, end_mask = magnitudes_end_mask_0, x = magnitudes_1_cast_fp16)[name = string("magnitudes_cast_fp16")];
+         bool mel_spec_1_transpose_x_0 = const()[name = string("mel_spec_1_transpose_x_0"), val = bool(false)];
+         bool mel_spec_1_transpose_y_0 = const()[name = string("mel_spec_1_transpose_y_0"), val = bool(false)];
+         tensor<fp16, [128, 201]> mel_filters_to_fp16 = const()[name = string("mel_filters_to_fp16"), val = tensor<fp16, [128, 201]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(321856)))];
+         tensor<fp16, [128, 3000]> mel_spec_1_cast_fp16 = matmul(transpose_x = mel_spec_1_transpose_x_0, transpose_y = mel_spec_1_transpose_y_0, x = mel_filters_to_fp16, y = magnitudes_cast_fp16)[name = string("mel_spec_1_cast_fp16")];
+         fp16 var_41_to_fp16 = const()[name = string("op_41_to_fp16"), val = fp16(0x1p-24)];
+         tensor<fp16, [128, 3000]> mel_spec_cast_fp16 = add(x = mel_spec_1_cast_fp16, y = var_41_to_fp16)[name = string("mel_spec_cast_fp16")];
+         fp32 log_0_epsilon_0 = const()[name = string("log_0_epsilon_0"), val = fp32(0x1p-149)];
+         tensor<fp16, [128, 3000]> log_0_cast_fp16 = log(epsilon = log_0_epsilon_0, x = mel_spec_cast_fp16)[name = string("log_0_cast_fp16")];
+         fp16 mul_0_y_0_to_fp16 = const()[name = string("mul_0_y_0_to_fp16"), val = fp16(0x1.bccp-2)];
+         tensor<fp16, [128, 3000]> mul_0_cast_fp16 = mul(x = log_0_cast_fp16, y = mul_0_y_0_to_fp16)[name = string("mul_0_cast_fp16")];
+         bool var_44_keep_dims_0 = const()[name = string("op_44_keep_dims_0"), val = bool(false)];
+         fp16 var_44_cast_fp16 = reduce_max(keep_dims = var_44_keep_dims_0, x = mul_0_cast_fp16)[name = string("op_44_cast_fp16")];
+         fp16 var_46_to_fp16 = const()[name = string("op_46_to_fp16"), val = fp16(0x1p+3)];
+         fp16 var_47_cast_fp16 = sub(x = var_44_cast_fp16, y = var_46_to_fp16)[name = string("op_47_cast_fp16")];
+         tensor<fp16, [128, 3000]> log_spec_3_cast_fp16 = maximum(x = mul_0_cast_fp16, y = var_47_cast_fp16)[name = string("log_spec_3_cast_fp16")];
+         fp16 var_50_to_fp16 = const()[name = string("op_50_to_fp16"), val = fp16(0x1p+2)];
+         tensor<fp16, [128, 3000]> var_51_cast_fp16 = add(x = log_spec_3_cast_fp16, y = var_50_to_fp16)[name = string("op_51_cast_fp16")];
+         fp16 _inversed_log_spec_y_0_to_fp16 = const()[name = string("_inversed_log_spec_y_0_to_fp16"), val = fp16(0x1p-2)];
+         tensor<fp16, [128, 3000]> _inversed_log_spec_cast_fp16 = mul(x = var_51_cast_fp16, y = _inversed_log_spec_y_0_to_fp16)[name = string("_inversed_log_spec_cast_fp16")];
+         tensor<int32, [1]> var_55_axes_0 = const()[name = string("op_55_axes_0"), val = tensor<int32, [1]>([0])];
+         tensor<fp16, [1, 128, 3000]> var_55_cast_fp16 = expand_dims(axes = var_55_axes_0, x = _inversed_log_spec_cast_fp16)[name = string("op_55_cast_fp16")];
+         tensor<int32, [1]> var_62_axes_0 = const()[name = string("op_62_axes_0"), val = tensor<int32, [1]>([2])];
+         tensor<fp16, [1, 128, 1, 3000]> melspectrogram_features = expand_dims(axes = var_62_axes_0, x = var_55_cast_fp16)[name = string("op_62_cast_fp16")];
+     } -> (melspectrogram_features);
+ }
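Editor's note: the MIL program above is Whisper's log-mel front end: two stride-160 convolutions against 400-tap windowed cosine/sine banks compute the STFT real and imaginary parts, the squared magnitudes pass through a [128, 201] mel filterbank, and the result is log10-scaled (note 0x1.bccp-2 ≈ log10(e)), clamped to within 8 of its maximum, then mapped through (x + 4) / 4. A NumPy sketch of the same arithmetic, as a hedged reference: an FFT replaces the convolution pair, mel_filters is assumed to be the [128, 201] matrix stored in weight.bin, and fp16 epsilon details are omitted:

    import numpy as np

    def log_mel(audio: np.ndarray, mel_filters: np.ndarray) -> np.ndarray:
        # audio: [480000] float32; mel_filters: [128, 201]
        window = np.hanning(401)[:-1]                # periodic Hann, length 400
        padded = np.pad(audio, 200, mode="reflect")  # mirrors the MIL reflect pad
        frames = np.lib.stride_tricks.sliding_window_view(padded, 400)[::160]
        stft = np.fft.rfft(frames * window, n=400)   # [3001, 201]
        magnitudes = (np.abs(stft) ** 2)[:-1].T      # drop last frame -> [201, 3000]
        mel_spec = mel_filters @ magnitudes          # [128, 3000]
        log_spec = np.log10(np.maximum(mel_spec, 1e-10))
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        return (log_spec + 4.0) / 4.0                # final shape [128, 3000]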
distil-whisper_distil-large-v3/MelSpectrogram.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:009d9fb8f6b589accfa08cebf1c712ef07c3405229ce3cfb3a57ee033c9d8a49
+ size 373376
distil-whisper_distil-large-v3/TextDecoder.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:77cb1b565a336e7fc01586698e50aa32d9a2a8f1ca5c439172564f4af0515f5d
+ size 243
distil-whisper_distil-large-v3/TextDecoder.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7a5e6f62b5ae897c8f846e22cacbe7d4f7d6bdbeb5f46366e2387f1082676b62
+ size 754
distil-whisper_distil-large-v3/TextDecoder.mlmodelc/metadata.json ADDED
@@ -0,0 +1,183 @@
+ [
+   {
+     "metadataOutputVersion" : "3.0",
+     "storagePrecision" : "Float16",
+     "outputSchema" : [
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 1 × 1 × 51866)",
+         "shortDescription" : "",
+         "shape" : "[1, 1, 51866]",
+         "name" : "logits",
+         "type" : "MultiArray"
+       },
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 1 × 2560 × 1 × 1)",
+         "shortDescription" : "",
+         "shape" : "[1, 2560, 1, 1]",
+         "name" : "key_cache_updates",
+         "type" : "MultiArray"
+       },
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 1 × 2560 × 1 × 1)",
+         "shortDescription" : "",
+         "shape" : "[1, 2560, 1, 1]",
+         "name" : "value_cache_updates",
+         "type" : "MultiArray"
+       },
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 1 × 1536)",
+         "shortDescription" : "",
+         "shape" : "[1, 1536]",
+         "name" : "alignment_heads_weights",
+         "type" : "MultiArray"
+       }
+     ],
+     "modelParameters" : [
+
+     ],
+     "specificationVersion" : 9,
+     "mlProgramOperationTypeHistogram" : {
+       "Ios18.expandDims" : 8,
+       "Ios18.softmax" : 4,
+       "Ios18.mul" : 8,
+       "Ios18.matmul" : 8,
+       "Ios18.batchNorm" : 7,
+       "Ios16.reduceMean" : 1,
+       "Split" : 2,
+       "Ios18.readState" : 5,
+       "Ios18.gather" : 2,
+       "Ios18.add" : 15,
+       "Ios18.layerNorm" : 7,
+       "Ios18.reshape" : 16,
+       "Ios18.linear" : 1,
+       "Ios18.conv" : 16,
+       "Ios18.gelu" : 2,
+       "Ios18.concat" : 3,
+       "Ios18.cast" : 1,
+       "Ios18.transpose" : 1,
+       "Ios18.sliceByIndex" : 44,
+       "Ios18.squeeze" : 1
+     },
+     "computePrecision" : "Mixed (Float16, Int32, UInt16)",
+     "isUpdatable" : "0",
+     "stateSchema" : [
+       {
+         "dataType" : "Float16",
+         "isOptional" : "0",
+         "formattedType" : "State (Float16 1 × 1536)",
+         "shortDescription" : "",
+         "shape" : "[1, 1536]",
+         "name" : "encoder_attn_key_padding_mask",
+         "type" : "State"
+       },
+       {
+         "dataType" : "Float16",
+         "isOptional" : "0",
+         "formattedType" : "State (Float16 2 × 1280 × 1 × 1536)",
+         "shortDescription" : "",
+         "shape" : "[2, 1280, 1, 1536]",
+         "name" : "encoder_attn_key_cache",
+         "type" : "State"
+       },
+       {
+         "dataType" : "Float16",
+         "isOptional" : "0",
+         "formattedType" : "State (Float16 2 × 1280 × 1 × 1536)",
+         "shortDescription" : "",
+         "shape" : "[2, 1280, 1, 1536]",
+         "name" : "encoder_attn_value_cache",
+         "type" : "State"
+       },
+       {
+         "dataType" : "Float16",
+         "isOptional" : "0",
+         "formattedType" : "State (Float16 2 × 1280 × 1 × 448)",
+         "shortDescription" : "",
+         "shape" : "[2, 1280, 1, 448]",
+         "name" : "self_attn_key_cache",
+         "type" : "State"
+       },
+       {
+         "dataType" : "Float16",
+         "isOptional" : "0",
+         "formattedType" : "State (Float16 2 × 1280 × 1 × 448)",
+         "shortDescription" : "",
+         "shape" : "[2, 1280, 1, 448]",
+         "name" : "self_attn_value_cache",
+         "type" : "State"
+       }
+     ],
+     "availability" : {
+       "macOS" : "15.0",
+       "tvOS" : "18.0",
+       "visionOS" : "2.0",
+       "watchOS" : "11.0",
+       "iOS" : "18.0",
+       "macCatalyst" : "18.0"
+     },
+     "modelType" : {
+       "name" : "MLModelType_mlProgram"
+     },
+     "userDefinedMetadata" : {
+       "com.github.apple.coremltools.source_dialect" : "TorchScript",
+       "com.github.apple.coremltools.source" : "torch==2.5.1",
+       "com.github.apple.coremltools.version" : "8.0"
+     },
+     "inputSchema" : [
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Int32",
+         "formattedType" : "MultiArray (Int32 1)",
+         "shortDescription" : "",
+         "shape" : "[1]",
+         "name" : "input_ids",
+         "type" : "MultiArray"
+       },
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Int32",
+         "formattedType" : "MultiArray (Int32 1)",
+         "shortDescription" : "",
+         "shape" : "[1]",
+         "name" : "cache_length",
+         "type" : "MultiArray"
+       },
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 1 × 448)",
+         "shortDescription" : "",
+         "shape" : "[1, 448]",
+         "name" : "kv_cache_update_mask",
+         "type" : "MultiArray"
+       },
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 1 × 448)",
+         "shortDescription" : "",
+         "shape" : "[1, 448]",
+         "name" : "decoder_key_padding_mask",
+         "type" : "MultiArray"
+       }
+     ],
+     "generatedClassName" : "TextDecoderStateful",
+     "method" : "predict"
+   }
+ ]
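Editor's note: the decoder is stateful; the self- and cross-attention KV caches live in Core ML state tensors (see `stateSchema`), and each call takes one token plus masks and returns logits over the 51866-entry vocabulary. A greedy decode-step sketch, assuming the coremltools >= 8 state APIs (`make_state` and `predict(..., state=...)`) work for compiled models, and assuming the additive mask convention (0 for active positions, a large negative value for masked ones) plus encoder cross-attention caches already written into the shared state, which this sketch does not show:

    import numpy as np
    import coremltools as ct

    decoder = ct.models.CompiledMLModel(
        "distil-whisper_distil-large-v3/TextDecoder.mlmodelc"
    )
    state = decoder.make_state()  # holds the KV caches between calls

    def decode_step(token_id: int, position: int) -> int:
        update_mask = np.zeros((1, 448), dtype=np.float16)
        update_mask[0, position] = 1  # write current K/V into this cache slot
        padding_mask = np.full((1, 448), np.float16(-1e4))
        padding_mask[0, : position + 1] = 0  # attend to decoded positions only
        out = decoder.predict(
            {
                "input_ids": np.array([token_id], dtype=np.int32),
                "cache_length": np.array([position], dtype=np.int32),
                "kv_cache_update_mask": update_mask,
                "decoder_key_padding_mask": padding_mask,
            },
            state=state,
        )
        return int(out["logits"][0, 0].argmax())  # greedy over 51866 logits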
distil-whisper_distil-large-v3/TextDecoder.mlmodelc/model.mil ADDED
@@ -0,0 +1,529 @@
+ program(1.3)
+ [buildInfo = dict<string, string>({{"coremlc-component-MIL", "3401.3.1"}, {"coremlc-version", "3401.4.1"}, {"coremltools-component-torch", "2.5.1"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.0"}})]
+ {
+     func main<ios18>(tensor<int32, [1]> cache_length, tensor<fp16, [1, 448]> decoder_key_padding_mask, state<tensor<fp16, [2, 1280, 1, 1536]>> encoder_attn_key_cache, state<tensor<fp16, [1, 1536]>> encoder_attn_key_padding_mask, state<tensor<fp16, [2, 1280, 1, 1536]>> encoder_attn_value_cache, tensor<int32, [1]> input_ids, tensor<fp16, [1, 448]> kv_cache_update_mask, state<tensor<fp16, [2, 1280, 1, 448]>> self_attn_key_cache, state<tensor<fp16, [2, 1280, 1, 448]>> self_attn_value_cache) {
+         int32 var_22_axis_0 = const()[name = string("op_22_axis_0"), val = int32(0)];
+         int32 var_22_batch_dims_0 = const()[name = string("op_22_batch_dims_0"), val = int32(0)];
+         bool var_22_validate_indices_0 = const()[name = string("op_22_validate_indices_0"), val = bool(false)];
+         tensor<fp16, [51866, 1280]> embed_tokens_weight_to_fp16 = const()[name = string("embed_tokens_weight_to_fp16"), val = tensor<fp16, [51866, 1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(64)))];
+         tensor<fp16, [1, 1280]> var_22_cast_fp16 = gather(axis = var_22_axis_0, batch_dims = var_22_batch_dims_0, indices = input_ids, validate_indices = var_22_validate_indices_0, x = embed_tokens_weight_to_fp16)[name = string("op_22_cast_fp16")];
+         int32 var_26_axis_0 = const()[name = string("op_26_axis_0"), val = int32(0)];
+         int32 var_26_batch_dims_0 = const()[name = string("op_26_batch_dims_0"), val = int32(0)];
+         bool var_26_validate_indices_0 = const()[name = string("op_26_validate_indices_0"), val = bool(false)];
+         tensor<fp16, [448, 1280]> embed_positions_weight_to_fp16 = const()[name = string("embed_positions_weight_to_fp16"), val = tensor<fp16, [448, 1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(132777088)))];
+         string cache_length_to_uint16_dtype_0 = const()[name = string("cache_length_to_uint16_dtype_0"), val = string("uint16")];
+         tensor<uint16, [1]> cache_length_to_uint16 = cast(dtype = cache_length_to_uint16_dtype_0, x = cache_length)[name = string("cast_43")];
+         tensor<fp16, [1, 1280]> var_26_cast_fp16_cast_uint16 = gather(axis = var_26_axis_0, batch_dims = var_26_batch_dims_0, indices = cache_length_to_uint16, validate_indices = var_26_validate_indices_0, x = embed_positions_weight_to_fp16)[name = string("op_26_cast_fp16_cast_uint16")];
+         tensor<fp16, [1, 1280]> hidden_states_1_cast_fp16 = add(x = var_22_cast_fp16, y = var_26_cast_fp16_cast_uint16)[name = string("hidden_states_1_cast_fp16")];
+         tensor<int32, [1]> var_40_axes_0 = const()[name = string("op_40_axes_0"), val = tensor<int32, [1]>([2])];
+         tensor<fp16, [1, 1280, 1]> var_40_cast_fp16 = expand_dims(axes = var_40_axes_0, x = hidden_states_1_cast_fp16)[name = string("op_40_cast_fp16")];
+         tensor<int32, [1]> inputs_1_axes_0 = const()[name = string("inputs_1_axes_0"), val = tensor<int32, [1]>([3])];
+         tensor<fp16, [1, 1280, 1, 1]> inputs_1_cast_fp16 = expand_dims(axes = inputs_1_axes_0, x = var_40_cast_fp16)[name = string("inputs_1_cast_fp16")];
+         tensor<fp16, [2, 1280, 1, 448]> read_state_0 = read_state(input = self_attn_key_cache)[name = string("read_state_0")];
+         tensor<int32, [2]> tile_0 = const()[name = string("tile_0"), val = tensor<int32, [2]>([1, 1])];
+         int32 var_45_axis_0 = const()[name = string("op_45_axis_0"), val = int32(0)];
+         tensor<fp16, [1, 1280, 1, 448]> var_45_cast_fp16_0, tensor<fp16, [1, 1280, 1, 448]> var_45_cast_fp16_1 = split(axis = var_45_axis_0, split_sizes = tile_0, x = read_state_0)[name = string("op_45_cast_fp16")];
+         tensor<fp16, [2, 1280, 1, 448]> read_state_1 = read_state(input = self_attn_value_cache)[name = string("read_state_1")];
+         tensor<int32, [2]> tile_1 = const()[name = string("tile_1"), val = tensor<int32, [2]>([1, 1])];
+         int32 var_50_axis_0 = const()[name = string("op_50_axis_0"), val = int32(0)];
+         tensor<fp16, [1, 1280, 1, 448]> var_50_cast_fp16_0, tensor<fp16, [1, 1280, 1, 448]> var_50_cast_fp16_1 = split(axis = var_50_axis_0, split_sizes = tile_1, x = read_state_1)[name = string("op_50_cast_fp16")];
+         tensor<fp16, [2, 1280, 1, 1536]> read_state_2 = read_state(input = encoder_attn_key_cache)[name = string("read_state_2")];
+         tensor<int32, [4]> obj_17_begin_0 = const()[name = string("obj_17_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+         tensor<int32, [4]> obj_17_end_0 = const()[name = string("obj_17_end_0"), val = tensor<int32, [4]>([1, 1280, 1, 1536])];
+         tensor<bool, [4]> obj_17_end_mask_0 = const()[name = string("obj_17_end_mask_0"), val = tensor<bool, [4]>([false, true, true, true])];
+         tensor<fp16, [1, 1280, 1, 1536]> obj_17_cast_fp16 = slice_by_index(begin = obj_17_begin_0, end = obj_17_end_0, end_mask = obj_17_end_mask_0, x = read_state_2)[name = string("obj_17_cast_fp16")];
+         tensor<fp16, [2, 1280, 1, 1536]> read_state_3 = read_state(input = encoder_attn_value_cache)[name = string("read_state_3")];
+         tensor<int32, [4]> obj_19_begin_0 = const()[name = string("obj_19_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+         tensor<int32, [4]> obj_19_end_0 = const()[name = string("obj_19_end_0"), val = tensor<int32, [4]>([1, 1280, 1, 1536])];
+         tensor<bool, [4]> obj_19_end_mask_0 = const()[name = string("obj_19_end_mask_0"), val = tensor<bool, [4]>([false, true, true, true])];
+         tensor<fp16, [1, 1280, 1, 1536]> obj_19_cast_fp16 = slice_by_index(begin = obj_19_begin_0, end = obj_19_end_0, end_mask = obj_19_end_mask_0, x = read_state_3)[name = string("obj_19_cast_fp16")];
+         int32 var_68 = const()[name = string("op_68"), val = int32(3)];
+         tensor<int32, [1]> out_1_axes_0 = const()[name = string("out_1_axes_0"), val = tensor<int32, [1]>([1])];
+         fp16 var_93_to_fp16 = const()[name = string("op_93_to_fp16"), val = fp16(0x1.5p-17)];
+         tensor<fp16, [1, 1280, 1, 1]> out_1_cast_fp16 = layer_norm(axes = out_1_axes_0, epsilon = var_93_to_fp16, x = inputs_1_cast_fp16)[name = string("out_1_cast_fp16")];
+         tensor<fp16, [1280]> obj_5_mean_0_to_fp16 = const()[name = string("obj_5_mean_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(133924032)))];
+         tensor<fp16, [1280]> obj_5_variance_0_to_fp16 = const()[name = string("obj_5_variance_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(133926656)))];
+         tensor<fp16, [1280]> obj_5_gamma_0_to_fp16 = const()[name = string("obj_5_gamma_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(133929280)))];
+         tensor<fp16, [1280]> obj_5_beta_0_to_fp16 = const()[name = string("obj_5_beta_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(133931904)))];
+         fp16 obj_5_epsilon_0_to_fp16 = const()[name = string("obj_5_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)];
+         tensor<fp16, [1, 1280, 1, 1]> obj_5_cast_fp16 = batch_norm(beta = obj_5_beta_0_to_fp16, epsilon = obj_5_epsilon_0_to_fp16, gamma = obj_5_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_1_cast_fp16)[name = string("obj_5_cast_fp16")];
+         string query_1_pad_type_0 = const()[name = string("query_1_pad_type_0"), val = string("valid")];
+         tensor<int32, [2]> query_1_strides_0 = const()[name = string("query_1_strides_0"), val = tensor<int32, [2]>([1, 1])];
+         tensor<int32, [4]> query_1_pad_0 = const()[name = string("query_1_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+         tensor<int32, [2]> query_1_dilations_0 = const()[name = string("query_1_dilations_0"), val = tensor<int32, [2]>([1, 1])];
+         int32 query_1_groups_0 = const()[name = string("query_1_groups_0"), val = int32(1)];
+         tensor<fp16, [1280, 1280, 1, 1]> layers_0_self_attn_q_proj_weight_to_fp16 = const()[name = string("layers_0_self_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(133934528)))];
+         tensor<fp16, [1280]> layers_0_self_attn_q_proj_bias_to_fp16 = const()[name = string("layers_0_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(137211392)))];
+         tensor<fp16, [1, 1280, 1, 1]> query_1_cast_fp16 = conv(bias = layers_0_self_attn_q_proj_bias_to_fp16, dilations = query_1_dilations_0, groups = query_1_groups_0, pad = query_1_pad_0, pad_type = query_1_pad_type_0, strides = query_1_strides_0, weight = layers_0_self_attn_q_proj_weight_to_fp16, x = obj_5_cast_fp16)[name = string("query_1_cast_fp16")];
+         string current_key_1_pad_type_0 = const()[name = string("current_key_1_pad_type_0"), val = string("valid")];
+         tensor<int32, [2]> current_key_1_strides_0 = const()[name = string("current_key_1_strides_0"), val = tensor<int32, [2]>([1, 1])];
+         tensor<int32, [4]> current_key_1_pad_0 = const()[name = string("current_key_1_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+         tensor<int32, [2]> current_key_1_dilations_0 = const()[name = string("current_key_1_dilations_0"), val = tensor<int32, [2]>([1, 1])];
+         int32 current_key_1_groups_0 = const()[name = string("current_key_1_groups_0"), val = int32(1)];
+         tensor<fp16, [1280, 1280, 1, 1]> layers_0_self_attn_k_proj_weight_to_fp16 = const()[name = string("layers_0_self_attn_k_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(137214016)))];
+         tensor<fp16, [1, 1280, 1, 1]> current_key_1_cast_fp16 = conv(dilations = current_key_1_dilations_0, groups = current_key_1_groups_0, pad = current_key_1_pad_0, pad_type = current_key_1_pad_type_0, strides = current_key_1_strides_0, weight = layers_0_self_attn_k_proj_weight_to_fp16, x = obj_5_cast_fp16)[name = string("current_key_1_cast_fp16")];
+         string current_value_1_pad_type_0 = const()[name = string("current_value_1_pad_type_0"), val = string("valid")];
+         tensor<int32, [2]> current_value_1_strides_0 = const()[name = string("current_value_1_strides_0"), val = tensor<int32, [2]>([1, 1])];
+         tensor<int32, [4]> current_value_1_pad_0 = const()[name = string("current_value_1_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+         tensor<int32, [2]> current_value_1_dilations_0 = const()[name = string("current_value_1_dilations_0"), val = tensor<int32, [2]>([1, 1])];
+         int32 current_value_1_groups_0 = const()[name = string("current_value_1_groups_0"), val = int32(1)];
+         tensor<fp16, [1280, 1280, 1, 1]> layers_0_self_attn_v_proj_weight_to_fp16 = const()[name = string("layers_0_self_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(140490880)))];
+         tensor<fp16, [1280]> layers_0_self_attn_v_proj_bias_to_fp16 = const()[name = string("layers_0_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(143767744)))];
+         tensor<fp16, [1, 1280, 1, 1]> current_value_1_cast_fp16 = conv(bias = layers_0_self_attn_v_proj_bias_to_fp16, dilations = current_value_1_dilations_0, groups = current_value_1_groups_0, pad = current_value_1_pad_0, pad_type = current_value_1_pad_type_0, strides = current_value_1_strides_0, weight = layers_0_self_attn_v_proj_weight_to_fp16, x = obj_5_cast_fp16)[name = string("current_value_1_cast_fp16")];
+         tensor<int32, [1]> var_128_axes_0 = const()[name = string("op_128_axes_0"), val = tensor<int32, [1]>([1])];
+         tensor<fp16, [1, 1, 448]> var_128_cast_fp16 = expand_dims(axes = var_128_axes_0, x = kv_cache_update_mask)[name = string("op_128_cast_fp16")];
+         tensor<int32, [1]> var_129_axes_0 = const()[name = string("op_129_axes_0"), val = tensor<int32, [1]>([2])];
+         tensor<fp16, [1, 1, 1, 448]> var_129_cast_fp16 = expand_dims(axes = var_129_axes_0, x = var_128_cast_fp16)[name = string("op_129_cast_fp16")];
+         tensor<fp16, [1, 1280, 1, 448]> var_131_cast_fp16 = mul(x = current_key_1_cast_fp16, y = var_129_cast_fp16)[name = string("op_131_cast_fp16")];
+         tensor<fp16, [1, 1280, 1, 448]> key_1_cast_fp16 = add(x = var_45_cast_fp16_0, y = var_131_cast_fp16)[name = string("key_1_cast_fp16")];
+         tensor<fp16, [1, 1280, 1, 448]> var_133_cast_fp16 = mul(x = current_value_1_cast_fp16, y = var_129_cast_fp16)[name = string("op_133_cast_fp16")];
+         tensor<fp16, [1, 1280, 1, 448]> value_1_cast_fp16 = add(x = var_50_cast_fp16_0, y = var_133_cast_fp16)[name = string("value_1_cast_fp16")];
+         tensor<int32, [4]> var_136 = const()[name = string("op_136"), val = tensor<int32, [4]>([1, 20, 64, -1])];
+         tensor<fp16, [1, 20, 64, 1]> mh_q_1_cast_fp16 = reshape(shape = var_136, x = query_1_cast_fp16)[name = string("mh_q_1_cast_fp16")];
+         fp16 var_138_to_fp16 = const()[name = string("op_138_to_fp16"), val = fp16(0x1p-3)];
+         tensor<fp16, [1, 20, 64, 1]> var_139_cast_fp16 = mul(x = mh_q_1_cast_fp16, y = var_138_to_fp16)[name = string("op_139_cast_fp16")];
+         tensor<int32, [4]> var_140 = const()[name = string("op_140"), val = tensor<int32, [4]>([1, 20, 64, -1])];
+         tensor<fp16, [1, 20, 64, 448]> var_141_cast_fp16 = reshape(shape = var_140, x = key_1_cast_fp16)[name = string("op_141_cast_fp16")];
+         bool mh_w_1_transpose_x_0 = const()[name = string("mh_w_1_transpose_x_0"), val = bool(true)];
+         bool mh_w_1_transpose_y_0 = const()[name = string("mh_w_1_transpose_y_0"), val = bool(false)];
+         tensor<fp16, [1, 20, 1, 448]> mh_w_1_cast_fp16 = matmul(transpose_x = mh_w_1_transpose_x_0, transpose_y = mh_w_1_transpose_y_0, x = var_139_cast_fp16, y = var_141_cast_fp16)[name = string("mh_w_1_cast_fp16")];
+         tensor<int32, [1]> var_145_axes_0 = const()[name = string("op_145_axes_0"), val = tensor<int32, [1]>([1])];
+         tensor<fp16, [1, 1, 448]> var_145_cast_fp16 = expand_dims(axes = var_145_axes_0, x = decoder_key_padding_mask)[name = string("op_145_cast_fp16")];
+         tensor<int32, [1]> var_146_axes_0 = const()[name = string("op_146_axes_0"), val = tensor<int32, [1]>([2])];
+         tensor<fp16, [1, 1, 1, 448]> var_146_cast_fp16 = expand_dims(axes = var_146_axes_0, x = var_145_cast_fp16)[name = string("op_146_cast_fp16")];
+         tensor<fp16, [1, 20, 1, 448]> mh_w_3_cast_fp16 = add(x = mh_w_1_cast_fp16, y = var_146_cast_fp16)[name = string("mh_w_3_cast_fp16")];
+         tensor<fp16, [1, 20, 1, 448]> var_149_cast_fp16 = softmax(axis = var_68, x = mh_w_3_cast_fp16)[name = string("op_149_cast_fp16")];
+         tensor<int32, [4]> var_150 = const()[name = string("op_150"), val = tensor<int32, [4]>([1, 20, 64, -1])];
+         tensor<fp16, [1, 20, 64, 448]> var_151_cast_fp16 = reshape(shape = var_150, x = value_1_cast_fp16)[name = string("op_151_cast_fp16")];
+         bool attn_1_transpose_x_0 = const()[name = string("attn_1_transpose_x_0"), val = bool(false)];
+         bool attn_1_transpose_y_0 = const()[name = string("attn_1_transpose_y_0"), val = bool(true)];
+         tensor<fp16, [1, 20, 64, 1]> attn_1_cast_fp16 = matmul(transpose_x = attn_1_transpose_x_0, transpose_y = attn_1_transpose_y_0, x = var_151_cast_fp16, y = var_149_cast_fp16)[name = string("attn_1_cast_fp16")];
+         tensor<int32, [4]> var_154 = const()[name = string("op_154"), val = tensor<int32, [4]>([1, 1280, 1, -1])];
+         tensor<fp16, [1, 1280, 1, 1]> input_1_cast_fp16 = reshape(shape = var_154, x = attn_1_cast_fp16)[name = string("input_1_cast_fp16")];
+         string obj_11_pad_type_0 = const()[name = string("obj_11_pad_type_0"), val = string("valid")];
+         tensor<int32, [2]> obj_11_strides_0 = const()[name = string("obj_11_strides_0"), val = tensor<int32, [2]>([1, 1])];
+         tensor<int32, [4]> obj_11_pad_0 = const()[name = string("obj_11_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+         tensor<int32, [2]> obj_11_dilations_0 = const()[name = string("obj_11_dilations_0"), val = tensor<int32, [2]>([1, 1])];
+         int32 obj_11_groups_0 = const()[name = string("obj_11_groups_0"), val = int32(1)];
+         tensor<fp16, [1280, 1280, 1, 1]> layers_0_self_attn_o_proj_weight_to_fp16 = const()[name = string("layers_0_self_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(143770368)))];
+         tensor<fp16, [1280]> layers_0_self_attn_o_proj_bias_to_fp16 = const()[name = string("layers_0_self_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(147047232)))];
+         tensor<fp16, [1, 1280, 1, 1]> obj_11_cast_fp16 = conv(bias = layers_0_self_attn_o_proj_bias_to_fp16, dilations = obj_11_dilations_0, groups = obj_11_groups_0, pad = obj_11_pad_0, pad_type = obj_11_pad_type_0, strides = obj_11_strides_0, weight = layers_0_self_attn_o_proj_weight_to_fp16, x = input_1_cast_fp16)[name = string("obj_11_cast_fp16")];
+         tensor<fp16, [1, 1280, 1, 1]> inputs_3_cast_fp16 = add(x = inputs_1_cast_fp16, y = obj_11_cast_fp16)[name = string("inputs_3_cast_fp16")];
+         tensor<int32, [1]> out_3_axes_0 = const()[name = string("out_3_axes_0"), val = tensor<int32, [1]>([1])];
+         fp16 var_176_to_fp16 = const()[name = string("op_176_to_fp16"), val = fp16(0x1.5p-17)];
+         tensor<fp16, [1, 1280, 1, 1]> out_3_cast_fp16 = layer_norm(axes = out_3_axes_0, epsilon = var_176_to_fp16, x = inputs_3_cast_fp16)[name = string("out_3_cast_fp16")];
+         tensor<fp16, [1280]> obj_13_gamma_0_to_fp16 = const()[name = string("obj_13_gamma_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(147049856)))];
+         tensor<fp16, [1280]> obj_13_beta_0_to_fp16 = const()[name = string("obj_13_beta_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(147052480)))];
+         fp16 obj_13_epsilon_0_to_fp16 = const()[name = string("obj_13_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)];
+         tensor<fp16, [1, 1280, 1, 1]> obj_13_cast_fp16 = batch_norm(beta = obj_13_beta_0_to_fp16, epsilon = obj_13_epsilon_0_to_fp16, gamma = obj_13_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_3_cast_fp16)[name = string("obj_13_cast_fp16")];
+         string query_3_pad_type_0 = const()[name = string("query_3_pad_type_0"), val = string("valid")];
+         tensor<int32, [2]> query_3_strides_0 = const()[name = string("query_3_strides_0"), val = tensor<int32, [2]>([1, 1])];
+         tensor<int32, [4]> query_3_pad_0 = const()[name = string("query_3_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+         tensor<int32, [2]> query_3_dilations_0 = const()[name = string("query_3_dilations_0"), val = tensor<int32, [2]>([1, 1])];
+         int32 query_3_groups_0 = const()[name = string("query_3_groups_0"), val = int32(1)];
+         tensor<fp16, [1280, 1280, 1, 1]> layers_0_encoder_attn_q_proj_weight_to_fp16 = const()[name = string("layers_0_encoder_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(147055104)))];
+         tensor<fp16, [1280]> layers_0_encoder_attn_q_proj_bias_to_fp16 = const()[name = string("layers_0_encoder_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(150331968)))];
+         tensor<fp16, [1, 1280, 1, 1]> query_3_cast_fp16 = conv(bias = layers_0_encoder_attn_q_proj_bias_to_fp16, dilations = query_3_dilations_0, groups = query_3_groups_0, pad = query_3_pad_0, pad_type = query_3_pad_type_0, strides = query_3_strides_0, weight = layers_0_encoder_attn_q_proj_weight_to_fp16, x = obj_13_cast_fp16)[name = string("query_3_cast_fp16")];
+         tensor<int32, [4]> var_196 = const()[name = string("op_196"), val = tensor<int32, [4]>([1, 20, 64, -1])];
+         tensor<fp16, [1, 20, 64, 1]> mh_q_3_cast_fp16 = reshape(shape = var_196, x = query_3_cast_fp16)[name = string("mh_q_3_cast_fp16")];
+         fp16 var_198_to_fp16 = const()[name = string("op_198_to_fp16"), val = fp16(0x1p-3)];
+         tensor<fp16, [1, 20, 64, 1]> var_199_cast_fp16 = mul(x = mh_q_3_cast_fp16, y = var_198_to_fp16)[name = string("op_199_cast_fp16")];
+         tensor<int32, [4]> var_200 = const()[name = string("op_200"), val = tensor<int32, [4]>([1, 20, 64, -1])];
+         tensor<fp16, [1, 20, 64, 1536]> var_201_cast_fp16 = reshape(shape = var_200, x = obj_17_cast_fp16)[name = string("op_201_cast_fp16")];
+         bool mh_w_5_transpose_x_0 = const()[name = string("mh_w_5_transpose_x_0"), val = bool(true)];
+         bool mh_w_5_transpose_y_0 = const()[name = string("mh_w_5_transpose_y_0"), val = bool(false)];
+         tensor<fp16, [1, 20, 1, 1536]> mh_w_5_cast_fp16 = matmul(transpose_x = mh_w_5_transpose_x_0, transpose_y = mh_w_5_transpose_y_0, x = var_199_cast_fp16, y = var_201_cast_fp16)[name = string("mh_w_5_cast_fp16")];
+         tensor<fp16, [1, 1536]> read_state_4 = read_state(input = encoder_attn_key_padding_mask)[name = string("read_state_4")];
+         tensor<int32, [1]> var_205_axes_0 = const()[name = string("op_205_axes_0"), val = tensor<int32, [1]>([1])];
+         tensor<fp16, [1, 1, 1536]> var_205_cast_fp16 = expand_dims(axes = var_205_axes_0, x = read_state_4)[name = string("op_205_cast_fp16")];
+         tensor<int32, [1]> var_206_axes_0 = const()[name = string("op_206_axes_0"), val = tensor<int32, [1]>([2])];
+         tensor<fp16, [1, 1, 1, 1536]> var_206_cast_fp16 = expand_dims(axes = var_206_axes_0, x = var_205_cast_fp16)[name = string("op_206_cast_fp16")];
+         tensor<fp16, [1, 20, 1, 1536]> mh_w_7_cast_fp16 = add(x = mh_w_5_cast_fp16, y = var_206_cast_fp16)[name = string("mh_w_7_cast_fp16")];
+         tensor<fp16, [1, 20, 1, 1536]> obj_23_cast_fp16 = softmax(axis = var_68, x = mh_w_7_cast_fp16)[name = string("obj_23_cast_fp16")];
+         tensor<int32, [4]> var_210 = const()[name = string("op_210"), val = tensor<int32, [4]>([1, 20, 64, -1])];
+         tensor<fp16, [1, 20, 64, 1536]> var_211_cast_fp16 = reshape(shape = var_210, x = obj_19_cast_fp16)[name = string("op_211_cast_fp16")];
+         bool attn_3_transpose_x_0 = const()[name = string("attn_3_transpose_x_0"), val = bool(false)];
+         bool attn_3_transpose_y_0 = const()[name = string("attn_3_transpose_y_0"), val = bool(true)];
+         tensor<fp16, [1, 20, 64, 1]> attn_3_cast_fp16 = matmul(transpose_x = attn_3_transpose_x_0, transpose_y = attn_3_transpose_y_0, x = var_211_cast_fp16, y = obj_23_cast_fp16)[name = string("attn_3_cast_fp16")];
+         tensor<int32, [4]> var_214 = const()[name = string("op_214"), val = tensor<int32, [4]>([1, 1280, 1, -1])];
+         tensor<fp16, [1, 1280, 1, 1]> input_3_cast_fp16 = reshape(shape = var_214, x = attn_3_cast_fp16)[name = string("input_3_cast_fp16")];
+         string obj_21_pad_type_0 = const()[name = string("obj_21_pad_type_0"), val = string("valid")];
+         tensor<int32, [2]> obj_21_strides_0 = const()[name = string("obj_21_strides_0"), val = tensor<int32, [2]>([1, 1])];
+         tensor<int32, [4]> obj_21_pad_0 = const()[name = string("obj_21_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+         tensor<int32, [2]> obj_21_dilations_0 = const()[name = string("obj_21_dilations_0"), val = tensor<int32, [2]>([1, 1])];
+         int32 obj_21_groups_0 = const()[name = string("obj_21_groups_0"), val = int32(1)];
+         tensor<fp16, [1280, 1280, 1, 1]> layers_0_encoder_attn_o_proj_weight_to_fp16 = const()[name = string("layers_0_encoder_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(150334592)))];
+         tensor<fp16, [1280]> layers_0_encoder_attn_o_proj_bias_to_fp16 = const()[name = string("layers_0_encoder_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(153611456)))];
+         tensor<fp16, [1, 1280, 1, 1]> obj_21_cast_fp16 = conv(bias = layers_0_encoder_attn_o_proj_bias_to_fp16, dilations = obj_21_dilations_0, groups = obj_21_groups_0, pad = obj_21_pad_0, pad_type = obj_21_pad_type_0, strides = obj_21_strides_0, weight = layers_0_encoder_attn_o_proj_weight_to_fp16, x = input_3_cast_fp16)[name = string("obj_21_cast_fp16")];
+         tensor<fp16, [1, 1280, 1, 1]> inputs_5_cast_fp16 = add(x = inputs_3_cast_fp16, y = obj_21_cast_fp16)[name = string("inputs_5_cast_fp16")];
+         tensor<int32, [1]> out_5_axes_0 = const()[name = string("out_5_axes_0"), val = tensor<int32, [1]>([1])];
+         fp16 var_232_to_fp16 = const()[name = string("op_232_to_fp16"), val = fp16(0x1.5p-17)];
+         tensor<fp16, [1, 1280, 1, 1]> out_5_cast_fp16 = layer_norm(axes = out_5_axes_0, epsilon = var_232_to_fp16, x = inputs_5_cast_fp16)[name = string("out_5_cast_fp16")];
+         tensor<fp16, [1280]> input_5_gamma_0_to_fp16 = const()[name = string("input_5_gamma_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(153614080)))];
+         tensor<fp16, [1280]> input_5_beta_0_to_fp16 = const()[name = string("input_5_beta_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(153616704)))];
+         fp16 input_5_epsilon_0_to_fp16 = const()[name = string("input_5_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)];
+         tensor<fp16, [1, 1280, 1, 1]> input_5_cast_fp16 = batch_norm(beta = input_5_beta_0_to_fp16, epsilon = input_5_epsilon_0_to_fp16, gamma = input_5_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_5_cast_fp16)[name = string("input_5_cast_fp16")];
+         string input_7_pad_type_0 = const()[name = string("input_7_pad_type_0"), val = string("valid")];
+         tensor<int32, [2]> input_7_strides_0 = const()[name = string("input_7_strides_0"), val = tensor<int32, [2]>([1, 1])];
+         tensor<int32, [4]> input_7_pad_0 = const()[name = string("input_7_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+         tensor<int32, [2]> input_7_dilations_0 = const()[name = string("input_7_dilations_0"), val = tensor<int32, [2]>([1, 1])];
+         int32 input_7_groups_0 = const()[name = string("input_7_groups_0"), val = int32(1)];
+         tensor<fp16, [5120, 1280, 1, 1]> layers_0_fc1_weight_to_fp16 = const()[name = string("layers_0_fc1_weight_to_fp16"), val = tensor<fp16, [5120, 1280, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(153619328)))];
+         tensor<fp16, [5120]> layers_0_fc1_bias_to_fp16 = const()[name = string("layers_0_fc1_bias_to_fp16"), val = tensor<fp16, [5120]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(166726592)))];
+         tensor<fp16, [1, 5120, 1, 1]> input_7_cast_fp16 = conv(bias = layers_0_fc1_bias_to_fp16, dilations = input_7_dilations_0, groups = input_7_groups_0, pad = input_7_pad_0, pad_type = input_7_pad_type_0, strides = input_7_strides_0, weight = layers_0_fc1_weight_to_fp16, x = input_5_cast_fp16)[name = string("input_7_cast_fp16")];
+         string input_9_mode_0 = const()[name = string("input_9_mode_0"), val = string("EXACT")];
+         tensor<fp16, [1, 5120, 1, 1]> input_9_cast_fp16 = gelu(mode = input_9_mode_0, x = input_7_cast_fp16)[name = string("input_9_cast_fp16")];
+         string hidden_states_3_pad_type_0 = const()[name = string("hidden_states_3_pad_type_0"), val = string("valid")];
+         tensor<int32, [2]> hidden_states_3_strides_0 = const()[name = string("hidden_states_3_strides_0"), val = tensor<int32, [2]>([1, 1])];
+         tensor<int32, [4]> hidden_states_3_pad_0 = const()[name = string("hidden_states_3_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+         tensor<int32, [2]> hidden_states_3_dilations_0 = const()[name = string("hidden_states_3_dilations_0"), val = tensor<int32, [2]>([1, 1])];
+         int32 hidden_states_3_groups_0 = const()[name = string("hidden_states_3_groups_0"), val = int32(1)];
+         tensor<fp16, [1280, 5120, 1, 1]> layers_0_fc2_weight_to_fp16 = const()[name = string("layers_0_fc2_weight_to_fp16"), val = tensor<fp16, [1280, 5120, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(166736896)))];
+         tensor<fp16, [1280]> layers_0_fc2_bias_to_fp16 = const()[name = string("layers_0_fc2_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(179844160)))];
+         tensor<fp16, [1, 1280, 1, 1]> hidden_states_3_cast_fp16 = conv(bias = layers_0_fc2_bias_to_fp16, dilations = hidden_states_3_dilations_0, groups = hidden_states_3_groups_0, pad = hidden_states_3_pad_0, pad_type = hidden_states_3_pad_type_0, strides = hidden_states_3_strides_0, weight = layers_0_fc2_weight_to_fp16, x = input_9_cast_fp16)[name = string("hidden_states_3_cast_fp16")];
+         tensor<fp16, [1, 1280, 1, 1]> inputs_7_cast_fp16 = add(x = inputs_5_cast_fp16, y = hidden_states_3_cast_fp16)[name = string("inputs_7_cast_fp16")];
+         tensor<int32, [4]> obj_35_begin_0 = const()[name = string("obj_35_begin_0"), val = tensor<int32, [4]>([1, 0, 0, 0])];
+         tensor<int32, [4]> obj_35_end_0 = const()[name = string("obj_35_end_0"), val = tensor<int32, [4]>([2, 1280, 1, 1536])];
+         tensor<bool, [4]> obj_35_end_mask_0 = const()[name = string("obj_35_end_mask_0"), val = tensor<bool, [4]>([false, true, true, true])];
+         tensor<fp16, [1, 1280, 1, 1536]> obj_35_cast_fp16 = slice_by_index(begin = obj_35_begin_0, end = obj_35_end_0, end_mask = obj_35_end_mask_0, x = read_state_2)[name = string("obj_35_cast_fp16")];
+         tensor<int32, [4]> obj_37_begin_0 = const()[name = string("obj_37_begin_0"), val = tensor<int32, [4]>([1, 0, 0, 0])];
190
+ tensor<int32, [4]> obj_37_end_0 = const()[name = string("obj_37_end_0"), val = tensor<int32, [4]>([2, 1280, 1, 1536])];
191
+ tensor<bool, [4]> obj_37_end_mask_0 = const()[name = string("obj_37_end_mask_0"), val = tensor<bool, [4]>([false, true, true, true])];
192
+ tensor<fp16, [1, 1280, 1, 1536]> obj_37_cast_fp16 = slice_by_index(begin = obj_37_begin_0, end = obj_37_end_0, end_mask = obj_37_end_mask_0, x = read_state_3)[name = string("obj_37_cast_fp16")];
193
+ int32 var_277 = const()[name = string("op_277"), val = int32(3)];
194
+ tensor<int32, [1]> out_7_axes_0 = const()[name = string("out_7_axes_0"), val = tensor<int32, [1]>([1])];
195
+ fp16 var_302_to_fp16 = const()[name = string("op_302_to_fp16"), val = fp16(0x1.5p-17)];
196
+ tensor<fp16, [1, 1280, 1, 1]> out_7_cast_fp16 = layer_norm(axes = out_7_axes_0, epsilon = var_302_to_fp16, x = inputs_7_cast_fp16)[name = string("out_7_cast_fp16")];
197
+ tensor<fp16, [1280]> obj_25_gamma_0_to_fp16 = const()[name = string("obj_25_gamma_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(179846784)))];
198
+ tensor<fp16, [1280]> obj_25_beta_0_to_fp16 = const()[name = string("obj_25_beta_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(179849408)))];
199
+ fp16 obj_25_epsilon_0_to_fp16 = const()[name = string("obj_25_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)];
200
+ tensor<fp16, [1, 1280, 1, 1]> obj_25_cast_fp16 = batch_norm(beta = obj_25_beta_0_to_fp16, epsilon = obj_25_epsilon_0_to_fp16, gamma = obj_25_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_7_cast_fp16)[name = string("obj_25_cast_fp16")];
201
+ string query_5_pad_type_0 = const()[name = string("query_5_pad_type_0"), val = string("valid")];
202
+ tensor<int32, [2]> query_5_strides_0 = const()[name = string("query_5_strides_0"), val = tensor<int32, [2]>([1, 1])];
203
+ tensor<int32, [4]> query_5_pad_0 = const()[name = string("query_5_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
204
+ tensor<int32, [2]> query_5_dilations_0 = const()[name = string("query_5_dilations_0"), val = tensor<int32, [2]>([1, 1])];
205
+ int32 query_5_groups_0 = const()[name = string("query_5_groups_0"), val = int32(1)];
206
+ tensor<fp16, [1280, 1280, 1, 1]> layers_1_self_attn_q_proj_weight_to_fp16 = const()[name = string("layers_1_self_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(179852032)))];
207
+ tensor<fp16, [1280]> layers_1_self_attn_q_proj_bias_to_fp16 = const()[name = string("layers_1_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(183128896)))];
208
+ tensor<fp16, [1, 1280, 1, 1]> query_5_cast_fp16 = conv(bias = layers_1_self_attn_q_proj_bias_to_fp16, dilations = query_5_dilations_0, groups = query_5_groups_0, pad = query_5_pad_0, pad_type = query_5_pad_type_0, strides = query_5_strides_0, weight = layers_1_self_attn_q_proj_weight_to_fp16, x = obj_25_cast_fp16)[name = string("query_5_cast_fp16")];
209
+ string current_key_pad_type_0 = const()[name = string("current_key_pad_type_0"), val = string("valid")];
210
+ tensor<int32, [2]> current_key_strides_0 = const()[name = string("current_key_strides_0"), val = tensor<int32, [2]>([1, 1])];
211
+ tensor<int32, [4]> current_key_pad_0 = const()[name = string("current_key_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
212
+ tensor<int32, [2]> current_key_dilations_0 = const()[name = string("current_key_dilations_0"), val = tensor<int32, [2]>([1, 1])];
213
+ int32 current_key_groups_0 = const()[name = string("current_key_groups_0"), val = int32(1)];
214
+ tensor<fp16, [1280, 1280, 1, 1]> layers_1_self_attn_k_proj_weight_to_fp16 = const()[name = string("layers_1_self_attn_k_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(183131520)))];
215
+ tensor<fp16, [1, 1280, 1, 1]> current_key_cast_fp16 = conv(dilations = current_key_dilations_0, groups = current_key_groups_0, pad = current_key_pad_0, pad_type = current_key_pad_type_0, strides = current_key_strides_0, weight = layers_1_self_attn_k_proj_weight_to_fp16, x = obj_25_cast_fp16)[name = string("current_key_cast_fp16")];
216
+ string current_value_pad_type_0 = const()[name = string("current_value_pad_type_0"), val = string("valid")];
217
+ tensor<int32, [2]> current_value_strides_0 = const()[name = string("current_value_strides_0"), val = tensor<int32, [2]>([1, 1])];
218
+ tensor<int32, [4]> current_value_pad_0 = const()[name = string("current_value_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
219
+ tensor<int32, [2]> current_value_dilations_0 = const()[name = string("current_value_dilations_0"), val = tensor<int32, [2]>([1, 1])];
220
+ int32 current_value_groups_0 = const()[name = string("current_value_groups_0"), val = int32(1)];
221
+ tensor<fp16, [1280, 1280, 1, 1]> layers_1_self_attn_v_proj_weight_to_fp16 = const()[name = string("layers_1_self_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(186408384)))];
222
+ tensor<fp16, [1280]> layers_1_self_attn_v_proj_bias_to_fp16 = const()[name = string("layers_1_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(189685248)))];
223
+ tensor<fp16, [1, 1280, 1, 1]> current_value_cast_fp16 = conv(bias = layers_1_self_attn_v_proj_bias_to_fp16, dilations = current_value_dilations_0, groups = current_value_groups_0, pad = current_value_pad_0, pad_type = current_value_pad_type_0, strides = current_value_strides_0, weight = layers_1_self_attn_v_proj_weight_to_fp16, x = obj_25_cast_fp16)[name = string("current_value_cast_fp16")];
224
+ tensor<fp16, [1, 1280, 1, 448]> var_340_cast_fp16 = mul(x = current_key_cast_fp16, y = var_129_cast_fp16)[name = string("op_340_cast_fp16")];
225
+ tensor<fp16, [1, 1280, 1, 448]> key_cast_fp16 = add(x = var_45_cast_fp16_1, y = var_340_cast_fp16)[name = string("key_cast_fp16")];
226
+ tensor<fp16, [1, 1280, 1, 448]> var_342_cast_fp16 = mul(x = current_value_cast_fp16, y = var_129_cast_fp16)[name = string("op_342_cast_fp16")];
227
+ tensor<fp16, [1, 1280, 1, 448]> value_cast_fp16 = add(x = var_50_cast_fp16_1, y = var_342_cast_fp16)[name = string("value_cast_fp16")];
228
+ tensor<int32, [4]> var_345 = const()[name = string("op_345"), val = tensor<int32, [4]>([1, 20, 64, -1])];
229
+ tensor<fp16, [1, 20, 64, 1]> mh_q_5_cast_fp16 = reshape(shape = var_345, x = query_5_cast_fp16)[name = string("mh_q_5_cast_fp16")];
230
+ fp16 var_347_to_fp16 = const()[name = string("op_347_to_fp16"), val = fp16(0x1p-3)];
231
+ tensor<fp16, [1, 20, 64, 1]> var_348_cast_fp16 = mul(x = mh_q_5_cast_fp16, y = var_347_to_fp16)[name = string("op_348_cast_fp16")];
232
+ tensor<int32, [4]> var_349 = const()[name = string("op_349"), val = tensor<int32, [4]>([1, 20, 64, -1])];
233
+ tensor<fp16, [1, 20, 64, 448]> var_350_cast_fp16 = reshape(shape = var_349, x = key_cast_fp16)[name = string("op_350_cast_fp16")];
234
+ bool mh_w_9_transpose_x_0 = const()[name = string("mh_w_9_transpose_x_0"), val = bool(true)];
235
+ bool mh_w_9_transpose_y_0 = const()[name = string("mh_w_9_transpose_y_0"), val = bool(false)];
236
+ tensor<fp16, [1, 20, 1, 448]> mh_w_9_cast_fp16 = matmul(transpose_x = mh_w_9_transpose_x_0, transpose_y = mh_w_9_transpose_y_0, x = var_348_cast_fp16, y = var_350_cast_fp16)[name = string("mh_w_9_cast_fp16")];
237
+ tensor<fp16, [1, 20, 1, 448]> mh_w_11_cast_fp16 = add(x = mh_w_9_cast_fp16, y = var_146_cast_fp16)[name = string("mh_w_11_cast_fp16")];
238
+ tensor<fp16, [1, 20, 1, 448]> var_358_cast_fp16 = softmax(axis = var_277, x = mh_w_11_cast_fp16)[name = string("op_358_cast_fp16")];
239
+ tensor<int32, [4]> var_359 = const()[name = string("op_359"), val = tensor<int32, [4]>([1, 20, 64, -1])];
240
+ tensor<fp16, [1, 20, 64, 448]> var_360_cast_fp16 = reshape(shape = var_359, x = value_cast_fp16)[name = string("op_360_cast_fp16")];
241
+ bool attn_5_transpose_x_0 = const()[name = string("attn_5_transpose_x_0"), val = bool(false)];
242
+ bool attn_5_transpose_y_0 = const()[name = string("attn_5_transpose_y_0"), val = bool(true)];
243
+ tensor<fp16, [1, 20, 64, 1]> attn_5_cast_fp16 = matmul(transpose_x = attn_5_transpose_x_0, transpose_y = attn_5_transpose_y_0, x = var_360_cast_fp16, y = var_358_cast_fp16)[name = string("attn_5_cast_fp16")];
244
+ tensor<int32, [4]> var_363 = const()[name = string("op_363"), val = tensor<int32, [4]>([1, 1280, 1, -1])];
245
+ tensor<fp16, [1, 1280, 1, 1]> input_11_cast_fp16 = reshape(shape = var_363, x = attn_5_cast_fp16)[name = string("input_11_cast_fp16")];
246
+ string obj_31_pad_type_0 = const()[name = string("obj_31_pad_type_0"), val = string("valid")];
247
+ tensor<int32, [2]> obj_31_strides_0 = const()[name = string("obj_31_strides_0"), val = tensor<int32, [2]>([1, 1])];
248
+ tensor<int32, [4]> obj_31_pad_0 = const()[name = string("obj_31_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
249
+ tensor<int32, [2]> obj_31_dilations_0 = const()[name = string("obj_31_dilations_0"), val = tensor<int32, [2]>([1, 1])];
250
+ int32 obj_31_groups_0 = const()[name = string("obj_31_groups_0"), val = int32(1)];
251
+ tensor<fp16, [1280, 1280, 1, 1]> layers_1_self_attn_o_proj_weight_to_fp16 = const()[name = string("layers_1_self_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(189687872)))];
252
+ tensor<fp16, [1280]> layers_1_self_attn_o_proj_bias_to_fp16 = const()[name = string("layers_1_self_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(192964736)))];
253
+ tensor<fp16, [1, 1280, 1, 1]> obj_31_cast_fp16 = conv(bias = layers_1_self_attn_o_proj_bias_to_fp16, dilations = obj_31_dilations_0, groups = obj_31_groups_0, pad = obj_31_pad_0, pad_type = obj_31_pad_type_0, strides = obj_31_strides_0, weight = layers_1_self_attn_o_proj_weight_to_fp16, x = input_11_cast_fp16)[name = string("obj_31_cast_fp16")];
254
+ tensor<fp16, [1, 1280, 1, 1]> inputs_9_cast_fp16 = add(x = inputs_7_cast_fp16, y = obj_31_cast_fp16)[name = string("inputs_9_cast_fp16")];
255
+ tensor<int32, [1]> out_9_axes_0 = const()[name = string("out_9_axes_0"), val = tensor<int32, [1]>([1])];
256
+ fp16 var_385_to_fp16 = const()[name = string("op_385_to_fp16"), val = fp16(0x1.5p-17)];
257
+ tensor<fp16, [1, 1280, 1, 1]> out_9_cast_fp16 = layer_norm(axes = out_9_axes_0, epsilon = var_385_to_fp16, x = inputs_9_cast_fp16)[name = string("out_9_cast_fp16")];
258
+ tensor<fp16, [1280]> obj_33_gamma_0_to_fp16 = const()[name = string("obj_33_gamma_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(192967360)))];
259
+ tensor<fp16, [1280]> obj_33_beta_0_to_fp16 = const()[name = string("obj_33_beta_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(192969984)))];
260
+ fp16 obj_33_epsilon_0_to_fp16 = const()[name = string("obj_33_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)];
261
+ tensor<fp16, [1, 1280, 1, 1]> obj_33_cast_fp16 = batch_norm(beta = obj_33_beta_0_to_fp16, epsilon = obj_33_epsilon_0_to_fp16, gamma = obj_33_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_9_cast_fp16)[name = string("obj_33_cast_fp16")];
262
+ string query_pad_type_0 = const()[name = string("query_pad_type_0"), val = string("valid")];
263
+ tensor<int32, [2]> query_strides_0 = const()[name = string("query_strides_0"), val = tensor<int32, [2]>([1, 1])];
264
+ tensor<int32, [4]> query_pad_0 = const()[name = string("query_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
265
+ tensor<int32, [2]> query_dilations_0 = const()[name = string("query_dilations_0"), val = tensor<int32, [2]>([1, 1])];
266
+ int32 query_groups_0 = const()[name = string("query_groups_0"), val = int32(1)];
267
+ tensor<fp16, [1280, 1280, 1, 1]> layers_1_encoder_attn_q_proj_weight_to_fp16 = const()[name = string("layers_1_encoder_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(192972608)))];
268
+ tensor<fp16, [1280]> layers_1_encoder_attn_q_proj_bias_to_fp16 = const()[name = string("layers_1_encoder_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(196249472)))];
269
+ tensor<fp16, [1, 1280, 1, 1]> query_cast_fp16 = conv(bias = layers_1_encoder_attn_q_proj_bias_to_fp16, dilations = query_dilations_0, groups = query_groups_0, pad = query_pad_0, pad_type = query_pad_type_0, strides = query_strides_0, weight = layers_1_encoder_attn_q_proj_weight_to_fp16, x = obj_33_cast_fp16)[name = string("query_cast_fp16")];
270
+ tensor<int32, [4]> var_405 = const()[name = string("op_405"), val = tensor<int32, [4]>([1, 20, 64, -1])];
271
+ tensor<fp16, [1, 20, 64, 1]> mh_q_cast_fp16 = reshape(shape = var_405, x = query_cast_fp16)[name = string("mh_q_cast_fp16")];
272
+ fp16 var_407_to_fp16 = const()[name = string("op_407_to_fp16"), val = fp16(0x1p-3)];
273
+ tensor<fp16, [1, 20, 64, 1]> var_408_cast_fp16 = mul(x = mh_q_cast_fp16, y = var_407_to_fp16)[name = string("op_408_cast_fp16")];
274
+ tensor<int32, [4]> var_409 = const()[name = string("op_409"), val = tensor<int32, [4]>([1, 20, 64, -1])];
275
+ tensor<fp16, [1, 20, 64, 1536]> var_410_cast_fp16 = reshape(shape = var_409, x = obj_35_cast_fp16)[name = string("op_410_cast_fp16")];
276
+ bool mh_w_13_transpose_x_0 = const()[name = string("mh_w_13_transpose_x_0"), val = bool(true)];
277
+ bool mh_w_13_transpose_y_0 = const()[name = string("mh_w_13_transpose_y_0"), val = bool(false)];
278
+ tensor<fp16, [1, 20, 1, 1536]> mh_w_13_cast_fp16 = matmul(transpose_x = mh_w_13_transpose_x_0, transpose_y = mh_w_13_transpose_y_0, x = var_408_cast_fp16, y = var_410_cast_fp16)[name = string("mh_w_13_cast_fp16")];
279
+ tensor<fp16, [1, 20, 1, 1536]> mh_w_cast_fp16 = add(x = mh_w_13_cast_fp16, y = var_206_cast_fp16)[name = string("mh_w_cast_fp16")];
280
+ tensor<fp16, [1, 20, 1, 1536]> obj_41_cast_fp16 = softmax(axis = var_277, x = mh_w_cast_fp16)[name = string("obj_41_cast_fp16")];
281
+ tensor<int32, [4]> var_419 = const()[name = string("op_419"), val = tensor<int32, [4]>([1, 20, 64, -1])];
282
+ tensor<fp16, [1, 20, 64, 1536]> var_420_cast_fp16 = reshape(shape = var_419, x = obj_37_cast_fp16)[name = string("op_420_cast_fp16")];
283
+ bool attn_transpose_x_0 = const()[name = string("attn_transpose_x_0"), val = bool(false)];
284
+ bool attn_transpose_y_0 = const()[name = string("attn_transpose_y_0"), val = bool(true)];
285
+ tensor<fp16, [1, 20, 64, 1]> attn_cast_fp16 = matmul(transpose_x = attn_transpose_x_0, transpose_y = attn_transpose_y_0, x = var_420_cast_fp16, y = obj_41_cast_fp16)[name = string("attn_cast_fp16")];
286
+ tensor<int32, [4]> var_423 = const()[name = string("op_423"), val = tensor<int32, [4]>([1, 1280, 1, -1])];
287
+ tensor<fp16, [1, 1280, 1, 1]> input_13_cast_fp16 = reshape(shape = var_423, x = attn_cast_fp16)[name = string("input_13_cast_fp16")];
288
+ string obj_39_pad_type_0 = const()[name = string("obj_39_pad_type_0"), val = string("valid")];
289
+ tensor<int32, [2]> obj_39_strides_0 = const()[name = string("obj_39_strides_0"), val = tensor<int32, [2]>([1, 1])];
290
+ tensor<int32, [4]> obj_39_pad_0 = const()[name = string("obj_39_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
291
+ tensor<int32, [2]> obj_39_dilations_0 = const()[name = string("obj_39_dilations_0"), val = tensor<int32, [2]>([1, 1])];
292
+ int32 obj_39_groups_0 = const()[name = string("obj_39_groups_0"), val = int32(1)];
293
+ tensor<fp16, [1280, 1280, 1, 1]> layers_1_encoder_attn_o_proj_weight_to_fp16 = const()[name = string("layers_1_encoder_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(196252096)))];
294
+ tensor<fp16, [1280]> layers_1_encoder_attn_o_proj_bias_to_fp16 = const()[name = string("layers_1_encoder_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(199528960)))];
295
+ tensor<fp16, [1, 1280, 1, 1]> obj_39_cast_fp16 = conv(bias = layers_1_encoder_attn_o_proj_bias_to_fp16, dilations = obj_39_dilations_0, groups = obj_39_groups_0, pad = obj_39_pad_0, pad_type = obj_39_pad_type_0, strides = obj_39_strides_0, weight = layers_1_encoder_attn_o_proj_weight_to_fp16, x = input_13_cast_fp16)[name = string("obj_39_cast_fp16")];
296
+ tensor<fp16, [1, 1280, 1, 1]> inputs_11_cast_fp16 = add(x = inputs_9_cast_fp16, y = obj_39_cast_fp16)[name = string("inputs_11_cast_fp16")];
297
+ tensor<int32, [1]> out_11_axes_0 = const()[name = string("out_11_axes_0"), val = tensor<int32, [1]>([1])];
298
+ fp16 var_444_to_fp16 = const()[name = string("op_444_to_fp16"), val = fp16(0x1.5p-17)];
299
+ tensor<fp16, [1, 1280, 1, 1]> out_11_cast_fp16 = layer_norm(axes = out_11_axes_0, epsilon = var_444_to_fp16, x = inputs_11_cast_fp16)[name = string("out_11_cast_fp16")];
300
+ tensor<fp16, [1280]> input_15_gamma_0_to_fp16 = const()[name = string("input_15_gamma_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(199531584)))];
301
+ tensor<fp16, [1280]> input_15_beta_0_to_fp16 = const()[name = string("input_15_beta_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(199534208)))];
302
+ fp16 input_15_epsilon_0_to_fp16 = const()[name = string("input_15_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)];
303
+ tensor<fp16, [1, 1280, 1, 1]> input_15_cast_fp16 = batch_norm(beta = input_15_beta_0_to_fp16, epsilon = input_15_epsilon_0_to_fp16, gamma = input_15_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_11_cast_fp16)[name = string("input_15_cast_fp16")];
304
+ string input_17_pad_type_0 = const()[name = string("input_17_pad_type_0"), val = string("valid")];
305
+ tensor<int32, [2]> input_17_strides_0 = const()[name = string("input_17_strides_0"), val = tensor<int32, [2]>([1, 1])];
306
+ tensor<int32, [4]> input_17_pad_0 = const()[name = string("input_17_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
307
+ tensor<int32, [2]> input_17_dilations_0 = const()[name = string("input_17_dilations_0"), val = tensor<int32, [2]>([1, 1])];
308
+ int32 input_17_groups_0 = const()[name = string("input_17_groups_0"), val = int32(1)];
309
+ tensor<fp16, [5120, 1280, 1, 1]> layers_1_fc1_weight_to_fp16 = const()[name = string("layers_1_fc1_weight_to_fp16"), val = tensor<fp16, [5120, 1280, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(199536832)))];
310
+ tensor<fp16, [5120]> layers_1_fc1_bias_to_fp16 = const()[name = string("layers_1_fc1_bias_to_fp16"), val = tensor<fp16, [5120]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(212644096)))];
311
+ tensor<fp16, [1, 5120, 1, 1]> input_17_cast_fp16 = conv(bias = layers_1_fc1_bias_to_fp16, dilations = input_17_dilations_0, groups = input_17_groups_0, pad = input_17_pad_0, pad_type = input_17_pad_type_0, strides = input_17_strides_0, weight = layers_1_fc1_weight_to_fp16, x = input_15_cast_fp16)[name = string("input_17_cast_fp16")];
312
+ string input_mode_0 = const()[name = string("input_mode_0"), val = string("EXACT")];
313
+ tensor<fp16, [1, 5120, 1, 1]> input_cast_fp16 = gelu(mode = input_mode_0, x = input_17_cast_fp16)[name = string("input_cast_fp16")];
314
+ string hidden_states_5_pad_type_0 = const()[name = string("hidden_states_5_pad_type_0"), val = string("valid")];
315
+ tensor<int32, [2]> hidden_states_5_strides_0 = const()[name = string("hidden_states_5_strides_0"), val = tensor<int32, [2]>([1, 1])];
316
+ tensor<int32, [4]> hidden_states_5_pad_0 = const()[name = string("hidden_states_5_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
317
+ tensor<int32, [2]> hidden_states_5_dilations_0 = const()[name = string("hidden_states_5_dilations_0"), val = tensor<int32, [2]>([1, 1])];
318
+ int32 hidden_states_5_groups_0 = const()[name = string("hidden_states_5_groups_0"), val = int32(1)];
319
+ tensor<fp16, [1280, 5120, 1, 1]> layers_1_fc2_weight_to_fp16 = const()[name = string("layers_1_fc2_weight_to_fp16"), val = tensor<fp16, [1280, 5120, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(212654400)))];
320
+ tensor<fp16, [1280]> layers_1_fc2_bias_to_fp16 = const()[name = string("layers_1_fc2_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(225761664)))];
321
+ tensor<fp16, [1, 1280, 1, 1]> hidden_states_5_cast_fp16 = conv(bias = layers_1_fc2_bias_to_fp16, dilations = hidden_states_5_dilations_0, groups = hidden_states_5_groups_0, pad = hidden_states_5_pad_0, pad_type = hidden_states_5_pad_type_0, strides = hidden_states_5_strides_0, weight = layers_1_fc2_weight_to_fp16, x = input_cast_fp16)[name = string("hidden_states_5_cast_fp16")];
322
+ tensor<fp16, [1, 1280, 1, 1]> inputs_cast_fp16 = add(x = inputs_11_cast_fp16, y = hidden_states_5_cast_fp16)[name = string("inputs_cast_fp16")];
323
+ tensor<int32, [1]> out_axes_0 = const()[name = string("out_axes_0"), val = tensor<int32, [1]>([1])];
324
+ fp16 var_487_to_fp16 = const()[name = string("op_487_to_fp16"), val = fp16(0x1.5p-17)];
325
+ tensor<fp16, [1, 1280, 1, 1]> out_cast_fp16 = layer_norm(axes = out_axes_0, epsilon = var_487_to_fp16, x = inputs_cast_fp16)[name = string("out_cast_fp16")];
326
+ tensor<fp16, [1280]> hidden_states_gamma_0_to_fp16 = const()[name = string("hidden_states_gamma_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(225764288)))];
327
+ tensor<fp16, [1280]> hidden_states_beta_0_to_fp16 = const()[name = string("hidden_states_beta_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(225766912)))];
328
+ fp16 hidden_states_epsilon_0_to_fp16 = const()[name = string("hidden_states_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)];
329
+ tensor<fp16, [1, 1280, 1, 1]> hidden_states_cast_fp16 = batch_norm(beta = hidden_states_beta_0_to_fp16, epsilon = hidden_states_epsilon_0_to_fp16, gamma = hidden_states_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_cast_fp16)[name = string("hidden_states_cast_fp16")];
330
+ tensor<int32, [1]> var_498_axes_0 = const()[name = string("op_498_axes_0"), val = tensor<int32, [1]>([2])];
331
+ tensor<fp16, [1, 1280, 1]> var_498_cast_fp16 = squeeze(axes = var_498_axes_0, x = hidden_states_cast_fp16)[name = string("op_498_cast_fp16")];
332
+ tensor<int32, [3]> var_501_perm_0 = const()[name = string("op_501_perm_0"), val = tensor<int32, [3]>([0, 2, 1])];
333
+ tensor<fp16, [51866]> linear_0_bias_0_to_fp16 = const()[name = string("linear_0_bias_0_to_fp16"), val = tensor<fp16, [51866]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(225769536)))];
334
+ tensor<fp16, [1, 1, 1280]> var_501_cast_fp16 = transpose(perm = var_501_perm_0, x = var_498_cast_fp16)[name = string("transpose_0")];
335
+ tensor<fp16, [1, 1, 51866]> logits = linear(bias = linear_0_bias_0_to_fp16, weight = embed_tokens_weight_to_fp16, x = var_501_cast_fp16)[name = string("linear_0_cast_fp16")];
336
+ int32 var_505 = const()[name = string("op_505"), val = int32(1)];
337
+ bool obj_45_interleave_0 = const()[name = string("obj_45_interleave_0"), val = bool(false)];
338
+ tensor<fp16, [1, 2560, 1, 1]> key_cache_updates = concat(axis = var_505, interleave = obj_45_interleave_0, values = (current_key_1_cast_fp16, current_key_cast_fp16))[name = string("obj_45_cast_fp16")];
339
+ int32 var_508 = const()[name = string("op_508"), val = int32(1)];
340
+ bool obj_47_interleave_0 = const()[name = string("obj_47_interleave_0"), val = bool(false)];
341
+ tensor<fp16, [1, 2560, 1, 1]> value_cache_updates = concat(axis = var_508, interleave = obj_47_interleave_0, values = (current_value_1_cast_fp16, current_value_cast_fp16))[name = string("obj_47_cast_fp16")];
342
+ tensor<int32, [4]> var_519_begin_0 = const()[name = string("op_519_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
343
+ tensor<int32, [4]> var_519_end_0 = const()[name = string("op_519_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
344
+ tensor<bool, [4]> var_519_end_mask_0 = const()[name = string("op_519_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
345
+ tensor<fp16, [1, 1, 1, 1536]> var_519_cast_fp16 = slice_by_index(begin = var_519_begin_0, end = var_519_end_0, end_mask = var_519_end_mask_0, x = obj_41_cast_fp16)[name = string("op_519_cast_fp16")];
346
+ tensor<int32, [4]> var_522_begin_0 = const()[name = string("op_522_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
347
+ tensor<int32, [4]> var_522_end_0 = const()[name = string("op_522_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
348
+ tensor<bool, [4]> var_522_end_mask_0 = const()[name = string("op_522_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
349
+ tensor<bool, [4]> var_522_squeeze_mask_0 = const()[name = string("op_522_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
350
+ tensor<fp16, [1, 1, 1536]> var_522_cast_fp16 = slice_by_index(begin = var_522_begin_0, end = var_522_end_0, end_mask = var_522_end_mask_0, squeeze_mask = var_522_squeeze_mask_0, x = var_519_cast_fp16)[name = string("op_522_cast_fp16")];
351
+ tensor<int32, [4]> var_537_begin_0 = const()[name = string("op_537_begin_0"), val = tensor<int32, [4]>([0, 1, 0, 0])];
352
+ tensor<int32, [4]> var_537_end_0 = const()[name = string("op_537_end_0"), val = tensor<int32, [4]>([1, 2, 1, 1536])];
353
+ tensor<bool, [4]> var_537_end_mask_0 = const()[name = string("op_537_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
354
+ tensor<fp16, [1, 1, 1, 1536]> var_537_cast_fp16 = slice_by_index(begin = var_537_begin_0, end = var_537_end_0, end_mask = var_537_end_mask_0, x = obj_41_cast_fp16)[name = string("op_537_cast_fp16")];
355
+ tensor<int32, [4]> var_540_begin_0 = const()[name = string("op_540_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
356
+ tensor<int32, [4]> var_540_end_0 = const()[name = string("op_540_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
357
+ tensor<bool, [4]> var_540_end_mask_0 = const()[name = string("op_540_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
358
+ tensor<bool, [4]> var_540_squeeze_mask_0 = const()[name = string("op_540_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
359
+ tensor<fp16, [1, 1, 1536]> var_540_cast_fp16 = slice_by_index(begin = var_540_begin_0, end = var_540_end_0, end_mask = var_540_end_mask_0, squeeze_mask = var_540_squeeze_mask_0, x = var_537_cast_fp16)[name = string("op_540_cast_fp16")];
360
+ tensor<int32, [4]> var_555_begin_0 = const()[name = string("op_555_begin_0"), val = tensor<int32, [4]>([0, 2, 0, 0])];
361
+ tensor<int32, [4]> var_555_end_0 = const()[name = string("op_555_end_0"), val = tensor<int32, [4]>([1, 3, 1, 1536])];
362
+ tensor<bool, [4]> var_555_end_mask_0 = const()[name = string("op_555_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
363
+ tensor<fp16, [1, 1, 1, 1536]> var_555_cast_fp16 = slice_by_index(begin = var_555_begin_0, end = var_555_end_0, end_mask = var_555_end_mask_0, x = obj_41_cast_fp16)[name = string("op_555_cast_fp16")];
364
+ tensor<int32, [4]> var_558_begin_0 = const()[name = string("op_558_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
365
+ tensor<int32, [4]> var_558_end_0 = const()[name = string("op_558_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
366
+ tensor<bool, [4]> var_558_end_mask_0 = const()[name = string("op_558_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
367
+ tensor<bool, [4]> var_558_squeeze_mask_0 = const()[name = string("op_558_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
368
+ tensor<fp16, [1, 1, 1536]> var_558_cast_fp16 = slice_by_index(begin = var_558_begin_0, end = var_558_end_0, end_mask = var_558_end_mask_0, squeeze_mask = var_558_squeeze_mask_0, x = var_555_cast_fp16)[name = string("op_558_cast_fp16")];
369
+ tensor<int32, [4]> var_573_begin_0 = const()[name = string("op_573_begin_0"), val = tensor<int32, [4]>([0, 3, 0, 0])];
370
+ tensor<int32, [4]> var_573_end_0 = const()[name = string("op_573_end_0"), val = tensor<int32, [4]>([1, 4, 1, 1536])];
371
+ tensor<bool, [4]> var_573_end_mask_0 = const()[name = string("op_573_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
372
+ tensor<fp16, [1, 1, 1, 1536]> var_573_cast_fp16 = slice_by_index(begin = var_573_begin_0, end = var_573_end_0, end_mask = var_573_end_mask_0, x = obj_41_cast_fp16)[name = string("op_573_cast_fp16")];
373
+ tensor<int32, [4]> var_576_begin_0 = const()[name = string("op_576_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
374
+ tensor<int32, [4]> var_576_end_0 = const()[name = string("op_576_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
375
+ tensor<bool, [4]> var_576_end_mask_0 = const()[name = string("op_576_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
376
+ tensor<bool, [4]> var_576_squeeze_mask_0 = const()[name = string("op_576_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
377
+ tensor<fp16, [1, 1, 1536]> var_576_cast_fp16 = slice_by_index(begin = var_576_begin_0, end = var_576_end_0, end_mask = var_576_end_mask_0, squeeze_mask = var_576_squeeze_mask_0, x = var_573_cast_fp16)[name = string("op_576_cast_fp16")];
378
+ tensor<int32, [4]> var_591_begin_0 = const()[name = string("op_591_begin_0"), val = tensor<int32, [4]>([0, 4, 0, 0])];
379
+ tensor<int32, [4]> var_591_end_0 = const()[name = string("op_591_end_0"), val = tensor<int32, [4]>([1, 5, 1, 1536])];
380
+ tensor<bool, [4]> var_591_end_mask_0 = const()[name = string("op_591_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
381
+ tensor<fp16, [1, 1, 1, 1536]> var_591_cast_fp16 = slice_by_index(begin = var_591_begin_0, end = var_591_end_0, end_mask = var_591_end_mask_0, x = obj_41_cast_fp16)[name = string("op_591_cast_fp16")];
382
+ tensor<int32, [4]> var_594_begin_0 = const()[name = string("op_594_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
383
+ tensor<int32, [4]> var_594_end_0 = const()[name = string("op_594_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
384
+ tensor<bool, [4]> var_594_end_mask_0 = const()[name = string("op_594_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
385
+ tensor<bool, [4]> var_594_squeeze_mask_0 = const()[name = string("op_594_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
386
+ tensor<fp16, [1, 1, 1536]> var_594_cast_fp16 = slice_by_index(begin = var_594_begin_0, end = var_594_end_0, end_mask = var_594_end_mask_0, squeeze_mask = var_594_squeeze_mask_0, x = var_591_cast_fp16)[name = string("op_594_cast_fp16")];
387
+ tensor<int32, [4]> var_609_begin_0 = const()[name = string("op_609_begin_0"), val = tensor<int32, [4]>([0, 5, 0, 0])];
388
+ tensor<int32, [4]> var_609_end_0 = const()[name = string("op_609_end_0"), val = tensor<int32, [4]>([1, 6, 1, 1536])];
389
+ tensor<bool, [4]> var_609_end_mask_0 = const()[name = string("op_609_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
390
+ tensor<fp16, [1, 1, 1, 1536]> var_609_cast_fp16 = slice_by_index(begin = var_609_begin_0, end = var_609_end_0, end_mask = var_609_end_mask_0, x = obj_41_cast_fp16)[name = string("op_609_cast_fp16")];
391
+ tensor<int32, [4]> var_612_begin_0 = const()[name = string("op_612_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
392
+ tensor<int32, [4]> var_612_end_0 = const()[name = string("op_612_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
393
+ tensor<bool, [4]> var_612_end_mask_0 = const()[name = string("op_612_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
394
+ tensor<bool, [4]> var_612_squeeze_mask_0 = const()[name = string("op_612_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
395
+ tensor<fp16, [1, 1, 1536]> var_612_cast_fp16 = slice_by_index(begin = var_612_begin_0, end = var_612_end_0, end_mask = var_612_end_mask_0, squeeze_mask = var_612_squeeze_mask_0, x = var_609_cast_fp16)[name = string("op_612_cast_fp16")];
396
+ tensor<int32, [4]> var_627_begin_0 = const()[name = string("op_627_begin_0"), val = tensor<int32, [4]>([0, 6, 0, 0])];
397
+ tensor<int32, [4]> var_627_end_0 = const()[name = string("op_627_end_0"), val = tensor<int32, [4]>([1, 7, 1, 1536])];
398
+ tensor<bool, [4]> var_627_end_mask_0 = const()[name = string("op_627_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
399
+ tensor<fp16, [1, 1, 1, 1536]> var_627_cast_fp16 = slice_by_index(begin = var_627_begin_0, end = var_627_end_0, end_mask = var_627_end_mask_0, x = obj_41_cast_fp16)[name = string("op_627_cast_fp16")];
400
+ tensor<int32, [4]> var_630_begin_0 = const()[name = string("op_630_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
401
+ tensor<int32, [4]> var_630_end_0 = const()[name = string("op_630_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
402
+ tensor<bool, [4]> var_630_end_mask_0 = const()[name = string("op_630_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
403
+ tensor<bool, [4]> var_630_squeeze_mask_0 = const()[name = string("op_630_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
404
+ tensor<fp16, [1, 1, 1536]> var_630_cast_fp16 = slice_by_index(begin = var_630_begin_0, end = var_630_end_0, end_mask = var_630_end_mask_0, squeeze_mask = var_630_squeeze_mask_0, x = var_627_cast_fp16)[name = string("op_630_cast_fp16")];
405
+ tensor<int32, [4]> var_645_begin_0 = const()[name = string("op_645_begin_0"), val = tensor<int32, [4]>([0, 7, 0, 0])];
406
+ tensor<int32, [4]> var_645_end_0 = const()[name = string("op_645_end_0"), val = tensor<int32, [4]>([1, 8, 1, 1536])];
407
+ tensor<bool, [4]> var_645_end_mask_0 = const()[name = string("op_645_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
408
+ tensor<fp16, [1, 1, 1, 1536]> var_645_cast_fp16 = slice_by_index(begin = var_645_begin_0, end = var_645_end_0, end_mask = var_645_end_mask_0, x = obj_41_cast_fp16)[name = string("op_645_cast_fp16")];
409
+ tensor<int32, [4]> var_648_begin_0 = const()[name = string("op_648_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
410
+ tensor<int32, [4]> var_648_end_0 = const()[name = string("op_648_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
411
+ tensor<bool, [4]> var_648_end_mask_0 = const()[name = string("op_648_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
412
+ tensor<bool, [4]> var_648_squeeze_mask_0 = const()[name = string("op_648_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
413
+ tensor<fp16, [1, 1, 1536]> var_648_cast_fp16 = slice_by_index(begin = var_648_begin_0, end = var_648_end_0, end_mask = var_648_end_mask_0, squeeze_mask = var_648_squeeze_mask_0, x = var_645_cast_fp16)[name = string("op_648_cast_fp16")];
414
+ tensor<int32, [4]> var_663_begin_0 = const()[name = string("op_663_begin_0"), val = tensor<int32, [4]>([0, 8, 0, 0])];
415
+ tensor<int32, [4]> var_663_end_0 = const()[name = string("op_663_end_0"), val = tensor<int32, [4]>([1, 9, 1, 1536])];
416
+ tensor<bool, [4]> var_663_end_mask_0 = const()[name = string("op_663_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
417
+ tensor<fp16, [1, 1, 1, 1536]> var_663_cast_fp16 = slice_by_index(begin = var_663_begin_0, end = var_663_end_0, end_mask = var_663_end_mask_0, x = obj_41_cast_fp16)[name = string("op_663_cast_fp16")];
418
+ tensor<int32, [4]> var_666_begin_0 = const()[name = string("op_666_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
419
+ tensor<int32, [4]> var_666_end_0 = const()[name = string("op_666_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
420
+ tensor<bool, [4]> var_666_end_mask_0 = const()[name = string("op_666_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
421
+ tensor<bool, [4]> var_666_squeeze_mask_0 = const()[name = string("op_666_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
422
+ tensor<fp16, [1, 1, 1536]> var_666_cast_fp16 = slice_by_index(begin = var_666_begin_0, end = var_666_end_0, end_mask = var_666_end_mask_0, squeeze_mask = var_666_squeeze_mask_0, x = var_663_cast_fp16)[name = string("op_666_cast_fp16")];
423
+ tensor<int32, [4]> var_681_begin_0 = const()[name = string("op_681_begin_0"), val = tensor<int32, [4]>([0, 9, 0, 0])];
424
+ tensor<int32, [4]> var_681_end_0 = const()[name = string("op_681_end_0"), val = tensor<int32, [4]>([1, 10, 1, 1536])];
425
+ tensor<bool, [4]> var_681_end_mask_0 = const()[name = string("op_681_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
426
+ tensor<fp16, [1, 1, 1, 1536]> var_681_cast_fp16 = slice_by_index(begin = var_681_begin_0, end = var_681_end_0, end_mask = var_681_end_mask_0, x = obj_41_cast_fp16)[name = string("op_681_cast_fp16")];
427
+ tensor<int32, [4]> var_684_begin_0 = const()[name = string("op_684_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
428
+ tensor<int32, [4]> var_684_end_0 = const()[name = string("op_684_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
429
+ tensor<bool, [4]> var_684_end_mask_0 = const()[name = string("op_684_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
430
+ tensor<bool, [4]> var_684_squeeze_mask_0 = const()[name = string("op_684_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
431
+ tensor<fp16, [1, 1, 1536]> var_684_cast_fp16 = slice_by_index(begin = var_684_begin_0, end = var_684_end_0, end_mask = var_684_end_mask_0, squeeze_mask = var_684_squeeze_mask_0, x = var_681_cast_fp16)[name = string("op_684_cast_fp16")];
432
+ tensor<int32, [4]> var_699_begin_0 = const()[name = string("op_699_begin_0"), val = tensor<int32, [4]>([0, 10, 0, 0])];
433
+ tensor<int32, [4]> var_699_end_0 = const()[name = string("op_699_end_0"), val = tensor<int32, [4]>([1, 11, 1, 1536])];
434
+ tensor<bool, [4]> var_699_end_mask_0 = const()[name = string("op_699_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
435
+ tensor<fp16, [1, 1, 1, 1536]> var_699_cast_fp16 = slice_by_index(begin = var_699_begin_0, end = var_699_end_0, end_mask = var_699_end_mask_0, x = obj_41_cast_fp16)[name = string("op_699_cast_fp16")];
436
+ tensor<int32, [4]> var_702_begin_0 = const()[name = string("op_702_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
437
+ tensor<int32, [4]> var_702_end_0 = const()[name = string("op_702_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
438
+ tensor<bool, [4]> var_702_end_mask_0 = const()[name = string("op_702_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
439
+ tensor<bool, [4]> var_702_squeeze_mask_0 = const()[name = string("op_702_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
440
+ tensor<fp16, [1, 1, 1536]> var_702_cast_fp16 = slice_by_index(begin = var_702_begin_0, end = var_702_end_0, end_mask = var_702_end_mask_0, squeeze_mask = var_702_squeeze_mask_0, x = var_699_cast_fp16)[name = string("op_702_cast_fp16")];
441
+ tensor<int32, [4]> var_717_begin_0 = const()[name = string("op_717_begin_0"), val = tensor<int32, [4]>([0, 11, 0, 0])];
442
+ tensor<int32, [4]> var_717_end_0 = const()[name = string("op_717_end_0"), val = tensor<int32, [4]>([1, 12, 1, 1536])];
443
+ tensor<bool, [4]> var_717_end_mask_0 = const()[name = string("op_717_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
444
+ tensor<fp16, [1, 1, 1, 1536]> var_717_cast_fp16 = slice_by_index(begin = var_717_begin_0, end = var_717_end_0, end_mask = var_717_end_mask_0, x = obj_41_cast_fp16)[name = string("op_717_cast_fp16")];
445
+ tensor<int32, [4]> var_720_begin_0 = const()[name = string("op_720_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
446
+ tensor<int32, [4]> var_720_end_0 = const()[name = string("op_720_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
447
+ tensor<bool, [4]> var_720_end_mask_0 = const()[name = string("op_720_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
448
+ tensor<bool, [4]> var_720_squeeze_mask_0 = const()[name = string("op_720_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
449
+ tensor<fp16, [1, 1, 1536]> var_720_cast_fp16 = slice_by_index(begin = var_720_begin_0, end = var_720_end_0, end_mask = var_720_end_mask_0, squeeze_mask = var_720_squeeze_mask_0, x = var_717_cast_fp16)[name = string("op_720_cast_fp16")];
450
+ tensor<int32, [4]> var_735_begin_0 = const()[name = string("op_735_begin_0"), val = tensor<int32, [4]>([0, 12, 0, 0])];
451
+ tensor<int32, [4]> var_735_end_0 = const()[name = string("op_735_end_0"), val = tensor<int32, [4]>([1, 13, 1, 1536])];
452
+ tensor<bool, [4]> var_735_end_mask_0 = const()[name = string("op_735_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
453
+ tensor<fp16, [1, 1, 1, 1536]> var_735_cast_fp16 = slice_by_index(begin = var_735_begin_0, end = var_735_end_0, end_mask = var_735_end_mask_0, x = obj_41_cast_fp16)[name = string("op_735_cast_fp16")];
454
+ tensor<int32, [4]> var_738_begin_0 = const()[name = string("op_738_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
455
+ tensor<int32, [4]> var_738_end_0 = const()[name = string("op_738_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
456
+ tensor<bool, [4]> var_738_end_mask_0 = const()[name = string("op_738_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
457
+ tensor<bool, [4]> var_738_squeeze_mask_0 = const()[name = string("op_738_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
458
+ tensor<fp16, [1, 1, 1536]> var_738_cast_fp16 = slice_by_index(begin = var_738_begin_0, end = var_738_end_0, end_mask = var_738_end_mask_0, squeeze_mask = var_738_squeeze_mask_0, x = var_735_cast_fp16)[name = string("op_738_cast_fp16")];
459
+ tensor<int32, [4]> var_753_begin_0 = const()[name = string("op_753_begin_0"), val = tensor<int32, [4]>([0, 13, 0, 0])];
460
+ tensor<int32, [4]> var_753_end_0 = const()[name = string("op_753_end_0"), val = tensor<int32, [4]>([1, 14, 1, 1536])];
461
+ tensor<bool, [4]> var_753_end_mask_0 = const()[name = string("op_753_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
462
+ tensor<fp16, [1, 1, 1, 1536]> var_753_cast_fp16 = slice_by_index(begin = var_753_begin_0, end = var_753_end_0, end_mask = var_753_end_mask_0, x = obj_41_cast_fp16)[name = string("op_753_cast_fp16")];
463
+ tensor<int32, [4]> var_756_begin_0 = const()[name = string("op_756_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
464
+ tensor<int32, [4]> var_756_end_0 = const()[name = string("op_756_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
465
+ tensor<bool, [4]> var_756_end_mask_0 = const()[name = string("op_756_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
466
+ tensor<bool, [4]> var_756_squeeze_mask_0 = const()[name = string("op_756_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
467
+ tensor<fp16, [1, 1, 1536]> var_756_cast_fp16 = slice_by_index(begin = var_756_begin_0, end = var_756_end_0, end_mask = var_756_end_mask_0, squeeze_mask = var_756_squeeze_mask_0, x = var_753_cast_fp16)[name = string("op_756_cast_fp16")];
468
+ tensor<int32, [4]> var_771_begin_0 = const()[name = string("op_771_begin_0"), val = tensor<int32, [4]>([0, 14, 0, 0])];
469
+ tensor<int32, [4]> var_771_end_0 = const()[name = string("op_771_end_0"), val = tensor<int32, [4]>([1, 15, 1, 1536])];
470
+ tensor<bool, [4]> var_771_end_mask_0 = const()[name = string("op_771_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
471
+ tensor<fp16, [1, 1, 1, 1536]> var_771_cast_fp16 = slice_by_index(begin = var_771_begin_0, end = var_771_end_0, end_mask = var_771_end_mask_0, x = obj_41_cast_fp16)[name = string("op_771_cast_fp16")];
472
+ tensor<int32, [4]> var_774_begin_0 = const()[name = string("op_774_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
473
+ tensor<int32, [4]> var_774_end_0 = const()[name = string("op_774_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
474
+ tensor<bool, [4]> var_774_end_mask_0 = const()[name = string("op_774_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
475
+ tensor<bool, [4]> var_774_squeeze_mask_0 = const()[name = string("op_774_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
476
+ tensor<fp16, [1, 1, 1536]> var_774_cast_fp16 = slice_by_index(begin = var_774_begin_0, end = var_774_end_0, end_mask = var_774_end_mask_0, squeeze_mask = var_774_squeeze_mask_0, x = var_771_cast_fp16)[name = string("op_774_cast_fp16")];
477
+ tensor<int32, [4]> var_789_begin_0 = const()[name = string("op_789_begin_0"), val = tensor<int32, [4]>([0, 15, 0, 0])];
478
+ tensor<int32, [4]> var_789_end_0 = const()[name = string("op_789_end_0"), val = tensor<int32, [4]>([1, 16, 1, 1536])];
479
+ tensor<bool, [4]> var_789_end_mask_0 = const()[name = string("op_789_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
480
+ tensor<fp16, [1, 1, 1, 1536]> var_789_cast_fp16 = slice_by_index(begin = var_789_begin_0, end = var_789_end_0, end_mask = var_789_end_mask_0, x = obj_41_cast_fp16)[name = string("op_789_cast_fp16")];
481
+ tensor<int32, [4]> var_792_begin_0 = const()[name = string("op_792_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
482
+ tensor<int32, [4]> var_792_end_0 = const()[name = string("op_792_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
483
+ tensor<bool, [4]> var_792_end_mask_0 = const()[name = string("op_792_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
484
+ tensor<bool, [4]> var_792_squeeze_mask_0 = const()[name = string("op_792_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
485
+ tensor<fp16, [1, 1, 1536]> var_792_cast_fp16 = slice_by_index(begin = var_792_begin_0, end = var_792_end_0, end_mask = var_792_end_mask_0, squeeze_mask = var_792_squeeze_mask_0, x = var_789_cast_fp16)[name = string("op_792_cast_fp16")];
486
+ tensor<int32, [4]> var_807_begin_0 = const()[name = string("op_807_begin_0"), val = tensor<int32, [4]>([0, 16, 0, 0])];
487
+ tensor<int32, [4]> var_807_end_0 = const()[name = string("op_807_end_0"), val = tensor<int32, [4]>([1, 17, 1, 1536])];
488
+ tensor<bool, [4]> var_807_end_mask_0 = const()[name = string("op_807_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
489
+ tensor<fp16, [1, 1, 1, 1536]> var_807_cast_fp16 = slice_by_index(begin = var_807_begin_0, end = var_807_end_0, end_mask = var_807_end_mask_0, x = obj_41_cast_fp16)[name = string("op_807_cast_fp16")];
490
+ tensor<int32, [4]> var_810_begin_0 = const()[name = string("op_810_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
491
+ tensor<int32, [4]> var_810_end_0 = const()[name = string("op_810_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
492
+ tensor<bool, [4]> var_810_end_mask_0 = const()[name = string("op_810_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
493
+ tensor<bool, [4]> var_810_squeeze_mask_0 = const()[name = string("op_810_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
494
+ tensor<fp16, [1, 1, 1536]> var_810_cast_fp16 = slice_by_index(begin = var_810_begin_0, end = var_810_end_0, end_mask = var_810_end_mask_0, squeeze_mask = var_810_squeeze_mask_0, x = var_807_cast_fp16)[name = string("op_810_cast_fp16")];
495
+ tensor<int32, [4]> var_825_begin_0 = const()[name = string("op_825_begin_0"), val = tensor<int32, [4]>([0, 17, 0, 0])];
496
+ tensor<int32, [4]> var_825_end_0 = const()[name = string("op_825_end_0"), val = tensor<int32, [4]>([1, 18, 1, 1536])];
497
+ tensor<bool, [4]> var_825_end_mask_0 = const()[name = string("op_825_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
498
+ tensor<fp16, [1, 1, 1, 1536]> var_825_cast_fp16 = slice_by_index(begin = var_825_begin_0, end = var_825_end_0, end_mask = var_825_end_mask_0, x = obj_41_cast_fp16)[name = string("op_825_cast_fp16")];
499
+ tensor<int32, [4]> var_828_begin_0 = const()[name = string("op_828_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
500
+ tensor<int32, [4]> var_828_end_0 = const()[name = string("op_828_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
501
+ tensor<bool, [4]> var_828_end_mask_0 = const()[name = string("op_828_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
502
+ tensor<bool, [4]> var_828_squeeze_mask_0 = const()[name = string("op_828_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
503
+ tensor<fp16, [1, 1, 1536]> var_828_cast_fp16 = slice_by_index(begin = var_828_begin_0, end = var_828_end_0, end_mask = var_828_end_mask_0, squeeze_mask = var_828_squeeze_mask_0, x = var_825_cast_fp16)[name = string("op_828_cast_fp16")];
504
+ tensor<int32, [4]> var_843_begin_0 = const()[name = string("op_843_begin_0"), val = tensor<int32, [4]>([0, 18, 0, 0])];
505
+ tensor<int32, [4]> var_843_end_0 = const()[name = string("op_843_end_0"), val = tensor<int32, [4]>([1, 19, 1, 1536])];
506
+ tensor<bool, [4]> var_843_end_mask_0 = const()[name = string("op_843_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
507
+ tensor<fp16, [1, 1, 1, 1536]> var_843_cast_fp16 = slice_by_index(begin = var_843_begin_0, end = var_843_end_0, end_mask = var_843_end_mask_0, x = obj_41_cast_fp16)[name = string("op_843_cast_fp16")];
508
+ tensor<int32, [4]> var_846_begin_0 = const()[name = string("op_846_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
509
+ tensor<int32, [4]> var_846_end_0 = const()[name = string("op_846_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
510
+ tensor<bool, [4]> var_846_end_mask_0 = const()[name = string("op_846_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
511
+ tensor<bool, [4]> var_846_squeeze_mask_0 = const()[name = string("op_846_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
512
+ tensor<fp16, [1, 1, 1536]> var_846_cast_fp16 = slice_by_index(begin = var_846_begin_0, end = var_846_end_0, end_mask = var_846_end_mask_0, squeeze_mask = var_846_squeeze_mask_0, x = var_843_cast_fp16)[name = string("op_846_cast_fp16")];
513
+ tensor<int32, [4]> var_861_begin_0 = const()[name = string("op_861_begin_0"), val = tensor<int32, [4]>([0, 19, 0, 0])];
514
+ tensor<int32, [4]> var_861_end_0 = const()[name = string("op_861_end_0"), val = tensor<int32, [4]>([1, 20, 1, 1536])];
515
+ tensor<bool, [4]> var_861_end_mask_0 = const()[name = string("op_861_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
516
+ tensor<fp16, [1, 1, 1, 1536]> var_861_cast_fp16 = slice_by_index(begin = var_861_begin_0, end = var_861_end_0, end_mask = var_861_end_mask_0, x = obj_41_cast_fp16)[name = string("op_861_cast_fp16")];
517
+ tensor<int32, [4]> var_864_begin_0 = const()[name = string("op_864_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
518
+ tensor<int32, [4]> var_864_end_0 = const()[name = string("op_864_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
519
+ tensor<bool, [4]> var_864_end_mask_0 = const()[name = string("op_864_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
520
+ tensor<bool, [4]> var_864_squeeze_mask_0 = const()[name = string("op_864_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
521
+ tensor<fp16, [1, 1, 1536]> var_864_cast_fp16 = slice_by_index(begin = var_864_begin_0, end = var_864_end_0, end_mask = var_864_end_mask_0, squeeze_mask = var_864_squeeze_mask_0, x = var_861_cast_fp16)[name = string("op_864_cast_fp16")];
522
+ int32 var_871 = const()[name = string("op_871"), val = int32(1)];
523
+ bool var_872_interleave_0 = const()[name = string("op_872_interleave_0"), val = bool(false)];
524
+ tensor<fp16, [1, 20, 1536]> var_872_cast_fp16 = concat(axis = var_871, interleave = var_872_interleave_0, values = (var_522_cast_fp16, var_540_cast_fp16, var_558_cast_fp16, var_576_cast_fp16, var_594_cast_fp16, var_612_cast_fp16, var_630_cast_fp16, var_648_cast_fp16, var_666_cast_fp16, var_684_cast_fp16, var_702_cast_fp16, var_720_cast_fp16, var_738_cast_fp16, var_756_cast_fp16, var_774_cast_fp16, var_792_cast_fp16, var_810_cast_fp16, var_828_cast_fp16, var_846_cast_fp16, var_864_cast_fp16))[name = string("op_872_cast_fp16")];
525
+ bool var_875 = const()[name = string("op_875"), val = bool(false)];
526
+ tensor<int32, [1]> obj_axes_0 = const()[name = string("obj_axes_0"), val = tensor<int32, [1]>([1])];
527
+ tensor<fp16, [1, 1536]> alignment_heads_weights = reduce_mean(axes = obj_axes_0, keep_dims = var_875, x = var_872_cast_fp16)[name = string("obj_cast_fp16")];
+ } -> (logits, key_cache_updates, value_cache_updates, alignment_heads_weights);
+ }
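The tail of this program computes the decoder's fourth output: each of the twenty slice_by_index pairs above extracts one [1, 1, 1536] cross-attention weight row for an alignment head, op_872 concatenates them along axis 1, and the final reduce_mean collapses them into the [1, 1536] alignment_heads_weights tensor used downstream for timestamp alignment. A minimal NumPy sketch of those last two ops (the per-head slices here are stand-ins):

```python
import numpy as np

# Stand-ins for the twenty [1, 1, 1536] per-head slices (var_522 ... var_864)
# produced by the slice_by_index ops above.
head_weights = [np.zeros((1, 1, 1536), dtype=np.float16) for _ in range(20)]

stacked = np.concatenate(head_weights, axis=1)  # op_872: [1, 20, 1536]
alignment_heads_weights = stacked.mean(axis=1)  # reduce_mean -> [1, 1536]
```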
distil-whisper_distil-large-v3/TextDecoder.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c1afaacaec2fac64e8867d758742347e10c849fdbf81c8761344b5c56a55b5d
+ size 225873332
distil-whisper_distil-large-v3/config.json ADDED
@@ -0,0 +1 @@
+ {"_name_or_path": "./distil-large-v3", "activation_dropout": 0.0, "activation_function": "gelu", "apply_spec_augment": false, "architectures": ["WhisperForConditionalGeneration"], "attention_dropout": 0.0, "begin_suppress_tokens": [220, 50257], "bos_token_id": 50257, "classifier_proj_size": 256, "d_model": 1280, "decoder_attention_heads": 20, "decoder_ffn_dim": 5120, "decoder_layerdrop": 0.0, "decoder_layers": 2, "decoder_start_token_id": 50258, "dropout": 0.0, "encoder_attention_heads": 20, "encoder_ffn_dim": 5120, "encoder_layerdrop": 0.0, "encoder_layers": 32, "eos_token_id": 50257, "init_std": 0.02, "is_encoder_decoder": true, "mask_feature_length": 10, "mask_feature_min_masks": 0, "mask_feature_prob": 0.0, "mask_time_length": 10, "mask_time_min_masks": 2, "mask_time_prob": 0.05, "max_length": 448, "max_source_positions": 1500, "max_target_positions": 448, "median_filter_width": 7, "model_type": "whisper", "num_hidden_layers": 32, "num_mel_bins": 128, "pad_token_id": 50256, "scale_embedding": false, "torch_dtype": "float16", "transformers_version": "4.38.0.dev0", "use_cache": true, "use_weighted_layer_sum": false, "vocab_size": 51866}
distil-whisper_distil-large-v3/generation_config.json ADDED
@@ -0,0 +1 @@
+ {"alignment_heads": [[7, 0], [10, 17], [12, 18], [13, 12], [16, 1], [17, 14], [19, 11], [21, 4], [24, 1], [25, 6]], "begin_suppress_tokens": [220, 50257], "bos_token_id": 50257, "decoder_start_token_id": 50258, "eos_token_id": 50257, "forced_decoder_ids": [[1, null], [2, 50360]], "is_multilingual": true, "lang_to_id": {"<|af|>": 50327, "<|am|>": 50334, "<|ar|>": 50272, "<|as|>": 50350, "<|az|>": 50304, "<|ba|>": 50355, "<|be|>": 50330, "<|bg|>": 50292, "<|bn|>": 50302, "<|bo|>": 50347, "<|br|>": 50309, "<|bs|>": 50315, "<|ca|>": 50270, "<|cs|>": 50283, "<|cy|>": 50297, "<|da|>": 50285, "<|de|>": 50261, "<|el|>": 50281, "<|en|>": 50259, "<|es|>": 50262, "<|et|>": 50307, "<|eu|>": 50310, "<|fa|>": 50300, "<|fi|>": 50277, "<|fo|>": 50338, "<|fr|>": 50265, "<|gl|>": 50319, "<|gu|>": 50333, "<|haw|>": 50352, "<|ha|>": 50354, "<|he|>": 50279, "<|hi|>": 50276, "<|hr|>": 50291, "<|ht|>": 50339, "<|hu|>": 50286, "<|hy|>": 50312, "<|id|>": 50275, "<|is|>": 50311, "<|it|>": 50274, "<|ja|>": 50266, "<|jw|>": 50356, "<|ka|>": 50329, "<|kk|>": 50316, "<|km|>": 50323, "<|kn|>": 50306, "<|ko|>": 50264, "<|la|>": 50294, "<|lb|>": 50345, "<|ln|>": 50353, "<|lo|>": 50336, "<|lt|>": 50293, "<|lv|>": 50301, "<|mg|>": 50349, "<|mi|>": 50295, "<|mk|>": 50308, "<|ml|>": 50296, "<|mn|>": 50314, "<|mr|>": 50320, "<|ms|>": 50282, "<|mt|>": 50343, "<|my|>": 50346, "<|ne|>": 50313, "<|nl|>": 50271, "<|nn|>": 50342, "<|no|>": 50288, "<|oc|>": 50328, "<|pa|>": 50321, "<|pl|>": 50269, "<|ps|>": 50340, "<|pt|>": 50267, "<|ro|>": 50284, "<|ru|>": 50263, "<|sa|>": 50344, "<|sd|>": 50332, "<|si|>": 50322, "<|sk|>": 50298, "<|sl|>": 50305, "<|sn|>": 50324, "<|so|>": 50326, "<|sq|>": 50317, "<|sr|>": 50303, "<|su|>": 50357, "<|sv|>": 50273, "<|sw|>": 50318, "<|ta|>": 50287, "<|te|>": 50299, "<|tg|>": 50331, "<|th|>": 50289, "<|tk|>": 50341, "<|tl|>": 50348, "<|tr|>": 50268, "<|tt|>": 50351, "<|uk|>": 50280, "<|ur|>": 50290, "<|uz|>": 50337, "<|vi|>": 50278, "<|yi|>": 50335, "<|yo|>": 50325, "<|yue|>": 50358, "<|zh|>": 50260}, "language": "<|en|>", "max_initial_timestamp_index": 50, "max_length": 448, "no_timestamps_token_id": 50364, "pad_token_id": 50257, "prev_sot_token_id": 50362, "return_timestamps": false, "suppress_tokens": [1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 359, 503, 522, 542, 873, 893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627, 3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647, 7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793, 14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675, 22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865, 42863, 47425, 49870, 50254, 50258, 50359, 50360, 50361, 50362, 50363], "task": "transcribe", "task_to_id": {"transcribe": 50360, "translate": 50359}, "transformers_version": "4.38.0.dev0"}
distil-whisper_distil-large-v3_turbo/AudioEncoder.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5916646e39691156fca37ff36b96f162e80acce84cd8ee2e971115edf412a87a
+ size 243
distil-whisper_distil-large-v3_turbo/AudioEncoder.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dbd63faaf82dd7f50bad861be88605fbdce6f59a2ced9954ef1a54a51f1e26ac
+ size 434
distil-whisper_distil-large-v3_turbo/AudioEncoder.mlmodelc/metadata.json ADDED
@@ -0,0 +1,91 @@
+ [
+ {
+ "metadataOutputVersion" : "3.0",
+ "storagePrecision" : "Float16",
+ "outputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 1280 × 1 × 1500)",
+ "shortDescription" : "",
+ "shape" : "[1, 1280, 1, 1500]",
+ "name" : "encoder_output_embeds",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 2 × 1280 × 1 × 1536)",
+ "shortDescription" : "",
+ "shape" : "[2, 1280, 1, 1536]",
+ "name" : "encoder_attn_key_cache",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 2 × 1280 × 1 × 1536)",
+ "shortDescription" : "",
+ "shape" : "[2, 1280, 1, 1536]",
+ "name" : "encoder_attn_value_cache",
+ "type" : "MultiArray"
+ }
+ ],
+ "modelParameters" : [
+
+ ],
+ "specificationVersion" : 9,
+ "mlProgramOperationTypeHistogram" : {
+ "Pad" : 2,
+ "Ios18.batchNorm" : 65,
+ "Ios18.conv" : 198,
+ "Ios18.gelu" : 34,
+ "Ios18.concat" : 674,
+ "Ios16.einsum" : 5120,
+ "Ios18.add" : 65,
+ "Ios18.softmax" : 2560,
+ "Ios18.sliceByIndex" : 4480,
+ "Ios18.layerNorm" : 65,
+ "Ios18.transpose" : 32,
+ "Ios18.mul" : 2560
+ },
+ "computePrecision" : "Mixed (Float16, Int32)",
+ "isUpdatable" : "0",
+ "stateSchema" : [
+
+ ],
+ "availability" : {
+ "macOS" : "15.0",
+ "tvOS" : "18.0",
+ "visionOS" : "2.0",
+ "watchOS" : "11.0",
+ "iOS" : "18.0",
+ "macCatalyst" : "18.0"
+ },
+ "modelType" : {
+ "name" : "MLModelType_mlProgram"
+ },
+ "userDefinedMetadata" : {
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
+ "com.github.apple.coremltools.source" : "torch==2.5.1",
+ "com.github.apple.coremltools.version" : "8.0"
+ },
+ "inputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 128 × 1 × 3000)",
+ "shortDescription" : "",
+ "shape" : "[1, 128, 1, 3000]",
+ "name" : "melspectrogram_features",
+ "type" : "MultiArray"
+ }
+ ],
+ "generatedClassName" : "AudioEncoderStateful",
+ "method" : "predict"
+ }
+ ]
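Per this schema, the turbo encoder emits more than the 1,500-frame embeddings: it also precomputes the cross-attention key/value caches ([2, 1280, 1, 1536], one slice per decoder layer, presumably padded from 1500 to 1536 positions, hence the decoder's separate encoder_attn_key_padding_mask state). A hypothetical single pass with coremltools (the metadata above indicates coremltools 8 / specification 9):

```python
import numpy as np
import coremltools as ct

encoder = ct.models.CompiledMLModel(
    "distil-whisper_distil-large-v3_turbo/AudioEncoder.mlmodelc")

mel = np.zeros((1, 128, 1, 3000), dtype=np.float16)  # MelSpectrogram output
out = encoder.predict({"melspectrogram_features": mel})

out["encoder_output_embeds"].shape    # (1, 1280, 1, 1500)
out["encoder_attn_key_cache"].shape   # (2, 1280, 1, 1536)
```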
distil-whisper_distil-large-v3_turbo/AudioEncoder.mlmodelc/model.mil ADDED
The diff for this file is too large to render. See raw diff
 
distil-whisper_distil-large-v3_turbo/AudioEncoder.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b43a5d9e21e95067e0af8cf4b8fcbd16cc8e6f99993084f5e67cdf81bde16e79
+ size 1287087104
distil-whisper_distil-large-v3_turbo/LICENSE_NOTICE.txt ADDED
@@ -0,0 +1,7 @@
+ Argmax proprietary and confidential. Under NDA.
+
+ Copyright 2024 Argmax, Inc. All rights reserved.
+
+ Unauthorized access, copying, use, distribution, and/or commercialization of this file, via any medium or means, is strictly prohibited.
+
+ Please contact Argmax for licensing information at [email protected].
distil-whisper_distil-large-v3_turbo/MelSpectrogram.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0980462db89a546e1e90888ea38e0a5ddf1f1fec84608802cdbb12f8a5cc7215
+ size 243
distil-whisper_distil-large-v3_turbo/MelSpectrogram.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6475c6649047ce609e3fe84b2525843c03342820662404540baf28146c174014
+ size 329
distil-whisper_distil-large-v3_turbo/MelSpectrogram.mlmodelc/metadata.json ADDED
@@ -0,0 +1,74 @@
+ [
+ {
+ "metadataOutputVersion" : "3.0",
+ "storagePrecision" : "Float16",
+ "outputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 128 × 1 × 3000)",
+ "shortDescription" : "",
+ "shape" : "[1, 128, 1, 3000]",
+ "name" : "melspectrogram_features",
+ "type" : "MultiArray"
+ }
+ ],
+ "modelParameters" : [
+
+ ],
+ "specificationVersion" : 9,
+ "mlProgramOperationTypeHistogram" : {
+ "Ios18.mul" : 2,
+ "Ios18.square" : 2,
+ "Ios18.conv" : 2,
+ "Ios18.matmul" : 1,
+ "Ios18.expandDims" : 4,
+ "Ios18.sub" : 1,
+ "Ios18.log" : 1,
+ "Ios18.add" : 3,
+ "Ios18.sliceByIndex" : 1,
+ "Ios18.maximum" : 1,
+ "Ios18.squeeze" : 2,
+ "Ios18.reshape" : 2,
+ "Ios16.reduceMax" : 1,
+ "Identity" : 1,
+ "Pad" : 1
+ },
+ "computePrecision" : "Mixed (Float16, Float32, Int32)",
+ "isUpdatable" : "0",
+ "stateSchema" : [
+
+ ],
+ "availability" : {
+ "macOS" : "15.0",
+ "tvOS" : "18.0",
+ "visionOS" : "2.0",
+ "watchOS" : "11.0",
+ "iOS" : "18.0",
+ "macCatalyst" : "18.0"
+ },
+ "modelType" : {
+ "name" : "MLModelType_mlProgram"
+ },
+ "userDefinedMetadata" : {
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
+ "com.github.apple.coremltools.source" : "torch==2.5.1",
+ "com.github.apple.coremltools.version" : "8.0"
+ },
+ "inputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 480000)",
+ "shortDescription" : "",
+ "shape" : "[480000]",
+ "name" : "audio",
+ "type" : "MultiArray"
+ }
+ ],
+ "generatedClassName" : "MelSpectrogram",
+ "method" : "predict"
+ }
+ ]
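The fixed shapes encode Whisper's 30-second window: 480,000 samples of 16 kHz mono audio in, 128 mel bins × 3,000 frames out, one frame per 160-sample hop (the stride constant in the MIL program below):

```python
SAMPLE_RATE = 16_000  # Whisper's expected input rate
HOP = 160             # stride of the STFT convolutions in the MIL below

assert 480_000 / SAMPLE_RATE == 30.0  # one full window is 30 s
assert 480_000 // HOP == 3_000        # -> 3,000 mel frames
```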
distil-whisper_distil-large-v3_turbo/MelSpectrogram.mlmodelc/model.mil ADDED
@@ -0,0 +1,66 @@
+ program(1.3)
+ [buildInfo = dict<string, string>({{"coremlc-component-MIL", "3401.3.1"}, {"coremlc-version", "3401.4.1"}, {"coremltools-component-torch", "2.5.1"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.0"}})]
+ {
+ func main<ios18>(tensor<fp16, [480000]> audio) {
+ tensor<int32, [3]> var_10 = const()[name = string("op_10"), val = tensor<int32, [3]>([1, 1, 480000])];
+ tensor<fp16, [1, 1, 480000]> input_1_cast_fp16 = reshape(shape = var_10, x = audio)[name = string("input_1_cast_fp16")];
+ tensor<int32, [6]> input_3_pad_0 = const()[name = string("input_3_pad_0"), val = tensor<int32, [6]>([0, 0, 0, 0, 200, 200])];
+ string input_3_mode_0 = const()[name = string("input_3_mode_0"), val = string("reflect")];
+ fp16 const_1_to_fp16 = const()[name = string("const_1_to_fp16"), val = fp16(0x0p+0)];
+ tensor<fp16, [1, 1, 480400]> input_3_cast_fp16 = pad(constant_val = const_1_to_fp16, mode = input_3_mode_0, pad = input_3_pad_0, x = input_1_cast_fp16)[name = string("input_3_cast_fp16")];
+ tensor<int32, [1]> var_22 = const()[name = string("op_22"), val = tensor<int32, [1]>([480400])];
+ tensor<fp16, [480400]> input_cast_fp16 = reshape(shape = var_22, x = input_3_cast_fp16)[name = string("input_cast_fp16")];
+ tensor<int32, [1]> expand_dims_0_axes_0 = const()[name = string("expand_dims_0_axes_0"), val = tensor<int32, [1]>([0])];
+ tensor<fp16, [1, 480400]> expand_dims_0_cast_fp16 = expand_dims(axes = expand_dims_0_axes_0, x = input_cast_fp16)[name = string("expand_dims_0_cast_fp16")];
+ tensor<int32, [1]> expand_dims_3 = const()[name = string("expand_dims_3"), val = tensor<int32, [1]>([160])];
+ tensor<int32, [1]> expand_dims_4_axes_0 = const()[name = string("expand_dims_4_axes_0"), val = tensor<int32, [1]>([1])];
+ tensor<fp16, [1, 1, 480400]> expand_dims_4_cast_fp16 = expand_dims(axes = expand_dims_4_axes_0, x = expand_dims_0_cast_fp16)[name = string("expand_dims_4_cast_fp16")];
+ string conv_0_pad_type_0 = const()[name = string("conv_0_pad_type_0"), val = string("valid")];
+ tensor<int32, [2]> conv_0_pad_0 = const()[name = string("conv_0_pad_0"), val = tensor<int32, [2]>([0, 0])];
+ tensor<int32, [1]> conv_0_dilations_0 = const()[name = string("conv_0_dilations_0"), val = tensor<int32, [1]>([1])];
+ int32 conv_0_groups_0 = const()[name = string("conv_0_groups_0"), val = int32(1)];
+ tensor<fp16, [201, 1, 400]> expand_dims_1_to_fp16 = const()[name = string("expand_dims_1_to_fp16"), val = tensor<fp16, [201, 1, 400]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(64)))];
+ tensor<fp16, [1, 201, 3001]> conv_0_cast_fp16 = conv(dilations = conv_0_dilations_0, groups = conv_0_groups_0, pad = conv_0_pad_0, pad_type = conv_0_pad_type_0, strides = expand_dims_3, weight = expand_dims_1_to_fp16, x = expand_dims_4_cast_fp16)[name = string("conv_0_cast_fp16")];
+ string conv_1_pad_type_0 = const()[name = string("conv_1_pad_type_0"), val = string("valid")];
+ tensor<int32, [2]> conv_1_pad_0 = const()[name = string("conv_1_pad_0"), val = tensor<int32, [2]>([0, 0])];
+ tensor<int32, [1]> conv_1_dilations_0 = const()[name = string("conv_1_dilations_0"), val = tensor<int32, [1]>([1])];
+ int32 conv_1_groups_0 = const()[name = string("conv_1_groups_0"), val = int32(1)];
+ tensor<fp16, [201, 1, 400]> expand_dims_2_to_fp16 = const()[name = string("expand_dims_2_to_fp16"), val = tensor<fp16, [201, 1, 400]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(160960)))];
+ tensor<fp16, [1, 201, 3001]> conv_1_cast_fp16 = conv(dilations = conv_1_dilations_0, groups = conv_1_groups_0, pad = conv_1_pad_0, pad_type = conv_1_pad_type_0, strides = expand_dims_3, weight = expand_dims_2_to_fp16, x = expand_dims_4_cast_fp16)[name = string("conv_1_cast_fp16")];
+ tensor<int32, [1]> squeeze_0_axes_0 = const()[name = string("squeeze_0_axes_0"), val = tensor<int32, [1]>([0])];
+ tensor<fp16, [201, 3001]> squeeze_0_cast_fp16 = squeeze(axes = squeeze_0_axes_0, x = conv_0_cast_fp16)[name = string("squeeze_0_cast_fp16")];
+ tensor<int32, [1]> squeeze_1_axes_0 = const()[name = string("squeeze_1_axes_0"), val = tensor<int32, [1]>([0])];
+ tensor<fp16, [201, 3001]> squeeze_1_cast_fp16 = squeeze(axes = squeeze_1_axes_0, x = conv_1_cast_fp16)[name = string("squeeze_1_cast_fp16")];
+ tensor<fp16, [201, 3001]> square_0_cast_fp16 = square(x = squeeze_0_cast_fp16)[name = string("square_0_cast_fp16")];
+ tensor<fp16, [201, 3001]> square_1_cast_fp16 = square(x = squeeze_1_cast_fp16)[name = string("square_1_cast_fp16")];
+ tensor<fp16, [201, 3001]> add_1_cast_fp16 = add(x = square_0_cast_fp16, y = square_1_cast_fp16)[name = string("add_1_cast_fp16")];
+ tensor<fp16, [201, 3001]> magnitudes_1_cast_fp16 = identity(x = add_1_cast_fp16)[name = string("magnitudes_1_cast_fp16")];
+ tensor<int32, [2]> magnitudes_begin_0 = const()[name = string("magnitudes_begin_0"), val = tensor<int32, [2]>([0, 0])];
+ tensor<int32, [2]> magnitudes_end_0 = const()[name = string("magnitudes_end_0"), val = tensor<int32, [2]>([201, 3000])];
+ tensor<bool, [2]> magnitudes_end_mask_0 = const()[name = string("magnitudes_end_mask_0"), val = tensor<bool, [2]>([true, false])];
+ tensor<fp16, [201, 3000]> magnitudes_cast_fp16 = slice_by_index(begin = magnitudes_begin_0, end = magnitudes_end_0, end_mask = magnitudes_end_mask_0, x = magnitudes_1_cast_fp16)[name = string("magnitudes_cast_fp16")];
+ bool mel_spec_1_transpose_x_0 = const()[name = string("mel_spec_1_transpose_x_0"), val = bool(false)];
+ bool mel_spec_1_transpose_y_0 = const()[name = string("mel_spec_1_transpose_y_0"), val = bool(false)];
+ tensor<fp16, [128, 201]> mel_filters_to_fp16 = const()[name = string("mel_filters_to_fp16"), val = tensor<fp16, [128, 201]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(321856)))];
+ tensor<fp16, [128, 3000]> mel_spec_1_cast_fp16 = matmul(transpose_x = mel_spec_1_transpose_x_0, transpose_y = mel_spec_1_transpose_y_0, x = mel_filters_to_fp16, y = magnitudes_cast_fp16)[name = string("mel_spec_1_cast_fp16")];
+ fp16 var_41_to_fp16 = const()[name = string("op_41_to_fp16"), val = fp16(0x1p-24)];
+ tensor<fp16, [128, 3000]> mel_spec_cast_fp16 = add(x = mel_spec_1_cast_fp16, y = var_41_to_fp16)[name = string("mel_spec_cast_fp16")];
+ fp32 log_0_epsilon_0 = const()[name = string("log_0_epsilon_0"), val = fp32(0x1p-149)];
+ tensor<fp16, [128, 3000]> log_0_cast_fp16 = log(epsilon = log_0_epsilon_0, x = mel_spec_cast_fp16)[name = string("log_0_cast_fp16")];
+ fp16 mul_0_y_0_to_fp16 = const()[name = string("mul_0_y_0_to_fp16"), val = fp16(0x1.bccp-2)];
+ tensor<fp16, [128, 3000]> mul_0_cast_fp16 = mul(x = log_0_cast_fp16, y = mul_0_y_0_to_fp16)[name = string("mul_0_cast_fp16")];
+ bool var_44_keep_dims_0 = const()[name = string("op_44_keep_dims_0"), val = bool(false)];
+ fp16 var_44_cast_fp16 = reduce_max(keep_dims = var_44_keep_dims_0, x = mul_0_cast_fp16)[name = string("op_44_cast_fp16")];
+ fp16 var_46_to_fp16 = const()[name = string("op_46_to_fp16"), val = fp16(0x1p+3)];
+ fp16 var_47_cast_fp16 = sub(x = var_44_cast_fp16, y = var_46_to_fp16)[name = string("op_47_cast_fp16")];
+ tensor<fp16, [128, 3000]> log_spec_3_cast_fp16 = maximum(x = mul_0_cast_fp16, y = var_47_cast_fp16)[name = string("log_spec_3_cast_fp16")];
+ fp16 var_50_to_fp16 = const()[name = string("op_50_to_fp16"), val = fp16(0x1p+2)];
+ tensor<fp16, [128, 3000]> var_51_cast_fp16 = add(x = log_spec_3_cast_fp16, y = var_50_to_fp16)[name = string("op_51_cast_fp16")];
+ fp16 _inversed_log_spec_y_0_to_fp16 = const()[name = string("_inversed_log_spec_y_0_to_fp16"), val = fp16(0x1p-2)];
+ tensor<fp16, [128, 3000]> _inversed_log_spec_cast_fp16 = mul(x = var_51_cast_fp16, y = _inversed_log_spec_y_0_to_fp16)[name = string("_inversed_log_spec_cast_fp16")];
+ tensor<int32, [1]> var_55_axes_0 = const()[name = string("op_55_axes_0"), val = tensor<int32, [1]>([0])];
+ tensor<fp16, [1, 128, 3000]> var_55_cast_fp16 = expand_dims(axes = var_55_axes_0, x = _inversed_log_spec_cast_fp16)[name = string("op_55_cast_fp16")];
+ tensor<int32, [1]> var_62_axes_0 = const()[name = string("op_62_axes_0"), val = tensor<int32, [1]>([2])];
+ tensor<fp16, [1, 128, 1, 3000]> melspectrogram_features = expand_dims(axes = var_62_axes_0, x = var_55_cast_fp16)[name = string("op_62_cast_fp16")];
+ } -> (melspectrogram_features);
+ }
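Read end to end, this program is the standard Whisper log-mel front end: a reflect-padded STFT realized as two strided 1-D convolutions (cosine and sine banks, 201 bins × 400 taps), power spectrum, mel projection, base-10 log (the 0x1.bccp-2 constant is log10(e)), a dynamic-range floor 8 below the maximum, and an (x + 4) / 4 rescale. A NumPy re-derivation under those assumptions, with the DFT and mel constants standing in for weight.bin:

```python
import numpy as np

def log_mel(audio, dft_cos, dft_sin, mel_filters, hop=160):
    # audio: [480000]; dft_cos/dft_sin: [201, 400]; mel_filters: [128, 201]
    x = np.pad(audio, 200, mode="reflect")                  # pad [200, 200]
    frames = np.lib.stride_tricks.sliding_window_view(x, 400)[::hop]  # [3001, 400]
    real, imag = frames @ dft_cos.T, frames @ dft_sin.T     # conv_0 / conv_1
    power = (real**2 + imag**2).T[:, :3000]                 # square, add, slice
    mel = mel_filters @ power + 2.0**-24                    # matmul + fp16 eps
    log_spec = np.log(mel) * 0.43429                        # ln(x) * log10(e)
    log_spec = np.maximum(log_spec, log_spec.max() - 8.0)   # dynamic-range floor
    return ((log_spec + 4.0) / 4.0)[None, :, None, :]       # [1, 128, 1, 3000]
```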
distil-whisper_distil-large-v3_turbo/MelSpectrogram.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:009d9fb8f6b589accfa08cebf1c712ef07c3405229ce3cfb3a57ee033c9d8a49
+ size 373376
distil-whisper_distil-large-v3_turbo/TextDecoder.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:77cb1b565a336e7fc01586698e50aa32d9a2a8f1ca5c439172564f4af0515f5d
+ size 243
distil-whisper_distil-large-v3_turbo/TextDecoder.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7a5e6f62b5ae897c8f846e22cacbe7d4f7d6bdbeb5f46366e2387f1082676b62
+ size 754
distil-whisper_distil-large-v3_turbo/TextDecoder.mlmodelc/metadata.json ADDED
@@ -0,0 +1,183 @@
+ [
+ {
+ "metadataOutputVersion" : "3.0",
+ "storagePrecision" : "Float16",
+ "outputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 1 × 51866)",
+ "shortDescription" : "",
+ "shape" : "[1, 1, 51866]",
+ "name" : "logits",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 2560 × 1 × 1)",
+ "shortDescription" : "",
+ "shape" : "[1, 2560, 1, 1]",
+ "name" : "key_cache_updates",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 2560 × 1 × 1)",
+ "shortDescription" : "",
+ "shape" : "[1, 2560, 1, 1]",
+ "name" : "value_cache_updates",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 1536)",
+ "shortDescription" : "",
+ "shape" : "[1, 1536]",
+ "name" : "alignment_heads_weights",
+ "type" : "MultiArray"
+ }
+ ],
+ "modelParameters" : [
+
+ ],
+ "specificationVersion" : 9,
+ "mlProgramOperationTypeHistogram" : {
+ "Ios18.expandDims" : 8,
+ "Ios18.softmax" : 4,
+ "Ios18.mul" : 8,
+ "Ios18.matmul" : 8,
+ "Ios18.batchNorm" : 7,
+ "Ios16.reduceMean" : 1,
+ "Split" : 2,
+ "Ios18.readState" : 5,
+ "Ios18.gather" : 2,
+ "Ios18.add" : 15,
+ "Ios18.layerNorm" : 7,
+ "Ios18.reshape" : 16,
+ "Ios18.linear" : 1,
+ "Ios18.conv" : 16,
+ "Ios18.gelu" : 2,
+ "Ios18.concat" : 3,
+ "Ios18.cast" : 1,
+ "Ios18.transpose" : 1,
+ "Ios18.sliceByIndex" : 44,
+ "Ios18.squeeze" : 1
+ },
+ "computePrecision" : "Mixed (Float16, Int32, UInt16)",
+ "isUpdatable" : "0",
+ "stateSchema" : [
+ {
+ "dataType" : "Float16",
+ "isOptional" : "0",
+ "formattedType" : "State (Float16 1 × 1536)",
+ "shortDescription" : "",
+ "shape" : "[1, 1536]",
+ "name" : "encoder_attn_key_padding_mask",
+ "type" : "State"
+ },
+ {
+ "dataType" : "Float16",
+ "isOptional" : "0",
+ "formattedType" : "State (Float16 2 × 1280 × 1 × 1536)",
+ "shortDescription" : "",
+ "shape" : "[2, 1280, 1, 1536]",
+ "name" : "encoder_attn_key_cache",
+ "type" : "State"
+ },
+ {
+ "dataType" : "Float16",
+ "isOptional" : "0",
+ "formattedType" : "State (Float16 2 × 1280 × 1 × 1536)",
+ "shortDescription" : "",
+ "shape" : "[2, 1280, 1, 1536]",
+ "name" : "encoder_attn_value_cache",
+ "type" : "State"
+ },
+ {
+ "dataType" : "Float16",
+ "isOptional" : "0",
+ "formattedType" : "State (Float16 2 × 1280 × 1 × 448)",
+ "shortDescription" : "",
+ "shape" : "[2, 1280, 1, 448]",
+ "name" : "self_attn_key_cache",
+ "type" : "State"
+ },
+ {
+ "dataType" : "Float16",
+ "isOptional" : "0",
+ "formattedType" : "State (Float16 2 × 1280 × 1 × 448)",
+ "shortDescription" : "",
+ "shape" : "[2, 1280, 1, 448]",
+ "name" : "self_attn_value_cache",
+ "type" : "State"
+ }
+ ],
+ "availability" : {
+ "macOS" : "15.0",
+ "tvOS" : "18.0",
+ "visionOS" : "2.0",
+ "watchOS" : "11.0",
+ "iOS" : "18.0",
+ "macCatalyst" : "18.0"
+ },
+ "modelType" : {
+ "name" : "MLModelType_mlProgram"
+ },
+ "userDefinedMetadata" : {
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
+ "com.github.apple.coremltools.source" : "torch==2.5.1",
+ "com.github.apple.coremltools.version" : "8.0"
+ },
+ "inputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Int32",
+ "formattedType" : "MultiArray (Int32 1)",
+ "shortDescription" : "",
+ "shape" : "[1]",
+ "name" : "input_ids",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Int32",
+ "formattedType" : "MultiArray (Int32 1)",
+ "shortDescription" : "",
+ "shape" : "[1]",
+ "name" : "cache_length",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 448)",
+ "shortDescription" : "",
+ "shape" : "[1, 448]",
+ "name" : "kv_cache_update_mask",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 448)",
+ "shortDescription" : "",
+ "shape" : "[1, 448]",
+ "name" : "decoder_key_padding_mask",
+ "type" : "MultiArray"
+ }
+ ],
+ "generatedClassName" : "TextDecoderStateful",
+ "method" : "predict"
+ }
+ ]
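Unlike the encoder, this decoder is stateful (note the stateSchema): the self- and cross-attention KV caches live in Core ML state tensors, and each call advances one token, writing its K/V into the slot selected by kv_cache_update_mask while decoder_key_padding_mask hides slots not yet written. A hypothetical first greedy step with coremltools, assuming version ≥ 8 for make_state / predict(..., state=...); in a real pipeline the encoder_attn_* states would first be populated from the AudioEncoder's outputs:

```python
import numpy as np
import coremltools as ct

decoder = ct.models.CompiledMLModel(
    "distil-whisper_distil-large-v3_turbo/TextDecoder.mlmodelc")
state = decoder.make_state()  # fresh KV caches and encoder-attn buffers

step = 0
inputs = {
    "input_ids": np.array([50258], dtype=np.int32),   # <|startoftranscript|>
    "cache_length": np.array([step], dtype=np.int32),
    "kv_cache_update_mask": np.zeros((1, 448), dtype=np.float16),
    "decoder_key_padding_mask": np.full((1, 448), -np.inf, dtype=np.float16),
}
inputs["kv_cache_update_mask"][0, step] = 1.0            # write K/V slot `step`
inputs["decoder_key_padding_mask"][0, : step + 1] = 0.0  # attend through `step`

out = decoder.predict(inputs, state=state)
next_token = int(out["logits"][0, 0].argmax())
```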
distil-whisper_distil-large-v3_turbo/TextDecoder.mlmodelc/model.mil ADDED
@@ -0,0 +1,529 @@
+ program(1.3)
+ [buildInfo = dict<string, string>({{"coremlc-component-MIL", "3401.3.1"}, {"coremlc-version", "3401.4.1"}, {"coremltools-component-torch", "2.5.1"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.0"}})]
+ {
+ func main<ios18>(tensor<int32, [1]> cache_length, tensor<fp16, [1, 448]> decoder_key_padding_mask, state<tensor<fp16, [2, 1280, 1, 1536]>> encoder_attn_key_cache, state<tensor<fp16, [1, 1536]>> encoder_attn_key_padding_mask, state<tensor<fp16, [2, 1280, 1, 1536]>> encoder_attn_value_cache, tensor<int32, [1]> input_ids, tensor<fp16, [1, 448]> kv_cache_update_mask, state<tensor<fp16, [2, 1280, 1, 448]>> self_attn_key_cache, state<tensor<fp16, [2, 1280, 1, 448]>> self_attn_value_cache) {
+ int32 var_22_axis_0 = const()[name = string("op_22_axis_0"), val = int32(0)];
+ int32 var_22_batch_dims_0 = const()[name = string("op_22_batch_dims_0"), val = int32(0)];
+ bool var_22_validate_indices_0 = const()[name = string("op_22_validate_indices_0"), val = bool(false)];
+ tensor<fp16, [51866, 1280]> embed_tokens_weight_to_fp16 = const()[name = string("embed_tokens_weight_to_fp16"), val = tensor<fp16, [51866, 1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(64)))];
+ tensor<fp16, [1, 1280]> var_22_cast_fp16 = gather(axis = var_22_axis_0, batch_dims = var_22_batch_dims_0, indices = input_ids, validate_indices = var_22_validate_indices_0, x = embed_tokens_weight_to_fp16)[name = string("op_22_cast_fp16")];
+ int32 var_26_axis_0 = const()[name = string("op_26_axis_0"), val = int32(0)];
+ int32 var_26_batch_dims_0 = const()[name = string("op_26_batch_dims_0"), val = int32(0)];
+ bool var_26_validate_indices_0 = const()[name = string("op_26_validate_indices_0"), val = bool(false)];
+ tensor<fp16, [448, 1280]> embed_positions_weight_to_fp16 = const()[name = string("embed_positions_weight_to_fp16"), val = tensor<fp16, [448, 1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(132777088)))];
+ string cache_length_to_uint16_dtype_0 = const()[name = string("cache_length_to_uint16_dtype_0"), val = string("uint16")];
+ tensor<uint16, [1]> cache_length_to_uint16 = cast(dtype = cache_length_to_uint16_dtype_0, x = cache_length)[name = string("cast_43")];
+ tensor<fp16, [1, 1280]> var_26_cast_fp16_cast_uint16 = gather(axis = var_26_axis_0, batch_dims = var_26_batch_dims_0, indices = cache_length_to_uint16, validate_indices = var_26_validate_indices_0, x = embed_positions_weight_to_fp16)[name = string("op_26_cast_fp16_cast_uint16")];
+ tensor<fp16, [1, 1280]> hidden_states_1_cast_fp16 = add(x = var_22_cast_fp16, y = var_26_cast_fp16_cast_uint16)[name = string("hidden_states_1_cast_fp16")];
+ tensor<int32, [1]> var_40_axes_0 = const()[name = string("op_40_axes_0"), val = tensor<int32, [1]>([2])];
+ tensor<fp16, [1, 1280, 1]> var_40_cast_fp16 = expand_dims(axes = var_40_axes_0, x = hidden_states_1_cast_fp16)[name = string("op_40_cast_fp16")];
+ tensor<int32, [1]> inputs_1_axes_0 = const()[name = string("inputs_1_axes_0"), val = tensor<int32, [1]>([3])];
+ tensor<fp16, [1, 1280, 1, 1]> inputs_1_cast_fp16 = expand_dims(axes = inputs_1_axes_0, x = var_40_cast_fp16)[name = string("inputs_1_cast_fp16")];
+ tensor<fp16, [2, 1280, 1, 448]> read_state_0 = read_state(input = self_attn_key_cache)[name = string("read_state_0")];
+ tensor<int32, [2]> tile_0 = const()[name = string("tile_0"), val = tensor<int32, [2]>([1, 1])];
+ int32 var_45_axis_0 = const()[name = string("op_45_axis_0"), val = int32(0)];
+ tensor<fp16, [1, 1280, 1, 448]> var_45_cast_fp16_0, tensor<fp16, [1, 1280, 1, 448]> var_45_cast_fp16_1 = split(axis = var_45_axis_0, split_sizes = tile_0, x = read_state_0)[name = string("op_45_cast_fp16")];
+ tensor<fp16, [2, 1280, 1, 448]> read_state_1 = read_state(input = self_attn_value_cache)[name = string("read_state_1")];
+ tensor<int32, [2]> tile_1 = const()[name = string("tile_1"), val = tensor<int32, [2]>([1, 1])];
+ int32 var_50_axis_0 = const()[name = string("op_50_axis_0"), val = int32(0)];
+ tensor<fp16, [1, 1280, 1, 448]> var_50_cast_fp16_0, tensor<fp16, [1, 1280, 1, 448]> var_50_cast_fp16_1 = split(axis = var_50_axis_0, split_sizes = tile_1, x = read_state_1)[name = string("op_50_cast_fp16")];
+ tensor<fp16, [2, 1280, 1, 1536]> read_state_2 = read_state(input = encoder_attn_key_cache)[name = string("read_state_2")];
+ tensor<int32, [4]> obj_17_begin_0 = const()[name = string("obj_17_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+ tensor<int32, [4]> obj_17_end_0 = const()[name = string("obj_17_end_0"), val = tensor<int32, [4]>([1, 1280, 1, 1536])];
+ tensor<bool, [4]> obj_17_end_mask_0 = const()[name = string("obj_17_end_mask_0"), val = tensor<bool, [4]>([false, true, true, true])];
+ tensor<fp16, [1, 1280, 1, 1536]> obj_17_cast_fp16 = slice_by_index(begin = obj_17_begin_0, end = obj_17_end_0, end_mask = obj_17_end_mask_0, x = read_state_2)[name = string("obj_17_cast_fp16")];
+ tensor<fp16, [2, 1280, 1, 1536]> read_state_3 = read_state(input = encoder_attn_value_cache)[name = string("read_state_3")];
+ tensor<int32, [4]> obj_19_begin_0 = const()[name = string("obj_19_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+ tensor<int32, [4]> obj_19_end_0 = const()[name = string("obj_19_end_0"), val = tensor<int32, [4]>([1, 1280, 1, 1536])];
+ tensor<bool, [4]> obj_19_end_mask_0 = const()[name = string("obj_19_end_mask_0"), val = tensor<bool, [4]>([false, true, true, true])];
+ tensor<fp16, [1, 1280, 1, 1536]> obj_19_cast_fp16 = slice_by_index(begin = obj_19_begin_0, end = obj_19_end_0, end_mask = obj_19_end_mask_0, x = read_state_3)[name = string("obj_19_cast_fp16")];
+ int32 var_68 = const()[name = string("op_68"), val = int32(3)];
+ tensor<int32, [1]> out_1_axes_0 = const()[name = string("out_1_axes_0"), val = tensor<int32, [1]>([1])];
+ fp16 var_93_to_fp16 = const()[name = string("op_93_to_fp16"), val = fp16(0x1.5p-17)];
+ tensor<fp16, [1, 1280, 1, 1]> out_1_cast_fp16 = layer_norm(axes = out_1_axes_0, epsilon = var_93_to_fp16, x = inputs_1_cast_fp16)[name = string("out_1_cast_fp16")];
+ tensor<fp16, [1280]> obj_5_mean_0_to_fp16 = const()[name = string("obj_5_mean_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(133924032)))];
+ tensor<fp16, [1280]> obj_5_variance_0_to_fp16 = const()[name = string("obj_5_variance_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(133926656)))];
+ tensor<fp16, [1280]> obj_5_gamma_0_to_fp16 = const()[name = string("obj_5_gamma_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(133929280)))];
+ tensor<fp16, [1280]> obj_5_beta_0_to_fp16 = const()[name = string("obj_5_beta_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(133931904)))];
+ fp16 obj_5_epsilon_0_to_fp16 = const()[name = string("obj_5_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)];
+ tensor<fp16, [1, 1280, 1, 1]> obj_5_cast_fp16 = batch_norm(beta = obj_5_beta_0_to_fp16, epsilon = obj_5_epsilon_0_to_fp16, gamma = obj_5_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_1_cast_fp16)[name = string("obj_5_cast_fp16")];
+ string query_1_pad_type_0 = const()[name = string("query_1_pad_type_0"), val = string("valid")];
+ tensor<int32, [2]> query_1_strides_0 = const()[name = string("query_1_strides_0"), val = tensor<int32, [2]>([1, 1])];
+ tensor<int32, [4]> query_1_pad_0 = const()[name = string("query_1_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+ tensor<int32, [2]> query_1_dilations_0 = const()[name = string("query_1_dilations_0"), val = tensor<int32, [2]>([1, 1])];
+ int32 query_1_groups_0 = const()[name = string("query_1_groups_0"), val = int32(1)];
+ tensor<fp16, [1280, 1280, 1, 1]> layers_0_self_attn_q_proj_weight_to_fp16 = const()[name = string("layers_0_self_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(133934528)))];
+ tensor<fp16, [1280]> layers_0_self_attn_q_proj_bias_to_fp16 = const()[name = string("layers_0_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(137211392)))];
+ tensor<fp16, [1, 1280, 1, 1]> query_1_cast_fp16 = conv(bias = layers_0_self_attn_q_proj_bias_to_fp16, dilations = query_1_dilations_0, groups = query_1_groups_0, pad = query_1_pad_0, pad_type = query_1_pad_type_0, strides = query_1_strides_0, weight = layers_0_self_attn_q_proj_weight_to_fp16, x = obj_5_cast_fp16)[name = string("query_1_cast_fp16")];
+ string current_key_1_pad_type_0 = const()[name = string("current_key_1_pad_type_0"), val = string("valid")];
+ tensor<int32, [2]> current_key_1_strides_0 = const()[name = string("current_key_1_strides_0"), val = tensor<int32, [2]>([1, 1])];
+ tensor<int32, [4]> current_key_1_pad_0 = const()[name = string("current_key_1_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+ tensor<int32, [2]> current_key_1_dilations_0 = const()[name = string("current_key_1_dilations_0"), val = tensor<int32, [2]>([1, 1])];
+ int32 current_key_1_groups_0 = const()[name = string("current_key_1_groups_0"), val = int32(1)];
+ tensor<fp16, [1280, 1280, 1, 1]> layers_0_self_attn_k_proj_weight_to_fp16 = const()[name = string("layers_0_self_attn_k_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(137214016)))];
+ tensor<fp16, [1, 1280, 1, 1]> current_key_1_cast_fp16 = conv(dilations = current_key_1_dilations_0, groups = current_key_1_groups_0, pad = current_key_1_pad_0, pad_type = current_key_1_pad_type_0, strides = current_key_1_strides_0, weight = layers_0_self_attn_k_proj_weight_to_fp16, x = obj_5_cast_fp16)[name = string("current_key_1_cast_fp16")];
+ string current_value_1_pad_type_0 = const()[name = string("current_value_1_pad_type_0"), val = string("valid")];
+ tensor<int32, [2]> current_value_1_strides_0 = const()[name = string("current_value_1_strides_0"), val = tensor<int32, [2]>([1, 1])];
+ tensor<int32, [4]> current_value_1_pad_0 = const()[name = string("current_value_1_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+ tensor<int32, [2]> current_value_1_dilations_0 = const()[name = string("current_value_1_dilations_0"), val = tensor<int32, [2]>([1, 1])];
+ int32 current_value_1_groups_0 = const()[name = string("current_value_1_groups_0"), val = int32(1)];
+ tensor<fp16, [1280, 1280, 1, 1]> layers_0_self_attn_v_proj_weight_to_fp16 = const()[name = string("layers_0_self_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(140490880)))];
+ tensor<fp16, [1280]> layers_0_self_attn_v_proj_bias_to_fp16 = const()[name = string("layers_0_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(143767744)))];
+ tensor<fp16, [1, 1280, 1, 1]> current_value_1_cast_fp16 = conv(bias = layers_0_self_attn_v_proj_bias_to_fp16, dilations = current_value_1_dilations_0, groups = current_value_1_groups_0, pad = current_value_1_pad_0, pad_type = current_value_1_pad_type_0, strides = current_value_1_strides_0, weight = layers_0_self_attn_v_proj_weight_to_fp16, x = obj_5_cast_fp16)[name = string("current_value_1_cast_fp16")];
+ tensor<int32, [1]> var_128_axes_0 = const()[name = string("op_128_axes_0"), val = tensor<int32, [1]>([1])];
+ tensor<fp16, [1, 1, 448]> var_128_cast_fp16 = expand_dims(axes = var_128_axes_0, x = kv_cache_update_mask)[name = string("op_128_cast_fp16")];
+ tensor<int32, [1]> var_129_axes_0 = const()[name = string("op_129_axes_0"), val = tensor<int32, [1]>([2])];
+ tensor<fp16, [1, 1, 1, 448]> var_129_cast_fp16 = expand_dims(axes = var_129_axes_0, x = var_128_cast_fp16)[name = string("op_129_cast_fp16")];
+ tensor<fp16, [1, 1280, 1, 448]> var_131_cast_fp16 = mul(x = current_key_1_cast_fp16, y = var_129_cast_fp16)[name = string("op_131_cast_fp16")];
+ tensor<fp16, [1, 1280, 1, 448]> key_1_cast_fp16 = add(x = var_45_cast_fp16_0, y = var_131_cast_fp16)[name = string("key_1_cast_fp16")];
+ tensor<fp16, [1, 1280, 1, 448]> var_133_cast_fp16 = mul(x = current_value_1_cast_fp16, y = var_129_cast_fp16)[name = string("op_133_cast_fp16")];
+ tensor<fp16, [1, 1280, 1, 448]> value_1_cast_fp16 = add(x = var_50_cast_fp16_0, y = var_133_cast_fp16)[name = string("value_1_cast_fp16")];
+ tensor<int32, [4]> var_136 = const()[name = string("op_136"), val = tensor<int32, [4]>([1, 20, 64, -1])];
+ tensor<fp16, [1, 20, 64, 1]> mh_q_1_cast_fp16 = reshape(shape = var_136, x = query_1_cast_fp16)[name = string("mh_q_1_cast_fp16")];
+ fp16 var_138_to_fp16 = const()[name = string("op_138_to_fp16"), val = fp16(0x1p-3)];
+ tensor<fp16, [1, 20, 64, 1]> var_139_cast_fp16 = mul(x = mh_q_1_cast_fp16, y = var_138_to_fp16)[name = string("op_139_cast_fp16")];
+ tensor<int32, [4]> var_140 = const()[name = string("op_140"), val = tensor<int32, [4]>([1, 20, 64, -1])];
+ tensor<fp16, [1, 20, 64, 448]> var_141_cast_fp16 = reshape(shape = var_140, x = key_1_cast_fp16)[name = string("op_141_cast_fp16")];
+ bool mh_w_1_transpose_x_0 = const()[name = string("mh_w_1_transpose_x_0"), val = bool(true)];
+ bool mh_w_1_transpose_y_0 = const()[name = string("mh_w_1_transpose_y_0"), val = bool(false)];
+ tensor<fp16, [1, 20, 1, 448]> mh_w_1_cast_fp16 = matmul(transpose_x = mh_w_1_transpose_x_0, transpose_y = mh_w_1_transpose_y_0, x = var_139_cast_fp16, y = var_141_cast_fp16)[name = string("mh_w_1_cast_fp16")];
+ tensor<int32, [1]> var_145_axes_0 = const()[name = string("op_145_axes_0"), val = tensor<int32, [1]>([1])];
+ tensor<fp16, [1, 1, 448]> var_145_cast_fp16 = expand_dims(axes = var_145_axes_0, x = decoder_key_padding_mask)[name = string("op_145_cast_fp16")];
+ tensor<int32, [1]> var_146_axes_0 = const()[name = string("op_146_axes_0"), val = tensor<int32, [1]>([2])];
+ tensor<fp16, [1, 1, 1, 448]> var_146_cast_fp16 = expand_dims(axes = var_146_axes_0, x = var_145_cast_fp16)[name = string("op_146_cast_fp16")];
+ tensor<fp16, [1, 20, 1, 448]> mh_w_3_cast_fp16 = add(x = mh_w_1_cast_fp16, y = var_146_cast_fp16)[name = string("mh_w_3_cast_fp16")];
+ tensor<fp16, [1, 20, 1, 448]> var_149_cast_fp16 = softmax(axis = var_68, x = mh_w_3_cast_fp16)[name = string("op_149_cast_fp16")];
+ tensor<int32, [4]> var_150 = const()[name = string("op_150"), val = tensor<int32, [4]>([1, 20, 64, -1])];
+ tensor<fp16, [1, 20, 64, 448]> var_151_cast_fp16 = reshape(shape = var_150, x = value_1_cast_fp16)[name = string("op_151_cast_fp16")];
+ bool attn_1_transpose_x_0 = const()[name = string("attn_1_transpose_x_0"), val = bool(false)];
+ bool attn_1_transpose_y_0 = const()[name = string("attn_1_transpose_y_0"), val = bool(true)];
+ tensor<fp16, [1, 20, 64, 1]> attn_1_cast_fp16 = matmul(transpose_x = attn_1_transpose_x_0, transpose_y = attn_1_transpose_y_0, x = var_151_cast_fp16, y = var_149_cast_fp16)[name = string("attn_1_cast_fp16")];
+ tensor<int32, [4]> var_154 = const()[name = string("op_154"), val = tensor<int32, [4]>([1, 1280, 1, -1])];
+ tensor<fp16, [1, 1280, 1, 1]> input_1_cast_fp16 = reshape(shape = var_154, x = attn_1_cast_fp16)[name = string("input_1_cast_fp16")];
+ string obj_11_pad_type_0 = const()[name = string("obj_11_pad_type_0"), val = string("valid")];
+ tensor<int32, [2]> obj_11_strides_0 = const()[name = string("obj_11_strides_0"), val = tensor<int32, [2]>([1, 1])];
+ tensor<int32, [4]> obj_11_pad_0 = const()[name = string("obj_11_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+ tensor<int32, [2]> obj_11_dilations_0 = const()[name = string("obj_11_dilations_0"), val = tensor<int32, [2]>([1, 1])];
+ int32 obj_11_groups_0 = const()[name = string("obj_11_groups_0"), val = int32(1)];
+ tensor<fp16, [1280, 1280, 1, 1]> layers_0_self_attn_o_proj_weight_to_fp16 = const()[name = string("layers_0_self_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(143770368)))];
+ tensor<fp16, [1280]> layers_0_self_attn_o_proj_bias_to_fp16 = const()[name = string("layers_0_self_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(147047232)))];
+ tensor<fp16, [1, 1280, 1, 1]> obj_11_cast_fp16 = conv(bias = layers_0_self_attn_o_proj_bias_to_fp16, dilations = obj_11_dilations_0, groups = obj_11_groups_0, pad = obj_11_pad_0, pad_type = obj_11_pad_type_0, strides = obj_11_strides_0, weight = layers_0_self_attn_o_proj_weight_to_fp16, x = input_1_cast_fp16)[name = string("obj_11_cast_fp16")];
+ tensor<fp16, [1, 1280, 1, 1]> inputs_3_cast_fp16 = add(x = inputs_1_cast_fp16, y = obj_11_cast_fp16)[name = string("inputs_3_cast_fp16")];
+ tensor<int32, [1]> out_3_axes_0 = const()[name = string("out_3_axes_0"), val = tensor<int32, [1]>([1])];
+ fp16 var_176_to_fp16 = const()[name = string("op_176_to_fp16"), val = fp16(0x1.5p-17)];
+ tensor<fp16, [1, 1280, 1, 1]> out_3_cast_fp16 = layer_norm(axes = out_3_axes_0, epsilon = var_176_to_fp16, x = inputs_3_cast_fp16)[name = string("out_3_cast_fp16")];
+ tensor<fp16, [1280]> obj_13_gamma_0_to_fp16 = const()[name = string("obj_13_gamma_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(147049856)))];
+ tensor<fp16, [1280]> obj_13_beta_0_to_fp16 = const()[name = string("obj_13_beta_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(147052480)))];
+ fp16 obj_13_epsilon_0_to_fp16 = const()[name = string("obj_13_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)];
+ tensor<fp16, [1, 1280, 1, 1]> obj_13_cast_fp16 = batch_norm(beta = obj_13_beta_0_to_fp16, epsilon = obj_13_epsilon_0_to_fp16, gamma = obj_13_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_3_cast_fp16)[name = string("obj_13_cast_fp16")];
+ string query_3_pad_type_0 = const()[name = string("query_3_pad_type_0"), val = string("valid")];
+ tensor<int32, [2]> query_3_strides_0 = const()[name = string("query_3_strides_0"), val = tensor<int32, [2]>([1, 1])];
+ tensor<int32, [4]> query_3_pad_0 = const()[name = string("query_3_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+ tensor<int32, [2]> query_3_dilations_0 = const()[name = string("query_3_dilations_0"), val = tensor<int32, [2]>([1, 1])];
+ int32 query_3_groups_0 = const()[name = string("query_3_groups_0"), val = int32(1)];
+ tensor<fp16, [1280, 1280, 1, 1]> layers_0_encoder_attn_q_proj_weight_to_fp16 = const()[name = string("layers_0_encoder_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(147055104)))];
+ tensor<fp16, [1280]> layers_0_encoder_attn_q_proj_bias_to_fp16 = const()[name = string("layers_0_encoder_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(150331968)))];
+ tensor<fp16, [1, 1280, 1, 1]> query_3_cast_fp16 = conv(bias = layers_0_encoder_attn_q_proj_bias_to_fp16, dilations = query_3_dilations_0, groups = query_3_groups_0, pad = query_3_pad_0, pad_type = query_3_pad_type_0, strides = query_3_strides_0, weight = layers_0_encoder_attn_q_proj_weight_to_fp16, x = obj_13_cast_fp16)[name = string("query_3_cast_fp16")];
+ tensor<int32, [4]> var_196 = const()[name = string("op_196"), val = tensor<int32, [4]>([1, 20, 64, -1])];
+ tensor<fp16, [1, 20, 64, 1]> mh_q_3_cast_fp16 = reshape(shape = var_196, x = query_3_cast_fp16)[name = string("mh_q_3_cast_fp16")];
+ fp16 var_198_to_fp16 = const()[name = string("op_198_to_fp16"), val = fp16(0x1p-3)];
+ tensor<fp16, [1, 20, 64, 1]> var_199_cast_fp16 = mul(x = mh_q_3_cast_fp16, y = var_198_to_fp16)[name = string("op_199_cast_fp16")];
+ tensor<int32, [4]> var_200 = const()[name = string("op_200"), val = tensor<int32, [4]>([1, 20, 64, -1])];
+ tensor<fp16, [1, 20, 64, 1536]> var_201_cast_fp16 = reshape(shape = var_200, x = obj_17_cast_fp16)[name = string("op_201_cast_fp16")];
+ bool mh_w_5_transpose_x_0 = const()[name = string("mh_w_5_transpose_x_0"), val = bool(true)];
+ bool mh_w_5_transpose_y_0 = const()[name = string("mh_w_5_transpose_y_0"), val = bool(false)];
+ tensor<fp16, [1, 20, 1, 1536]> mh_w_5_cast_fp16 = matmul(transpose_x = mh_w_5_transpose_x_0, transpose_y = mh_w_5_transpose_y_0, x = var_199_cast_fp16, y = var_201_cast_fp16)[name = string("mh_w_5_cast_fp16")];
+ tensor<fp16, [1, 1536]> read_state_4 = read_state(input = encoder_attn_key_padding_mask)[name = string("read_state_4")];
+ tensor<int32, [1]> var_205_axes_0 = const()[name = string("op_205_axes_0"), val = tensor<int32, [1]>([1])];
+ tensor<fp16, [1, 1, 1536]> var_205_cast_fp16 = expand_dims(axes = var_205_axes_0, x = read_state_4)[name = string("op_205_cast_fp16")];
+ tensor<int32, [1]> var_206_axes_0 = const()[name = string("op_206_axes_0"), val = tensor<int32, [1]>([2])];
+ tensor<fp16, [1, 1, 1, 1536]> var_206_cast_fp16 = expand_dims(axes = var_206_axes_0, x = var_205_cast_fp16)[name = string("op_206_cast_fp16")];
+ tensor<fp16, [1, 20, 1, 1536]> mh_w_7_cast_fp16 = add(x = mh_w_5_cast_fp16, y = var_206_cast_fp16)[name = string("mh_w_7_cast_fp16")];
+ tensor<fp16, [1, 20, 1, 1536]> obj_23_cast_fp16 = softmax(axis = var_68, x = mh_w_7_cast_fp16)[name = string("obj_23_cast_fp16")];
+ tensor<int32, [4]> var_210 = const()[name = string("op_210"), val = tensor<int32, [4]>([1, 20, 64, -1])];
+ tensor<fp16, [1, 20, 64, 1536]> var_211_cast_fp16 = reshape(shape = var_210, x = obj_19_cast_fp16)[name = string("op_211_cast_fp16")];
+ bool attn_3_transpose_x_0 = const()[name = string("attn_3_transpose_x_0"), val = bool(false)];
+ bool attn_3_transpose_y_0 = const()[name = string("attn_3_transpose_y_0"), val = bool(true)];
+ tensor<fp16, [1, 20, 64, 1]> attn_3_cast_fp16 = matmul(transpose_x = attn_3_transpose_x_0, transpose_y = attn_3_transpose_y_0, x = var_211_cast_fp16, y = obj_23_cast_fp16)[name = string("attn_3_cast_fp16")];
+ tensor<int32, [4]> var_214 = const()[name = string("op_214"), val = tensor<int32, [4]>([1, 1280, 1, -1])];
+ tensor<fp16, [1, 1280, 1, 1]> input_3_cast_fp16 = reshape(shape = var_214, x = attn_3_cast_fp16)[name = string("input_3_cast_fp16")];
+ string obj_21_pad_type_0 = const()[name = string("obj_21_pad_type_0"), val = string("valid")];
+ tensor<int32, [2]> obj_21_strides_0 = const()[name = string("obj_21_strides_0"), val = tensor<int32, [2]>([1, 1])];
+ tensor<int32, [4]> obj_21_pad_0 = const()[name = string("obj_21_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+ tensor<int32, [2]> obj_21_dilations_0 = const()[name = string("obj_21_dilations_0"), val = tensor<int32, [2]>([1, 1])];
+ int32 obj_21_groups_0 = const()[name = string("obj_21_groups_0"), val = int32(1)];
+ tensor<fp16, [1280, 1280, 1, 1]> layers_0_encoder_attn_o_proj_weight_to_fp16 = const()[name = string("layers_0_encoder_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(150334592)))];
+ tensor<fp16, [1280]> layers_0_encoder_attn_o_proj_bias_to_fp16 = const()[name = string("layers_0_encoder_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(153611456)))];
+ tensor<fp16, [1, 1280, 1, 1]> obj_21_cast_fp16 = conv(bias = layers_0_encoder_attn_o_proj_bias_to_fp16, dilations = obj_21_dilations_0, groups = obj_21_groups_0, pad = obj_21_pad_0, pad_type = obj_21_pad_type_0, strides = obj_21_strides_0, weight = layers_0_encoder_attn_o_proj_weight_to_fp16, x = input_3_cast_fp16)[name = string("obj_21_cast_fp16")];
+ tensor<fp16, [1, 1280, 1, 1]> inputs_5_cast_fp16 = add(x = inputs_3_cast_fp16, y = obj_21_cast_fp16)[name = string("inputs_5_cast_fp16")];
+ tensor<int32, [1]> out_5_axes_0 = const()[name = string("out_5_axes_0"), val = tensor<int32, [1]>([1])];
+ fp16 var_232_to_fp16 = const()[name = string("op_232_to_fp16"), val = fp16(0x1.5p-17)];
+ tensor<fp16, [1, 1280, 1, 1]> out_5_cast_fp16 = layer_norm(axes = out_5_axes_0, epsilon = var_232_to_fp16, x = inputs_5_cast_fp16)[name = string("out_5_cast_fp16")];
+ tensor<fp16, [1280]> input_5_gamma_0_to_fp16 = const()[name = string("input_5_gamma_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(153614080)))];
+ tensor<fp16, [1280]> input_5_beta_0_to_fp16 = const()[name = string("input_5_beta_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(153616704)))];
+ fp16 input_5_epsilon_0_to_fp16 = const()[name = string("input_5_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)];
+ tensor<fp16, [1, 1280, 1, 1]> input_5_cast_fp16 = batch_norm(beta = input_5_beta_0_to_fp16, epsilon = input_5_epsilon_0_to_fp16, gamma = input_5_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_5_cast_fp16)[name = string("input_5_cast_fp16")];
+ string input_7_pad_type_0 = const()[name = string("input_7_pad_type_0"), val = string("valid")];
+ tensor<int32, [2]> input_7_strides_0 = const()[name = string("input_7_strides_0"), val = tensor<int32, [2]>([1, 1])];
+ tensor<int32, [4]> input_7_pad_0 = const()[name = string("input_7_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+ tensor<int32, [2]> input_7_dilations_0 = const()[name = string("input_7_dilations_0"), val = tensor<int32, [2]>([1, 1])];
+ int32 input_7_groups_0 = const()[name = string("input_7_groups_0"), val = int32(1)];
+ tensor<fp16, [5120, 1280, 1, 1]> layers_0_fc1_weight_to_fp16 = const()[name = string("layers_0_fc1_weight_to_fp16"), val = tensor<fp16, [5120, 1280, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(153619328)))];
+ tensor<fp16, [5120]> layers_0_fc1_bias_to_fp16 = const()[name = string("layers_0_fc1_bias_to_fp16"), val = tensor<fp16, [5120]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(166726592)))];
+ tensor<fp16, [1, 5120, 1, 1]> input_7_cast_fp16 = conv(bias = layers_0_fc1_bias_to_fp16, dilations = input_7_dilations_0, groups = input_7_groups_0, pad = input_7_pad_0, pad_type = input_7_pad_type_0, strides = input_7_strides_0, weight = layers_0_fc1_weight_to_fp16, x = input_5_cast_fp16)[name = string("input_7_cast_fp16")];
+ string input_9_mode_0 = const()[name = string("input_9_mode_0"), val = string("EXACT")];
+ tensor<fp16, [1, 5120, 1, 1]> input_9_cast_fp16 = gelu(mode = input_9_mode_0, x = input_7_cast_fp16)[name = string("input_9_cast_fp16")];
+ string hidden_states_3_pad_type_0 = const()[name = string("hidden_states_3_pad_type_0"), val = string("valid")];
+ tensor<int32, [2]> hidden_states_3_strides_0 = const()[name = string("hidden_states_3_strides_0"), val = tensor<int32, [2]>([1, 1])];
+ tensor<int32, [4]> hidden_states_3_pad_0 = const()[name = string("hidden_states_3_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+ tensor<int32, [2]> hidden_states_3_dilations_0 = const()[name = string("hidden_states_3_dilations_0"), val = tensor<int32, [2]>([1, 1])];
+ int32 hidden_states_3_groups_0 = const()[name = string("hidden_states_3_groups_0"), val = int32(1)];
+ tensor<fp16, [1280, 5120, 1, 1]> layers_0_fc2_weight_to_fp16 = const()[name = string("layers_0_fc2_weight_to_fp16"), val = tensor<fp16, [1280, 5120, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(166736896)))];
+ tensor<fp16, [1280]> layers_0_fc2_bias_to_fp16 = const()[name = string("layers_0_fc2_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(179844160)))];
+ tensor<fp16, [1, 1280, 1, 1]> hidden_states_3_cast_fp16 = conv(bias = layers_0_fc2_bias_to_fp16, dilations = hidden_states_3_dilations_0, groups = hidden_states_3_groups_0, pad = hidden_states_3_pad_0, pad_type = hidden_states_3_pad_type_0, strides = hidden_states_3_strides_0, weight = layers_0_fc2_weight_to_fp16, x = input_9_cast_fp16)[name = string("hidden_states_3_cast_fp16")];
+ tensor<fp16, [1, 1280, 1, 1]> inputs_7_cast_fp16 = add(x = inputs_5_cast_fp16, y = hidden_states_3_cast_fp16)[name = string("inputs_7_cast_fp16")];
+ tensor<int32, [4]> obj_35_begin_0 = const()[name = string("obj_35_begin_0"), val = tensor<int32, [4]>([1, 0, 0, 0])];
+ tensor<int32, [4]> obj_35_end_0 = const()[name = string("obj_35_end_0"), val = tensor<int32, [4]>([2, 1280, 1, 1536])];
+ tensor<bool, [4]> obj_35_end_mask_0 = const()[name = string("obj_35_end_mask_0"), val = tensor<bool, [4]>([false, true, true, true])];
+ tensor<fp16, [1, 1280, 1, 1536]> obj_35_cast_fp16 = slice_by_index(begin = obj_35_begin_0, end = obj_35_end_0, end_mask = obj_35_end_mask_0, x = read_state_2)[name = string("obj_35_cast_fp16")];
189
+ tensor<int32, [4]> obj_37_begin_0 = const()[name = string("obj_37_begin_0"), val = tensor<int32, [4]>([1, 0, 0, 0])];
190
+ tensor<int32, [4]> obj_37_end_0 = const()[name = string("obj_37_end_0"), val = tensor<int32, [4]>([2, 1280, 1, 1536])];
191
+ tensor<bool, [4]> obj_37_end_mask_0 = const()[name = string("obj_37_end_mask_0"), val = tensor<bool, [4]>([false, true, true, true])];
192
+ tensor<fp16, [1, 1280, 1, 1536]> obj_37_cast_fp16 = slice_by_index(begin = obj_37_begin_0, end = obj_37_end_0, end_mask = obj_37_end_mask_0, x = read_state_3)[name = string("obj_37_cast_fp16")];
193
+ int32 var_277 = const()[name = string("op_277"), val = int32(3)];
194
+ tensor<int32, [1]> out_7_axes_0 = const()[name = string("out_7_axes_0"), val = tensor<int32, [1]>([1])];
195
+ fp16 var_302_to_fp16 = const()[name = string("op_302_to_fp16"), val = fp16(0x1.5p-17)];
196
+ tensor<fp16, [1, 1280, 1, 1]> out_7_cast_fp16 = layer_norm(axes = out_7_axes_0, epsilon = var_302_to_fp16, x = inputs_7_cast_fp16)[name = string("out_7_cast_fp16")];
197
+ tensor<fp16, [1280]> obj_25_gamma_0_to_fp16 = const()[name = string("obj_25_gamma_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(179846784)))];
198
+ tensor<fp16, [1280]> obj_25_beta_0_to_fp16 = const()[name = string("obj_25_beta_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(179849408)))];
199
+ fp16 obj_25_epsilon_0_to_fp16 = const()[name = string("obj_25_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)];
200
+ tensor<fp16, [1, 1280, 1, 1]> obj_25_cast_fp16 = batch_norm(beta = obj_25_beta_0_to_fp16, epsilon = obj_25_epsilon_0_to_fp16, gamma = obj_25_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_7_cast_fp16)[name = string("obj_25_cast_fp16")];
201
+ string query_5_pad_type_0 = const()[name = string("query_5_pad_type_0"), val = string("valid")];
202
+ tensor<int32, [2]> query_5_strides_0 = const()[name = string("query_5_strides_0"), val = tensor<int32, [2]>([1, 1])];
203
+ tensor<int32, [4]> query_5_pad_0 = const()[name = string("query_5_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
204
+ tensor<int32, [2]> query_5_dilations_0 = const()[name = string("query_5_dilations_0"), val = tensor<int32, [2]>([1, 1])];
205
+ int32 query_5_groups_0 = const()[name = string("query_5_groups_0"), val = int32(1)];
206
+ tensor<fp16, [1280, 1280, 1, 1]> layers_1_self_attn_q_proj_weight_to_fp16 = const()[name = string("layers_1_self_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(179852032)))];
207
+ tensor<fp16, [1280]> layers_1_self_attn_q_proj_bias_to_fp16 = const()[name = string("layers_1_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(183128896)))];
208
+ tensor<fp16, [1, 1280, 1, 1]> query_5_cast_fp16 = conv(bias = layers_1_self_attn_q_proj_bias_to_fp16, dilations = query_5_dilations_0, groups = query_5_groups_0, pad = query_5_pad_0, pad_type = query_5_pad_type_0, strides = query_5_strides_0, weight = layers_1_self_attn_q_proj_weight_to_fp16, x = obj_25_cast_fp16)[name = string("query_5_cast_fp16")];
209
+ string current_key_pad_type_0 = const()[name = string("current_key_pad_type_0"), val = string("valid")];
210
+ tensor<int32, [2]> current_key_strides_0 = const()[name = string("current_key_strides_0"), val = tensor<int32, [2]>([1, 1])];
211
+ tensor<int32, [4]> current_key_pad_0 = const()[name = string("current_key_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
212
+ tensor<int32, [2]> current_key_dilations_0 = const()[name = string("current_key_dilations_0"), val = tensor<int32, [2]>([1, 1])];
213
+ int32 current_key_groups_0 = const()[name = string("current_key_groups_0"), val = int32(1)];
214
+ tensor<fp16, [1280, 1280, 1, 1]> layers_1_self_attn_k_proj_weight_to_fp16 = const()[name = string("layers_1_self_attn_k_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(183131520)))];
215
+ tensor<fp16, [1, 1280, 1, 1]> current_key_cast_fp16 = conv(dilations = current_key_dilations_0, groups = current_key_groups_0, pad = current_key_pad_0, pad_type = current_key_pad_type_0, strides = current_key_strides_0, weight = layers_1_self_attn_k_proj_weight_to_fp16, x = obj_25_cast_fp16)[name = string("current_key_cast_fp16")];
216
+ string current_value_pad_type_0 = const()[name = string("current_value_pad_type_0"), val = string("valid")];
217
+ tensor<int32, [2]> current_value_strides_0 = const()[name = string("current_value_strides_0"), val = tensor<int32, [2]>([1, 1])];
218
+ tensor<int32, [4]> current_value_pad_0 = const()[name = string("current_value_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
219
+ tensor<int32, [2]> current_value_dilations_0 = const()[name = string("current_value_dilations_0"), val = tensor<int32, [2]>([1, 1])];
220
+ int32 current_value_groups_0 = const()[name = string("current_value_groups_0"), val = int32(1)];
221
+ tensor<fp16, [1280, 1280, 1, 1]> layers_1_self_attn_v_proj_weight_to_fp16 = const()[name = string("layers_1_self_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(186408384)))];
222
+ tensor<fp16, [1280]> layers_1_self_attn_v_proj_bias_to_fp16 = const()[name = string("layers_1_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(189685248)))];
223
+ tensor<fp16, [1, 1280, 1, 1]> current_value_cast_fp16 = conv(bias = layers_1_self_attn_v_proj_bias_to_fp16, dilations = current_value_dilations_0, groups = current_value_groups_0, pad = current_value_pad_0, pad_type = current_value_pad_type_0, strides = current_value_strides_0, weight = layers_1_self_attn_v_proj_weight_to_fp16, x = obj_25_cast_fp16)[name = string("current_value_cast_fp16")];
224
+ tensor<fp16, [1, 1280, 1, 448]> var_340_cast_fp16 = mul(x = current_key_cast_fp16, y = var_129_cast_fp16)[name = string("op_340_cast_fp16")];
225
+ tensor<fp16, [1, 1280, 1, 448]> key_cast_fp16 = add(x = var_45_cast_fp16_1, y = var_340_cast_fp16)[name = string("key_cast_fp16")];
226
+ tensor<fp16, [1, 1280, 1, 448]> var_342_cast_fp16 = mul(x = current_value_cast_fp16, y = var_129_cast_fp16)[name = string("op_342_cast_fp16")];
227
+ tensor<fp16, [1, 1280, 1, 448]> value_cast_fp16 = add(x = var_50_cast_fp16_1, y = var_342_cast_fp16)[name = string("value_cast_fp16")];
228
+ tensor<int32, [4]> var_345 = const()[name = string("op_345"), val = tensor<int32, [4]>([1, 20, 64, -1])];
229
+ tensor<fp16, [1, 20, 64, 1]> mh_q_5_cast_fp16 = reshape(shape = var_345, x = query_5_cast_fp16)[name = string("mh_q_5_cast_fp16")];
230
+ fp16 var_347_to_fp16 = const()[name = string("op_347_to_fp16"), val = fp16(0x1p-3)];
231
+ tensor<fp16, [1, 20, 64, 1]> var_348_cast_fp16 = mul(x = mh_q_5_cast_fp16, y = var_347_to_fp16)[name = string("op_348_cast_fp16")];
232
+ tensor<int32, [4]> var_349 = const()[name = string("op_349"), val = tensor<int32, [4]>([1, 20, 64, -1])];
233
+ tensor<fp16, [1, 20, 64, 448]> var_350_cast_fp16 = reshape(shape = var_349, x = key_cast_fp16)[name = string("op_350_cast_fp16")];
234
+ bool mh_w_9_transpose_x_0 = const()[name = string("mh_w_9_transpose_x_0"), val = bool(true)];
235
+ bool mh_w_9_transpose_y_0 = const()[name = string("mh_w_9_transpose_y_0"), val = bool(false)];
236
+ tensor<fp16, [1, 20, 1, 448]> mh_w_9_cast_fp16 = matmul(transpose_x = mh_w_9_transpose_x_0, transpose_y = mh_w_9_transpose_y_0, x = var_348_cast_fp16, y = var_350_cast_fp16)[name = string("mh_w_9_cast_fp16")];
237
+ tensor<fp16, [1, 20, 1, 448]> mh_w_11_cast_fp16 = add(x = mh_w_9_cast_fp16, y = var_146_cast_fp16)[name = string("mh_w_11_cast_fp16")];
238
+ tensor<fp16, [1, 20, 1, 448]> var_358_cast_fp16 = softmax(axis = var_277, x = mh_w_11_cast_fp16)[name = string("op_358_cast_fp16")];
239
+ tensor<int32, [4]> var_359 = const()[name = string("op_359"), val = tensor<int32, [4]>([1, 20, 64, -1])];
240
+ tensor<fp16, [1, 20, 64, 448]> var_360_cast_fp16 = reshape(shape = var_359, x = value_cast_fp16)[name = string("op_360_cast_fp16")];
241
+ bool attn_5_transpose_x_0 = const()[name = string("attn_5_transpose_x_0"), val = bool(false)];
242
+ bool attn_5_transpose_y_0 = const()[name = string("attn_5_transpose_y_0"), val = bool(true)];
243
+ tensor<fp16, [1, 20, 64, 1]> attn_5_cast_fp16 = matmul(transpose_x = attn_5_transpose_x_0, transpose_y = attn_5_transpose_y_0, x = var_360_cast_fp16, y = var_358_cast_fp16)[name = string("attn_5_cast_fp16")];
244
+ tensor<int32, [4]> var_363 = const()[name = string("op_363"), val = tensor<int32, [4]>([1, 1280, 1, -1])];
245
+ tensor<fp16, [1, 1280, 1, 1]> input_11_cast_fp16 = reshape(shape = var_363, x = attn_5_cast_fp16)[name = string("input_11_cast_fp16")];
246
+ string obj_31_pad_type_0 = const()[name = string("obj_31_pad_type_0"), val = string("valid")];
247
+ tensor<int32, [2]> obj_31_strides_0 = const()[name = string("obj_31_strides_0"), val = tensor<int32, [2]>([1, 1])];
248
+ tensor<int32, [4]> obj_31_pad_0 = const()[name = string("obj_31_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
249
+ tensor<int32, [2]> obj_31_dilations_0 = const()[name = string("obj_31_dilations_0"), val = tensor<int32, [2]>([1, 1])];
250
+ int32 obj_31_groups_0 = const()[name = string("obj_31_groups_0"), val = int32(1)];
251
+ tensor<fp16, [1280, 1280, 1, 1]> layers_1_self_attn_o_proj_weight_to_fp16 = const()[name = string("layers_1_self_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(189687872)))];
252
+ tensor<fp16, [1280]> layers_1_self_attn_o_proj_bias_to_fp16 = const()[name = string("layers_1_self_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(192964736)))];
253
+ tensor<fp16, [1, 1280, 1, 1]> obj_31_cast_fp16 = conv(bias = layers_1_self_attn_o_proj_bias_to_fp16, dilations = obj_31_dilations_0, groups = obj_31_groups_0, pad = obj_31_pad_0, pad_type = obj_31_pad_type_0, strides = obj_31_strides_0, weight = layers_1_self_attn_o_proj_weight_to_fp16, x = input_11_cast_fp16)[name = string("obj_31_cast_fp16")];
254
+ tensor<fp16, [1, 1280, 1, 1]> inputs_9_cast_fp16 = add(x = inputs_7_cast_fp16, y = obj_31_cast_fp16)[name = string("inputs_9_cast_fp16")];
255
+ tensor<int32, [1]> out_9_axes_0 = const()[name = string("out_9_axes_0"), val = tensor<int32, [1]>([1])];
256
+ fp16 var_385_to_fp16 = const()[name = string("op_385_to_fp16"), val = fp16(0x1.5p-17)];
257
+ tensor<fp16, [1, 1280, 1, 1]> out_9_cast_fp16 = layer_norm(axes = out_9_axes_0, epsilon = var_385_to_fp16, x = inputs_9_cast_fp16)[name = string("out_9_cast_fp16")];
258
+ tensor<fp16, [1280]> obj_33_gamma_0_to_fp16 = const()[name = string("obj_33_gamma_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(192967360)))];
259
+ tensor<fp16, [1280]> obj_33_beta_0_to_fp16 = const()[name = string("obj_33_beta_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(192969984)))];
260
+ fp16 obj_33_epsilon_0_to_fp16 = const()[name = string("obj_33_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)];
261
+ tensor<fp16, [1, 1280, 1, 1]> obj_33_cast_fp16 = batch_norm(beta = obj_33_beta_0_to_fp16, epsilon = obj_33_epsilon_0_to_fp16, gamma = obj_33_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_9_cast_fp16)[name = string("obj_33_cast_fp16")];
262
+ string query_pad_type_0 = const()[name = string("query_pad_type_0"), val = string("valid")];
263
+ tensor<int32, [2]> query_strides_0 = const()[name = string("query_strides_0"), val = tensor<int32, [2]>([1, 1])];
264
+ tensor<int32, [4]> query_pad_0 = const()[name = string("query_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
265
+ tensor<int32, [2]> query_dilations_0 = const()[name = string("query_dilations_0"), val = tensor<int32, [2]>([1, 1])];
266
+ int32 query_groups_0 = const()[name = string("query_groups_0"), val = int32(1)];
267
+ tensor<fp16, [1280, 1280, 1, 1]> layers_1_encoder_attn_q_proj_weight_to_fp16 = const()[name = string("layers_1_encoder_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(192972608)))];
268
+ tensor<fp16, [1280]> layers_1_encoder_attn_q_proj_bias_to_fp16 = const()[name = string("layers_1_encoder_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(196249472)))];
269
+ tensor<fp16, [1, 1280, 1, 1]> query_cast_fp16 = conv(bias = layers_1_encoder_attn_q_proj_bias_to_fp16, dilations = query_dilations_0, groups = query_groups_0, pad = query_pad_0, pad_type = query_pad_type_0, strides = query_strides_0, weight = layers_1_encoder_attn_q_proj_weight_to_fp16, x = obj_33_cast_fp16)[name = string("query_cast_fp16")];
270
+ tensor<int32, [4]> var_405 = const()[name = string("op_405"), val = tensor<int32, [4]>([1, 20, 64, -1])];
271
+ tensor<fp16, [1, 20, 64, 1]> mh_q_cast_fp16 = reshape(shape = var_405, x = query_cast_fp16)[name = string("mh_q_cast_fp16")];
272
+ fp16 var_407_to_fp16 = const()[name = string("op_407_to_fp16"), val = fp16(0x1p-3)];
273
+ tensor<fp16, [1, 20, 64, 1]> var_408_cast_fp16 = mul(x = mh_q_cast_fp16, y = var_407_to_fp16)[name = string("op_408_cast_fp16")];
274
+ tensor<int32, [4]> var_409 = const()[name = string("op_409"), val = tensor<int32, [4]>([1, 20, 64, -1])];
275
+ tensor<fp16, [1, 20, 64, 1536]> var_410_cast_fp16 = reshape(shape = var_409, x = obj_35_cast_fp16)[name = string("op_410_cast_fp16")];
276
+ bool mh_w_13_transpose_x_0 = const()[name = string("mh_w_13_transpose_x_0"), val = bool(true)];
277
+ bool mh_w_13_transpose_y_0 = const()[name = string("mh_w_13_transpose_y_0"), val = bool(false)];
278
+ tensor<fp16, [1, 20, 1, 1536]> mh_w_13_cast_fp16 = matmul(transpose_x = mh_w_13_transpose_x_0, transpose_y = mh_w_13_transpose_y_0, x = var_408_cast_fp16, y = var_410_cast_fp16)[name = string("mh_w_13_cast_fp16")];
279
+ tensor<fp16, [1, 20, 1, 1536]> mh_w_cast_fp16 = add(x = mh_w_13_cast_fp16, y = var_206_cast_fp16)[name = string("mh_w_cast_fp16")];
280
+ tensor<fp16, [1, 20, 1, 1536]> obj_41_cast_fp16 = softmax(axis = var_277, x = mh_w_cast_fp16)[name = string("obj_41_cast_fp16")];
281
+ tensor<int32, [4]> var_419 = const()[name = string("op_419"), val = tensor<int32, [4]>([1, 20, 64, -1])];
282
+ tensor<fp16, [1, 20, 64, 1536]> var_420_cast_fp16 = reshape(shape = var_419, x = obj_37_cast_fp16)[name = string("op_420_cast_fp16")];
283
+ bool attn_transpose_x_0 = const()[name = string("attn_transpose_x_0"), val = bool(false)];
284
+ bool attn_transpose_y_0 = const()[name = string("attn_transpose_y_0"), val = bool(true)];
285
+ tensor<fp16, [1, 20, 64, 1]> attn_cast_fp16 = matmul(transpose_x = attn_transpose_x_0, transpose_y = attn_transpose_y_0, x = var_420_cast_fp16, y = obj_41_cast_fp16)[name = string("attn_cast_fp16")];
286
+ tensor<int32, [4]> var_423 = const()[name = string("op_423"), val = tensor<int32, [4]>([1, 1280, 1, -1])];
287
+ tensor<fp16, [1, 1280, 1, 1]> input_13_cast_fp16 = reshape(shape = var_423, x = attn_cast_fp16)[name = string("input_13_cast_fp16")];
288
+ string obj_39_pad_type_0 = const()[name = string("obj_39_pad_type_0"), val = string("valid")];
289
+ tensor<int32, [2]> obj_39_strides_0 = const()[name = string("obj_39_strides_0"), val = tensor<int32, [2]>([1, 1])];
290
+ tensor<int32, [4]> obj_39_pad_0 = const()[name = string("obj_39_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
291
+ tensor<int32, [2]> obj_39_dilations_0 = const()[name = string("obj_39_dilations_0"), val = tensor<int32, [2]>([1, 1])];
292
+ int32 obj_39_groups_0 = const()[name = string("obj_39_groups_0"), val = int32(1)];
293
+ tensor<fp16, [1280, 1280, 1, 1]> layers_1_encoder_attn_o_proj_weight_to_fp16 = const()[name = string("layers_1_encoder_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(196252096)))];
294
+ tensor<fp16, [1280]> layers_1_encoder_attn_o_proj_bias_to_fp16 = const()[name = string("layers_1_encoder_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(199528960)))];
295
+ tensor<fp16, [1, 1280, 1, 1]> obj_39_cast_fp16 = conv(bias = layers_1_encoder_attn_o_proj_bias_to_fp16, dilations = obj_39_dilations_0, groups = obj_39_groups_0, pad = obj_39_pad_0, pad_type = obj_39_pad_type_0, strides = obj_39_strides_0, weight = layers_1_encoder_attn_o_proj_weight_to_fp16, x = input_13_cast_fp16)[name = string("obj_39_cast_fp16")];
296
+ tensor<fp16, [1, 1280, 1, 1]> inputs_11_cast_fp16 = add(x = inputs_9_cast_fp16, y = obj_39_cast_fp16)[name = string("inputs_11_cast_fp16")];
297
+ tensor<int32, [1]> out_11_axes_0 = const()[name = string("out_11_axes_0"), val = tensor<int32, [1]>([1])];
298
+ fp16 var_444_to_fp16 = const()[name = string("op_444_to_fp16"), val = fp16(0x1.5p-17)];
299
+ tensor<fp16, [1, 1280, 1, 1]> out_11_cast_fp16 = layer_norm(axes = out_11_axes_0, epsilon = var_444_to_fp16, x = inputs_11_cast_fp16)[name = string("out_11_cast_fp16")];
300
+ tensor<fp16, [1280]> input_15_gamma_0_to_fp16 = const()[name = string("input_15_gamma_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(199531584)))];
301
+ tensor<fp16, [1280]> input_15_beta_0_to_fp16 = const()[name = string("input_15_beta_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(199534208)))];
302
+ fp16 input_15_epsilon_0_to_fp16 = const()[name = string("input_15_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)];
303
+ tensor<fp16, [1, 1280, 1, 1]> input_15_cast_fp16 = batch_norm(beta = input_15_beta_0_to_fp16, epsilon = input_15_epsilon_0_to_fp16, gamma = input_15_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_11_cast_fp16)[name = string("input_15_cast_fp16")];
304
+ string input_17_pad_type_0 = const()[name = string("input_17_pad_type_0"), val = string("valid")];
305
+ tensor<int32, [2]> input_17_strides_0 = const()[name = string("input_17_strides_0"), val = tensor<int32, [2]>([1, 1])];
306
+ tensor<int32, [4]> input_17_pad_0 = const()[name = string("input_17_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
307
+ tensor<int32, [2]> input_17_dilations_0 = const()[name = string("input_17_dilations_0"), val = tensor<int32, [2]>([1, 1])];
308
+ int32 input_17_groups_0 = const()[name = string("input_17_groups_0"), val = int32(1)];
309
+ tensor<fp16, [5120, 1280, 1, 1]> layers_1_fc1_weight_to_fp16 = const()[name = string("layers_1_fc1_weight_to_fp16"), val = tensor<fp16, [5120, 1280, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(199536832)))];
310
+ tensor<fp16, [5120]> layers_1_fc1_bias_to_fp16 = const()[name = string("layers_1_fc1_bias_to_fp16"), val = tensor<fp16, [5120]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(212644096)))];
311
+ tensor<fp16, [1, 5120, 1, 1]> input_17_cast_fp16 = conv(bias = layers_1_fc1_bias_to_fp16, dilations = input_17_dilations_0, groups = input_17_groups_0, pad = input_17_pad_0, pad_type = input_17_pad_type_0, strides = input_17_strides_0, weight = layers_1_fc1_weight_to_fp16, x = input_15_cast_fp16)[name = string("input_17_cast_fp16")];
312
+ string input_mode_0 = const()[name = string("input_mode_0"), val = string("EXACT")];
313
+ tensor<fp16, [1, 5120, 1, 1]> input_cast_fp16 = gelu(mode = input_mode_0, x = input_17_cast_fp16)[name = string("input_cast_fp16")];
314
+ string hidden_states_5_pad_type_0 = const()[name = string("hidden_states_5_pad_type_0"), val = string("valid")];
315
+ tensor<int32, [2]> hidden_states_5_strides_0 = const()[name = string("hidden_states_5_strides_0"), val = tensor<int32, [2]>([1, 1])];
316
+ tensor<int32, [4]> hidden_states_5_pad_0 = const()[name = string("hidden_states_5_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
317
+ tensor<int32, [2]> hidden_states_5_dilations_0 = const()[name = string("hidden_states_5_dilations_0"), val = tensor<int32, [2]>([1, 1])];
318
+ int32 hidden_states_5_groups_0 = const()[name = string("hidden_states_5_groups_0"), val = int32(1)];
319
+ tensor<fp16, [1280, 5120, 1, 1]> layers_1_fc2_weight_to_fp16 = const()[name = string("layers_1_fc2_weight_to_fp16"), val = tensor<fp16, [1280, 5120, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(212654400)))];
320
+ tensor<fp16, [1280]> layers_1_fc2_bias_to_fp16 = const()[name = string("layers_1_fc2_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(225761664)))];
321
+ tensor<fp16, [1, 1280, 1, 1]> hidden_states_5_cast_fp16 = conv(bias = layers_1_fc2_bias_to_fp16, dilations = hidden_states_5_dilations_0, groups = hidden_states_5_groups_0, pad = hidden_states_5_pad_0, pad_type = hidden_states_5_pad_type_0, strides = hidden_states_5_strides_0, weight = layers_1_fc2_weight_to_fp16, x = input_cast_fp16)[name = string("hidden_states_5_cast_fp16")];
322
+ tensor<fp16, [1, 1280, 1, 1]> inputs_cast_fp16 = add(x = inputs_11_cast_fp16, y = hidden_states_5_cast_fp16)[name = string("inputs_cast_fp16")];
323
+ tensor<int32, [1]> out_axes_0 = const()[name = string("out_axes_0"), val = tensor<int32, [1]>([1])];
324
+ fp16 var_487_to_fp16 = const()[name = string("op_487_to_fp16"), val = fp16(0x1.5p-17)];
325
+ tensor<fp16, [1, 1280, 1, 1]> out_cast_fp16 = layer_norm(axes = out_axes_0, epsilon = var_487_to_fp16, x = inputs_cast_fp16)[name = string("out_cast_fp16")];
326
+ tensor<fp16, [1280]> hidden_states_gamma_0_to_fp16 = const()[name = string("hidden_states_gamma_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(225764288)))];
327
+ tensor<fp16, [1280]> hidden_states_beta_0_to_fp16 = const()[name = string("hidden_states_beta_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(225766912)))];
328
+ fp16 hidden_states_epsilon_0_to_fp16 = const()[name = string("hidden_states_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)];
329
+ tensor<fp16, [1, 1280, 1, 1]> hidden_states_cast_fp16 = batch_norm(beta = hidden_states_beta_0_to_fp16, epsilon = hidden_states_epsilon_0_to_fp16, gamma = hidden_states_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_cast_fp16)[name = string("hidden_states_cast_fp16")];
330
+ tensor<int32, [1]> var_498_axes_0 = const()[name = string("op_498_axes_0"), val = tensor<int32, [1]>([2])];
331
+ tensor<fp16, [1, 1280, 1]> var_498_cast_fp16 = squeeze(axes = var_498_axes_0, x = hidden_states_cast_fp16)[name = string("op_498_cast_fp16")];
332
+ tensor<int32, [3]> var_501_perm_0 = const()[name = string("op_501_perm_0"), val = tensor<int32, [3]>([0, 2, 1])];
333
+ tensor<fp16, [51866]> linear_0_bias_0_to_fp16 = const()[name = string("linear_0_bias_0_to_fp16"), val = tensor<fp16, [51866]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(225769536)))];
334
+ tensor<fp16, [1, 1, 1280]> var_501_cast_fp16 = transpose(perm = var_501_perm_0, x = var_498_cast_fp16)[name = string("transpose_0")];
335
+ tensor<fp16, [1, 1, 51866]> logits = linear(bias = linear_0_bias_0_to_fp16, weight = embed_tokens_weight_to_fp16, x = var_501_cast_fp16)[name = string("linear_0_cast_fp16")];
336
+ int32 var_505 = const()[name = string("op_505"), val = int32(1)];
337
+ bool obj_45_interleave_0 = const()[name = string("obj_45_interleave_0"), val = bool(false)];
338
+ tensor<fp16, [1, 2560, 1, 1]> key_cache_updates = concat(axis = var_505, interleave = obj_45_interleave_0, values = (current_key_1_cast_fp16, current_key_cast_fp16))[name = string("obj_45_cast_fp16")];
339
+ int32 var_508 = const()[name = string("op_508"), val = int32(1)];
340
+ bool obj_47_interleave_0 = const()[name = string("obj_47_interleave_0"), val = bool(false)];
341
+ tensor<fp16, [1, 2560, 1, 1]> value_cache_updates = concat(axis = var_508, interleave = obj_47_interleave_0, values = (current_value_1_cast_fp16, current_value_cast_fp16))[name = string("obj_47_cast_fp16")];
342
+ tensor<int32, [4]> var_519_begin_0 = const()[name = string("op_519_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
343
+ tensor<int32, [4]> var_519_end_0 = const()[name = string("op_519_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
344
+ tensor<bool, [4]> var_519_end_mask_0 = const()[name = string("op_519_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
345
+ tensor<fp16, [1, 1, 1, 1536]> var_519_cast_fp16 = slice_by_index(begin = var_519_begin_0, end = var_519_end_0, end_mask = var_519_end_mask_0, x = obj_41_cast_fp16)[name = string("op_519_cast_fp16")];
346
+ tensor<int32, [4]> var_522_begin_0 = const()[name = string("op_522_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
347
+ tensor<int32, [4]> var_522_end_0 = const()[name = string("op_522_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
348
+ tensor<bool, [4]> var_522_end_mask_0 = const()[name = string("op_522_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
349
+ tensor<bool, [4]> var_522_squeeze_mask_0 = const()[name = string("op_522_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
350
+ tensor<fp16, [1, 1, 1536]> var_522_cast_fp16 = slice_by_index(begin = var_522_begin_0, end = var_522_end_0, end_mask = var_522_end_mask_0, squeeze_mask = var_522_squeeze_mask_0, x = var_519_cast_fp16)[name = string("op_522_cast_fp16")];
351
+ tensor<int32, [4]> var_537_begin_0 = const()[name = string("op_537_begin_0"), val = tensor<int32, [4]>([0, 1, 0, 0])];
352
+ tensor<int32, [4]> var_537_end_0 = const()[name = string("op_537_end_0"), val = tensor<int32, [4]>([1, 2, 1, 1536])];
353
+ tensor<bool, [4]> var_537_end_mask_0 = const()[name = string("op_537_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
354
+ tensor<fp16, [1, 1, 1, 1536]> var_537_cast_fp16 = slice_by_index(begin = var_537_begin_0, end = var_537_end_0, end_mask = var_537_end_mask_0, x = obj_41_cast_fp16)[name = string("op_537_cast_fp16")];
355
+ tensor<int32, [4]> var_540_begin_0 = const()[name = string("op_540_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
356
+ tensor<int32, [4]> var_540_end_0 = const()[name = string("op_540_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
357
+ tensor<bool, [4]> var_540_end_mask_0 = const()[name = string("op_540_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
358
+ tensor<bool, [4]> var_540_squeeze_mask_0 = const()[name = string("op_540_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
359
+ tensor<fp16, [1, 1, 1536]> var_540_cast_fp16 = slice_by_index(begin = var_540_begin_0, end = var_540_end_0, end_mask = var_540_end_mask_0, squeeze_mask = var_540_squeeze_mask_0, x = var_537_cast_fp16)[name = string("op_540_cast_fp16")];
360
+ tensor<int32, [4]> var_555_begin_0 = const()[name = string("op_555_begin_0"), val = tensor<int32, [4]>([0, 2, 0, 0])];
361
+ tensor<int32, [4]> var_555_end_0 = const()[name = string("op_555_end_0"), val = tensor<int32, [4]>([1, 3, 1, 1536])];
362
+ tensor<bool, [4]> var_555_end_mask_0 = const()[name = string("op_555_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
363
+ tensor<fp16, [1, 1, 1, 1536]> var_555_cast_fp16 = slice_by_index(begin = var_555_begin_0, end = var_555_end_0, end_mask = var_555_end_mask_0, x = obj_41_cast_fp16)[name = string("op_555_cast_fp16")];
364
+ tensor<int32, [4]> var_558_begin_0 = const()[name = string("op_558_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
365
+ tensor<int32, [4]> var_558_end_0 = const()[name = string("op_558_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
366
+ tensor<bool, [4]> var_558_end_mask_0 = const()[name = string("op_558_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
367
+ tensor<bool, [4]> var_558_squeeze_mask_0 = const()[name = string("op_558_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
368
+ tensor<fp16, [1, 1, 1536]> var_558_cast_fp16 = slice_by_index(begin = var_558_begin_0, end = var_558_end_0, end_mask = var_558_end_mask_0, squeeze_mask = var_558_squeeze_mask_0, x = var_555_cast_fp16)[name = string("op_558_cast_fp16")];
369
+ tensor<int32, [4]> var_573_begin_0 = const()[name = string("op_573_begin_0"), val = tensor<int32, [4]>([0, 3, 0, 0])];
370
+ tensor<int32, [4]> var_573_end_0 = const()[name = string("op_573_end_0"), val = tensor<int32, [4]>([1, 4, 1, 1536])];
371
+ tensor<bool, [4]> var_573_end_mask_0 = const()[name = string("op_573_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
372
+ tensor<fp16, [1, 1, 1, 1536]> var_573_cast_fp16 = slice_by_index(begin = var_573_begin_0, end = var_573_end_0, end_mask = var_573_end_mask_0, x = obj_41_cast_fp16)[name = string("op_573_cast_fp16")];
373
+ tensor<int32, [4]> var_576_begin_0 = const()[name = string("op_576_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
374
+ tensor<int32, [4]> var_576_end_0 = const()[name = string("op_576_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
375
+ tensor<bool, [4]> var_576_end_mask_0 = const()[name = string("op_576_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
376
+ tensor<bool, [4]> var_576_squeeze_mask_0 = const()[name = string("op_576_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
377
+ tensor<fp16, [1, 1, 1536]> var_576_cast_fp16 = slice_by_index(begin = var_576_begin_0, end = var_576_end_0, end_mask = var_576_end_mask_0, squeeze_mask = var_576_squeeze_mask_0, x = var_573_cast_fp16)[name = string("op_576_cast_fp16")];
378
+ tensor<int32, [4]> var_591_begin_0 = const()[name = string("op_591_begin_0"), val = tensor<int32, [4]>([0, 4, 0, 0])];
379
+ tensor<int32, [4]> var_591_end_0 = const()[name = string("op_591_end_0"), val = tensor<int32, [4]>([1, 5, 1, 1536])];
380
+ tensor<bool, [4]> var_591_end_mask_0 = const()[name = string("op_591_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
381
+ tensor<fp16, [1, 1, 1, 1536]> var_591_cast_fp16 = slice_by_index(begin = var_591_begin_0, end = var_591_end_0, end_mask = var_591_end_mask_0, x = obj_41_cast_fp16)[name = string("op_591_cast_fp16")];
382
+ tensor<int32, [4]> var_594_begin_0 = const()[name = string("op_594_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
383
+ tensor<int32, [4]> var_594_end_0 = const()[name = string("op_594_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
384
+ tensor<bool, [4]> var_594_end_mask_0 = const()[name = string("op_594_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
385
+ tensor<bool, [4]> var_594_squeeze_mask_0 = const()[name = string("op_594_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
386
+ tensor<fp16, [1, 1, 1536]> var_594_cast_fp16 = slice_by_index(begin = var_594_begin_0, end = var_594_end_0, end_mask = var_594_end_mask_0, squeeze_mask = var_594_squeeze_mask_0, x = var_591_cast_fp16)[name = string("op_594_cast_fp16")];
387
+ tensor<int32, [4]> var_609_begin_0 = const()[name = string("op_609_begin_0"), val = tensor<int32, [4]>([0, 5, 0, 0])];
388
+ tensor<int32, [4]> var_609_end_0 = const()[name = string("op_609_end_0"), val = tensor<int32, [4]>([1, 6, 1, 1536])];
389
+ tensor<bool, [4]> var_609_end_mask_0 = const()[name = string("op_609_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
390
+ tensor<fp16, [1, 1, 1, 1536]> var_609_cast_fp16 = slice_by_index(begin = var_609_begin_0, end = var_609_end_0, end_mask = var_609_end_mask_0, x = obj_41_cast_fp16)[name = string("op_609_cast_fp16")];
391
+ tensor<int32, [4]> var_612_begin_0 = const()[name = string("op_612_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
392
+ tensor<int32, [4]> var_612_end_0 = const()[name = string("op_612_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
393
+ tensor<bool, [4]> var_612_end_mask_0 = const()[name = string("op_612_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
394
+ tensor<bool, [4]> var_612_squeeze_mask_0 = const()[name = string("op_612_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
395
+ tensor<fp16, [1, 1, 1536]> var_612_cast_fp16 = slice_by_index(begin = var_612_begin_0, end = var_612_end_0, end_mask = var_612_end_mask_0, squeeze_mask = var_612_squeeze_mask_0, x = var_609_cast_fp16)[name = string("op_612_cast_fp16")];
396
+ tensor<int32, [4]> var_627_begin_0 = const()[name = string("op_627_begin_0"), val = tensor<int32, [4]>([0, 6, 0, 0])];
397
+ tensor<int32, [4]> var_627_end_0 = const()[name = string("op_627_end_0"), val = tensor<int32, [4]>([1, 7, 1, 1536])];
398
+ tensor<bool, [4]> var_627_end_mask_0 = const()[name = string("op_627_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
399
+ tensor<fp16, [1, 1, 1, 1536]> var_627_cast_fp16 = slice_by_index(begin = var_627_begin_0, end = var_627_end_0, end_mask = var_627_end_mask_0, x = obj_41_cast_fp16)[name = string("op_627_cast_fp16")];
400
+ tensor<int32, [4]> var_630_begin_0 = const()[name = string("op_630_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
401
+ tensor<int32, [4]> var_630_end_0 = const()[name = string("op_630_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
402
+ tensor<bool, [4]> var_630_end_mask_0 = const()[name = string("op_630_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
403
+ tensor<bool, [4]> var_630_squeeze_mask_0 = const()[name = string("op_630_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
404
+ tensor<fp16, [1, 1, 1536]> var_630_cast_fp16 = slice_by_index(begin = var_630_begin_0, end = var_630_end_0, end_mask = var_630_end_mask_0, squeeze_mask = var_630_squeeze_mask_0, x = var_627_cast_fp16)[name = string("op_630_cast_fp16")];
405
+ tensor<int32, [4]> var_645_begin_0 = const()[name = string("op_645_begin_0"), val = tensor<int32, [4]>([0, 7, 0, 0])];
406
+ tensor<int32, [4]> var_645_end_0 = const()[name = string("op_645_end_0"), val = tensor<int32, [4]>([1, 8, 1, 1536])];
407
+ tensor<bool, [4]> var_645_end_mask_0 = const()[name = string("op_645_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
408
+ tensor<fp16, [1, 1, 1, 1536]> var_645_cast_fp16 = slice_by_index(begin = var_645_begin_0, end = var_645_end_0, end_mask = var_645_end_mask_0, x = obj_41_cast_fp16)[name = string("op_645_cast_fp16")];
409
+ tensor<int32, [4]> var_648_begin_0 = const()[name = string("op_648_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
410
+ tensor<int32, [4]> var_648_end_0 = const()[name = string("op_648_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
411
+ tensor<bool, [4]> var_648_end_mask_0 = const()[name = string("op_648_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
412
+ tensor<bool, [4]> var_648_squeeze_mask_0 = const()[name = string("op_648_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
413
+ tensor<fp16, [1, 1, 1536]> var_648_cast_fp16 = slice_by_index(begin = var_648_begin_0, end = var_648_end_0, end_mask = var_648_end_mask_0, squeeze_mask = var_648_squeeze_mask_0, x = var_645_cast_fp16)[name = string("op_648_cast_fp16")];
414
+ tensor<int32, [4]> var_663_begin_0 = const()[name = string("op_663_begin_0"), val = tensor<int32, [4]>([0, 8, 0, 0])];
415
+ tensor<int32, [4]> var_663_end_0 = const()[name = string("op_663_end_0"), val = tensor<int32, [4]>([1, 9, 1, 1536])];
416
+ tensor<bool, [4]> var_663_end_mask_0 = const()[name = string("op_663_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
417
+ tensor<fp16, [1, 1, 1, 1536]> var_663_cast_fp16 = slice_by_index(begin = var_663_begin_0, end = var_663_end_0, end_mask = var_663_end_mask_0, x = obj_41_cast_fp16)[name = string("op_663_cast_fp16")];
418
+ tensor<int32, [4]> var_666_begin_0 = const()[name = string("op_666_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
419
+ tensor<int32, [4]> var_666_end_0 = const()[name = string("op_666_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
420
+ tensor<bool, [4]> var_666_end_mask_0 = const()[name = string("op_666_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
421
+ tensor<bool, [4]> var_666_squeeze_mask_0 = const()[name = string("op_666_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
422
+ tensor<fp16, [1, 1, 1536]> var_666_cast_fp16 = slice_by_index(begin = var_666_begin_0, end = var_666_end_0, end_mask = var_666_end_mask_0, squeeze_mask = var_666_squeeze_mask_0, x = var_663_cast_fp16)[name = string("op_666_cast_fp16")];
423
+ tensor<int32, [4]> var_681_begin_0 = const()[name = string("op_681_begin_0"), val = tensor<int32, [4]>([0, 9, 0, 0])];
424
+ tensor<int32, [4]> var_681_end_0 = const()[name = string("op_681_end_0"), val = tensor<int32, [4]>([1, 10, 1, 1536])];
425
+ tensor<bool, [4]> var_681_end_mask_0 = const()[name = string("op_681_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
426
+ tensor<fp16, [1, 1, 1, 1536]> var_681_cast_fp16 = slice_by_index(begin = var_681_begin_0, end = var_681_end_0, end_mask = var_681_end_mask_0, x = obj_41_cast_fp16)[name = string("op_681_cast_fp16")];
427
+ tensor<int32, [4]> var_684_begin_0 = const()[name = string("op_684_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
428
+ tensor<int32, [4]> var_684_end_0 = const()[name = string("op_684_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
429
+ tensor<bool, [4]> var_684_end_mask_0 = const()[name = string("op_684_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
430
+ tensor<bool, [4]> var_684_squeeze_mask_0 = const()[name = string("op_684_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
431
+ tensor<fp16, [1, 1, 1536]> var_684_cast_fp16 = slice_by_index(begin = var_684_begin_0, end = var_684_end_0, end_mask = var_684_end_mask_0, squeeze_mask = var_684_squeeze_mask_0, x = var_681_cast_fp16)[name = string("op_684_cast_fp16")];
432
+ tensor<int32, [4]> var_699_begin_0 = const()[name = string("op_699_begin_0"), val = tensor<int32, [4]>([0, 10, 0, 0])];
433
+ tensor<int32, [4]> var_699_end_0 = const()[name = string("op_699_end_0"), val = tensor<int32, [4]>([1, 11, 1, 1536])];
434
+ tensor<bool, [4]> var_699_end_mask_0 = const()[name = string("op_699_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
435
+ tensor<fp16, [1, 1, 1, 1536]> var_699_cast_fp16 = slice_by_index(begin = var_699_begin_0, end = var_699_end_0, end_mask = var_699_end_mask_0, x = obj_41_cast_fp16)[name = string("op_699_cast_fp16")];
436
+ tensor<int32, [4]> var_702_begin_0 = const()[name = string("op_702_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
437
+ tensor<int32, [4]> var_702_end_0 = const()[name = string("op_702_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
438
+ tensor<bool, [4]> var_702_end_mask_0 = const()[name = string("op_702_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
439
+ tensor<bool, [4]> var_702_squeeze_mask_0 = const()[name = string("op_702_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
440
+ tensor<fp16, [1, 1, 1536]> var_702_cast_fp16 = slice_by_index(begin = var_702_begin_0, end = var_702_end_0, end_mask = var_702_end_mask_0, squeeze_mask = var_702_squeeze_mask_0, x = var_699_cast_fp16)[name = string("op_702_cast_fp16")];
441
+ tensor<int32, [4]> var_717_begin_0 = const()[name = string("op_717_begin_0"), val = tensor<int32, [4]>([0, 11, 0, 0])];
442
+ tensor<int32, [4]> var_717_end_0 = const()[name = string("op_717_end_0"), val = tensor<int32, [4]>([1, 12, 1, 1536])];
443
+ tensor<bool, [4]> var_717_end_mask_0 = const()[name = string("op_717_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
444
+ tensor<fp16, [1, 1, 1, 1536]> var_717_cast_fp16 = slice_by_index(begin = var_717_begin_0, end = var_717_end_0, end_mask = var_717_end_mask_0, x = obj_41_cast_fp16)[name = string("op_717_cast_fp16")];
445
+ tensor<int32, [4]> var_720_begin_0 = const()[name = string("op_720_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
446
+ tensor<int32, [4]> var_720_end_0 = const()[name = string("op_720_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
447
+ tensor<bool, [4]> var_720_end_mask_0 = const()[name = string("op_720_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
448
+ tensor<bool, [4]> var_720_squeeze_mask_0 = const()[name = string("op_720_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
449
+ tensor<fp16, [1, 1, 1536]> var_720_cast_fp16 = slice_by_index(begin = var_720_begin_0, end = var_720_end_0, end_mask = var_720_end_mask_0, squeeze_mask = var_720_squeeze_mask_0, x = var_717_cast_fp16)[name = string("op_720_cast_fp16")];
450
+ tensor<int32, [4]> var_735_begin_0 = const()[name = string("op_735_begin_0"), val = tensor<int32, [4]>([0, 12, 0, 0])];
451
+ tensor<int32, [4]> var_735_end_0 = const()[name = string("op_735_end_0"), val = tensor<int32, [4]>([1, 13, 1, 1536])];
452
+ tensor<bool, [4]> var_735_end_mask_0 = const()[name = string("op_735_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
453
+ tensor<fp16, [1, 1, 1, 1536]> var_735_cast_fp16 = slice_by_index(begin = var_735_begin_0, end = var_735_end_0, end_mask = var_735_end_mask_0, x = obj_41_cast_fp16)[name = string("op_735_cast_fp16")];
454
+ tensor<int32, [4]> var_738_begin_0 = const()[name = string("op_738_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
455
+ tensor<int32, [4]> var_738_end_0 = const()[name = string("op_738_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
456
+ tensor<bool, [4]> var_738_end_mask_0 = const()[name = string("op_738_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
457
+ tensor<bool, [4]> var_738_squeeze_mask_0 = const()[name = string("op_738_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
458
+ tensor<fp16, [1, 1, 1536]> var_738_cast_fp16 = slice_by_index(begin = var_738_begin_0, end = var_738_end_0, end_mask = var_738_end_mask_0, squeeze_mask = var_738_squeeze_mask_0, x = var_735_cast_fp16)[name = string("op_738_cast_fp16")];
459
+ tensor<int32, [4]> var_753_begin_0 = const()[name = string("op_753_begin_0"), val = tensor<int32, [4]>([0, 13, 0, 0])];
460
+ tensor<int32, [4]> var_753_end_0 = const()[name = string("op_753_end_0"), val = tensor<int32, [4]>([1, 14, 1, 1536])];
461
+ tensor<bool, [4]> var_753_end_mask_0 = const()[name = string("op_753_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
462
+ tensor<fp16, [1, 1, 1, 1536]> var_753_cast_fp16 = slice_by_index(begin = var_753_begin_0, end = var_753_end_0, end_mask = var_753_end_mask_0, x = obj_41_cast_fp16)[name = string("op_753_cast_fp16")];
463
+ tensor<int32, [4]> var_756_begin_0 = const()[name = string("op_756_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
464
+ tensor<int32, [4]> var_756_end_0 = const()[name = string("op_756_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
465
+ tensor<bool, [4]> var_756_end_mask_0 = const()[name = string("op_756_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
466
+ tensor<bool, [4]> var_756_squeeze_mask_0 = const()[name = string("op_756_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
467
+ tensor<fp16, [1, 1, 1536]> var_756_cast_fp16 = slice_by_index(begin = var_756_begin_0, end = var_756_end_0, end_mask = var_756_end_mask_0, squeeze_mask = var_756_squeeze_mask_0, x = var_753_cast_fp16)[name = string("op_756_cast_fp16")];
468
+ tensor<int32, [4]> var_771_begin_0 = const()[name = string("op_771_begin_0"), val = tensor<int32, [4]>([0, 14, 0, 0])];
469
+ tensor<int32, [4]> var_771_end_0 = const()[name = string("op_771_end_0"), val = tensor<int32, [4]>([1, 15, 1, 1536])];
470
+ tensor<bool, [4]> var_771_end_mask_0 = const()[name = string("op_771_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
471
+ tensor<fp16, [1, 1, 1, 1536]> var_771_cast_fp16 = slice_by_index(begin = var_771_begin_0, end = var_771_end_0, end_mask = var_771_end_mask_0, x = obj_41_cast_fp16)[name = string("op_771_cast_fp16")];
472
+ tensor<int32, [4]> var_774_begin_0 = const()[name = string("op_774_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
473
+ tensor<int32, [4]> var_774_end_0 = const()[name = string("op_774_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
474
+ tensor<bool, [4]> var_774_end_mask_0 = const()[name = string("op_774_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
475
+ tensor<bool, [4]> var_774_squeeze_mask_0 = const()[name = string("op_774_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
476
+ tensor<fp16, [1, 1, 1536]> var_774_cast_fp16 = slice_by_index(begin = var_774_begin_0, end = var_774_end_0, end_mask = var_774_end_mask_0, squeeze_mask = var_774_squeeze_mask_0, x = var_771_cast_fp16)[name = string("op_774_cast_fp16")];
477
+ tensor<int32, [4]> var_789_begin_0 = const()[name = string("op_789_begin_0"), val = tensor<int32, [4]>([0, 15, 0, 0])];
478
+ tensor<int32, [4]> var_789_end_0 = const()[name = string("op_789_end_0"), val = tensor<int32, [4]>([1, 16, 1, 1536])];
479
+ tensor<bool, [4]> var_789_end_mask_0 = const()[name = string("op_789_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
480
+ tensor<fp16, [1, 1, 1, 1536]> var_789_cast_fp16 = slice_by_index(begin = var_789_begin_0, end = var_789_end_0, end_mask = var_789_end_mask_0, x = obj_41_cast_fp16)[name = string("op_789_cast_fp16")];
481
+ tensor<int32, [4]> var_792_begin_0 = const()[name = string("op_792_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
482
+ tensor<int32, [4]> var_792_end_0 = const()[name = string("op_792_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
483
+ tensor<bool, [4]> var_792_end_mask_0 = const()[name = string("op_792_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
484
+ tensor<bool, [4]> var_792_squeeze_mask_0 = const()[name = string("op_792_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
485
+ tensor<fp16, [1, 1, 1536]> var_792_cast_fp16 = slice_by_index(begin = var_792_begin_0, end = var_792_end_0, end_mask = var_792_end_mask_0, squeeze_mask = var_792_squeeze_mask_0, x = var_789_cast_fp16)[name = string("op_792_cast_fp16")];
486
+ tensor<int32, [4]> var_807_begin_0 = const()[name = string("op_807_begin_0"), val = tensor<int32, [4]>([0, 16, 0, 0])];
487
+ tensor<int32, [4]> var_807_end_0 = const()[name = string("op_807_end_0"), val = tensor<int32, [4]>([1, 17, 1, 1536])];
488
+ tensor<bool, [4]> var_807_end_mask_0 = const()[name = string("op_807_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
489
+ tensor<fp16, [1, 1, 1, 1536]> var_807_cast_fp16 = slice_by_index(begin = var_807_begin_0, end = var_807_end_0, end_mask = var_807_end_mask_0, x = obj_41_cast_fp16)[name = string("op_807_cast_fp16")];
490
+ tensor<int32, [4]> var_810_begin_0 = const()[name = string("op_810_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
491
+ tensor<int32, [4]> var_810_end_0 = const()[name = string("op_810_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
492
+ tensor<bool, [4]> var_810_end_mask_0 = const()[name = string("op_810_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
493
+ tensor<bool, [4]> var_810_squeeze_mask_0 = const()[name = string("op_810_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
494
+ tensor<fp16, [1, 1, 1536]> var_810_cast_fp16 = slice_by_index(begin = var_810_begin_0, end = var_810_end_0, end_mask = var_810_end_mask_0, squeeze_mask = var_810_squeeze_mask_0, x = var_807_cast_fp16)[name = string("op_810_cast_fp16")];
495
+ tensor<int32, [4]> var_825_begin_0 = const()[name = string("op_825_begin_0"), val = tensor<int32, [4]>([0, 17, 0, 0])];
496
+ tensor<int32, [4]> var_825_end_0 = const()[name = string("op_825_end_0"), val = tensor<int32, [4]>([1, 18, 1, 1536])];
497
+ tensor<bool, [4]> var_825_end_mask_0 = const()[name = string("op_825_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
498
+ tensor<fp16, [1, 1, 1, 1536]> var_825_cast_fp16 = slice_by_index(begin = var_825_begin_0, end = var_825_end_0, end_mask = var_825_end_mask_0, x = obj_41_cast_fp16)[name = string("op_825_cast_fp16")];
499
+ tensor<int32, [4]> var_828_begin_0 = const()[name = string("op_828_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
500
+ tensor<int32, [4]> var_828_end_0 = const()[name = string("op_828_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
501
+ tensor<bool, [4]> var_828_end_mask_0 = const()[name = string("op_828_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
502
+ tensor<bool, [4]> var_828_squeeze_mask_0 = const()[name = string("op_828_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
503
+ tensor<fp16, [1, 1, 1536]> var_828_cast_fp16 = slice_by_index(begin = var_828_begin_0, end = var_828_end_0, end_mask = var_828_end_mask_0, squeeze_mask = var_828_squeeze_mask_0, x = var_825_cast_fp16)[name = string("op_828_cast_fp16")];
504
+ tensor<int32, [4]> var_843_begin_0 = const()[name = string("op_843_begin_0"), val = tensor<int32, [4]>([0, 18, 0, 0])];
505
+ tensor<int32, [4]> var_843_end_0 = const()[name = string("op_843_end_0"), val = tensor<int32, [4]>([1, 19, 1, 1536])];
506
+ tensor<bool, [4]> var_843_end_mask_0 = const()[name = string("op_843_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
507
+ tensor<fp16, [1, 1, 1, 1536]> var_843_cast_fp16 = slice_by_index(begin = var_843_begin_0, end = var_843_end_0, end_mask = var_843_end_mask_0, x = obj_41_cast_fp16)[name = string("op_843_cast_fp16")];
508
+ tensor<int32, [4]> var_846_begin_0 = const()[name = string("op_846_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
509
+ tensor<int32, [4]> var_846_end_0 = const()[name = string("op_846_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
510
+ tensor<bool, [4]> var_846_end_mask_0 = const()[name = string("op_846_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
511
+ tensor<bool, [4]> var_846_squeeze_mask_0 = const()[name = string("op_846_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
512
+ tensor<fp16, [1, 1, 1536]> var_846_cast_fp16 = slice_by_index(begin = var_846_begin_0, end = var_846_end_0, end_mask = var_846_end_mask_0, squeeze_mask = var_846_squeeze_mask_0, x = var_843_cast_fp16)[name = string("op_846_cast_fp16")];
513
+ tensor<int32, [4]> var_861_begin_0 = const()[name = string("op_861_begin_0"), val = tensor<int32, [4]>([0, 19, 0, 0])];
514
+ tensor<int32, [4]> var_861_end_0 = const()[name = string("op_861_end_0"), val = tensor<int32, [4]>([1, 20, 1, 1536])];
515
+ tensor<bool, [4]> var_861_end_mask_0 = const()[name = string("op_861_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
516
+ tensor<fp16, [1, 1, 1, 1536]> var_861_cast_fp16 = slice_by_index(begin = var_861_begin_0, end = var_861_end_0, end_mask = var_861_end_mask_0, x = obj_41_cast_fp16)[name = string("op_861_cast_fp16")];
517
+ tensor<int32, [4]> var_864_begin_0 = const()[name = string("op_864_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
518
+ tensor<int32, [4]> var_864_end_0 = const()[name = string("op_864_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
519
+ tensor<bool, [4]> var_864_end_mask_0 = const()[name = string("op_864_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
520
+ tensor<bool, [4]> var_864_squeeze_mask_0 = const()[name = string("op_864_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
521
+ tensor<fp16, [1, 1, 1536]> var_864_cast_fp16 = slice_by_index(begin = var_864_begin_0, end = var_864_end_0, end_mask = var_864_end_mask_0, squeeze_mask = var_864_squeeze_mask_0, x = var_861_cast_fp16)[name = string("op_864_cast_fp16")];
522
+ int32 var_871 = const()[name = string("op_871"), val = int32(1)];
523
+ bool var_872_interleave_0 = const()[name = string("op_872_interleave_0"), val = bool(false)];
524
+ tensor<fp16, [1, 20, 1536]> var_872_cast_fp16 = concat(axis = var_871, interleave = var_872_interleave_0, values = (var_522_cast_fp16, var_540_cast_fp16, var_558_cast_fp16, var_576_cast_fp16, var_594_cast_fp16, var_612_cast_fp16, var_630_cast_fp16, var_648_cast_fp16, var_666_cast_fp16, var_684_cast_fp16, var_702_cast_fp16, var_720_cast_fp16, var_738_cast_fp16, var_756_cast_fp16, var_774_cast_fp16, var_792_cast_fp16, var_810_cast_fp16, var_828_cast_fp16, var_846_cast_fp16, var_864_cast_fp16))[name = string("op_872_cast_fp16")];
525
+ bool var_875 = const()[name = string("op_875"), val = bool(false)];
526
+ tensor<int32, [1]> obj_axes_0 = const()[name = string("obj_axes_0"), val = tensor<int32, [1]>([1])];
527
+ tensor<fp16, [1, 1536]> alignment_heads_weights = reduce_mean(axes = obj_axes_0, keep_dims = var_875, x = var_872_cast_fp16)[name = string("obj_cast_fp16")];
528
+ } -> (logits, key_cache_updates, value_cache_updates, alignment_heads_weights);
529
+ }
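The tail of the program above packages one decoding step. Throughout the program, every linear layer is expressed as a 1×1 `conv` over [1, C, 1, T] tensors, which is why the weights are stored as [C_out, C_in, 1, 1] blobs. `logits` reuses the token embedding matrix (`embed_tokens_weight_to_fp16`) as a tied output projection over the 51866-token vocabulary; `key_cache_updates` and `value_cache_updates` concatenate the two decoder layers' freshly projected self-attention keys and values (1280 channels each, hence the [1, 2560, 1, 1] shapes); and `alignment_heads_weights` averages the twenty per-head slices of the layer-1 cross-attention weights (`obj_41_cast_fp16`) for downstream timestamp alignment. Below is a minimal smoke-test sketch, not a definitive harness, for driving the compiled bundle from Python with coremltools: the local path is hypothetical, input names and shapes are read from the bundle's own metadata.json rather than assumed, and the state handling is an assumption based on the `read_state` ops in the program.

```python
import json
import numpy as np
import coremltools as ct

MODEL_DIR = "distil-whisper_distil-large-v3_turbo/TextDecoder.mlmodelc"  # hypothetical local checkout

# The bundle's metadata.json lists an inputSchema like the AudioEncoder one
# shown further down in this commit: name, dataType, and a shape string.
with open(f"{MODEL_DIR}/metadata.json") as f:
    meta = json.load(f)[0]

def zeros_for(entry):
    """Build an all-zero array matching one inputSchema entry (smoke test only)."""
    shape = json.loads(entry["shape"])
    dtype = np.float16 if entry["dataType"] == "Float16" else np.int32
    return np.zeros(shape, dtype=dtype)

inputs = {entry["name"]: zeros_for(entry) for entry in meta["inputSchema"]}

decoder = ct.models.CompiledMLModel(MODEL_DIR)  # loads a compiled .mlmodelc
# Assumption: the decoder reads encoder K/V via model state (the read_state
# ops above), so a state object is created before prediction.
state = decoder.make_state()
outputs = decoder.predict(inputs, state=state)
# Result tuple per the program: logits, key_cache_updates,
# value_cache_updates, alignment_heads_weights.
print({name: arr.shape for name, arr in outputs.items()})
```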
distil-whisper_distil-large-v3_turbo/TextDecoder.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c1afaacaec2fac64e8867d758742347e10c849fdbf81c8761344b5c56a55b5d
+ size 225873332
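The binaries in this commit are stored as Git LFS pointers rather than raw bytes: each three-line stub records the LFS spec version, the SHA-256 of the actual payload, and its byte size. A sketch of validating a fetched blob against its pointer follows; the `verify_lfs_blob` helper and the local path are illustrative, not part of the repo.

```python
import hashlib
from pathlib import Path

def verify_lfs_blob(blob_path: str, expected_oid: str, expected_size: int) -> bool:
    """Check a downloaded file against its Git LFS pointer (sha256 oid + size)."""
    data = Path(blob_path).read_bytes()
    if len(data) != expected_size:
        return False
    return hashlib.sha256(data).hexdigest() == expected_oid

# Values copied from the pointer above; the local path is hypothetical.
ok = verify_lfs_blob(
    "distil-whisper_distil-large-v3_turbo/TextDecoder.mlmodelc/weights/weight.bin",
    "0c1afaacaec2fac64e8867d758742347e10c849fdbf81c8761344b5c56a55b5d",
    225873332,
)
print("weight.bin matches pointer:", ok)
```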
distil-whisper_distil-large-v3_turbo/config.json ADDED
@@ -0,0 +1 @@
+ {"_name_or_path": "./distil-large-v3", "activation_dropout": 0.0, "activation_function": "gelu", "apply_spec_augment": false, "architectures": ["WhisperForConditionalGeneration"], "attention_dropout": 0.0, "begin_suppress_tokens": [220, 50257], "bos_token_id": 50257, "classifier_proj_size": 256, "d_model": 1280, "decoder_attention_heads": 20, "decoder_ffn_dim": 5120, "decoder_layerdrop": 0.0, "decoder_layers": 2, "decoder_start_token_id": 50258, "dropout": 0.0, "encoder_attention_heads": 20, "encoder_ffn_dim": 5120, "encoder_layerdrop": 0.0, "encoder_layers": 32, "eos_token_id": 50257, "init_std": 0.02, "is_encoder_decoder": true, "mask_feature_length": 10, "mask_feature_min_masks": 0, "mask_feature_prob": 0.0, "mask_time_length": 10, "mask_time_min_masks": 2, "mask_time_prob": 0.05, "max_length": 448, "max_source_positions": 1500, "max_target_positions": 448, "median_filter_width": 7, "model_type": "whisper", "num_hidden_layers": 32, "num_mel_bins": 128, "pad_token_id": 50256, "scale_embedding": false, "torch_dtype": "float16", "transformers_version": "4.38.0.dev0", "use_cache": true, "use_weighted_layer_sum": false, "vocab_size": 51866}
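This config pins the distilled geometry visible in the MIL program above: `d_model` 1280 with 20 decoder attention heads gives the 64-wide head dimension in the [1, 20, 64, -1] reshapes, `decoder_ffn_dim` 5120 matches the fc1/fc2 conv weight shapes, `decoder_layers` 2 explains why the program only contains `layers_0` and `layers_1`, and `vocab_size` 51866 matches the logits width. A small sketch deriving those numbers (the relative path assumes a local checkout of this repo):

```python
import json

with open("distil-whisper_distil-large-v3_turbo/config.json") as f:
    cfg = json.load(f)

head_dim = cfg["d_model"] // cfg["decoder_attention_heads"]    # 1280 // 20 == 64
assert head_dim == 64                                          # matches the [1, 20, 64, -1] reshapes
assert cfg["decoder_ffn_dim"] == 4 * cfg["d_model"] == 5120    # matches the fc1/fc2 conv shapes
assert cfg["decoder_layers"] == 2                              # layers_0 and layers_1 in the MIL
print(cfg["vocab_size"])                                       # 51866, the logits width
```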
distil-whisper_distil-large-v3_turbo/generation_config.json ADDED
@@ -0,0 +1 @@
+ {"alignment_heads": [[7, 0], [10, 17], [12, 18], [13, 12], [16, 1], [17, 14], [19, 11], [21, 4], [24, 1], [25, 6]], "begin_suppress_tokens": [220, 50257], "bos_token_id": 50257, "decoder_start_token_id": 50258, "eos_token_id": 50257, "forced_decoder_ids": [[1, null], [2, 50360]], "is_multilingual": true, "lang_to_id": {"<|af|>": 50327, "<|am|>": 50334, "<|ar|>": 50272, "<|as|>": 50350, "<|az|>": 50304, "<|ba|>": 50355, "<|be|>": 50330, "<|bg|>": 50292, "<|bn|>": 50302, "<|bo|>": 50347, "<|br|>": 50309, "<|bs|>": 50315, "<|ca|>": 50270, "<|cs|>": 50283, "<|cy|>": 50297, "<|da|>": 50285, "<|de|>": 50261, "<|el|>": 50281, "<|en|>": 50259, "<|es|>": 50262, "<|et|>": 50307, "<|eu|>": 50310, "<|fa|>": 50300, "<|fi|>": 50277, "<|fo|>": 50338, "<|fr|>": 50265, "<|gl|>": 50319, "<|gu|>": 50333, "<|haw|>": 50352, "<|ha|>": 50354, "<|he|>": 50279, "<|hi|>": 50276, "<|hr|>": 50291, "<|ht|>": 50339, "<|hu|>": 50286, "<|hy|>": 50312, "<|id|>": 50275, "<|is|>": 50311, "<|it|>": 50274, "<|ja|>": 50266, "<|jw|>": 50356, "<|ka|>": 50329, "<|kk|>": 50316, "<|km|>": 50323, "<|kn|>": 50306, "<|ko|>": 50264, "<|la|>": 50294, "<|lb|>": 50345, "<|ln|>": 50353, "<|lo|>": 50336, "<|lt|>": 50293, "<|lv|>": 50301, "<|mg|>": 50349, "<|mi|>": 50295, "<|mk|>": 50308, "<|ml|>": 50296, "<|mn|>": 50314, "<|mr|>": 50320, "<|ms|>": 50282, "<|mt|>": 50343, "<|my|>": 50346, "<|ne|>": 50313, "<|nl|>": 50271, "<|nn|>": 50342, "<|no|>": 50288, "<|oc|>": 50328, "<|pa|>": 50321, "<|pl|>": 50269, "<|ps|>": 50340, "<|pt|>": 50267, "<|ro|>": 50284, "<|ru|>": 50263, "<|sa|>": 50344, "<|sd|>": 50332, "<|si|>": 50322, "<|sk|>": 50298, "<|sl|>": 50305, "<|sn|>": 50324, "<|so|>": 50326, "<|sq|>": 50317, "<|sr|>": 50303, "<|su|>": 50357, "<|sv|>": 50273, "<|sw|>": 50318, "<|ta|>": 50287, "<|te|>": 50299, "<|tg|>": 50331, "<|th|>": 50289, "<|tk|>": 50341, "<|tl|>": 50348, "<|tr|>": 50268, "<|tt|>": 50351, "<|uk|>": 50280, "<|ur|>": 50290, "<|uz|>": 50337, "<|vi|>": 50278, "<|yi|>": 50335, "<|yo|>": 50325, "<|yue|>": 50358, "<|zh|>": 50260}, "language": "<|en|>", "max_initial_timestamp_index": 50, "max_length": 448, "no_timestamps_token_id": 50364, "pad_token_id": 50257, "prev_sot_token_id": 50362, "return_timestamps": false, "suppress_tokens": [1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 359, 503, 522, 542, 873, 893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627, 3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647, 7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793, 14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675, 22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865, 42863, 47425, 49870, 50254, 50258, 50359, 50360, 50361, 50362, 50363], "task": "transcribe", "task_to_id": {"transcribe": 50360, "translate": 50359}, "transformers_version": "4.38.0.dev0"}
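The generation config carries Whisper's decoding constraints: `forced_decoder_ids` pin the task token (50360, transcribe) while leaving the language slot open, `suppress_tokens` and `begin_suppress_tokens` mask degenerate token ids, and `alignment_heads` lists the (layer, head) pairs used for timestamp alignment. A simplified sketch of applying the suppression masks to one step of logits; this is an assumption-laden illustration that omits timestamp rules and `forced_decoder_ids` handling:

```python
import json
import numpy as np

with open("distil-whisper_distil-large-v3_turbo/generation_config.json") as f:
    gen = json.load(f)

def apply_suppression(logits: np.ndarray, step: int) -> np.ndarray:
    """Mask suppressed token ids before sampling (simplified sketch)."""
    out = logits.copy()
    out[gen["suppress_tokens"]] = -np.inf
    if step == 0:  # extra mask applied only at the first sampled position
        out[gen["begin_suppress_tokens"]] = -np.inf
    return out

step_logits = np.zeros(51866, dtype=np.float32)  # vocab_size from config.json
masked = apply_suppression(step_logits, step=0)
print(np.isinf(masked).sum(), "token ids suppressed at step 0")
```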
openai_whisper-base.en/AudioEncoder.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:384d19c754b6ca6a7ad6dd457406dd9c9de44e43034cbfaf3f343e0278e43ac9
+ size 243
openai_whisper-base.en/AudioEncoder.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a536e74da525305d998542cdad99de17f18771834664969738d6fa2ab99fd115
+ size 433
openai_whisper-base.en/AudioEncoder.mlmodelc/metadata.json ADDED
@@ -0,0 +1,91 @@
+ [
+ {
+ "metadataOutputVersion" : "3.0",
+ "storagePrecision" : "Float16",
+ "outputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 512 × 1 × 1500)",
+ "shortDescription" : "",
+ "shape" : "[1, 512, 1, 1500]",
+ "name" : "encoder_output_embeds",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 6 × 512 × 1 × 1536)",
+ "shortDescription" : "",
+ "shape" : "[6, 512, 1, 1536]",
+ "name" : "encoder_attn_key_cache",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 6 × 512 × 1 × 1536)",
+ "shortDescription" : "",
+ "shape" : "[6, 512, 1, 1536]",
+ "name" : "encoder_attn_value_cache",
+ "type" : "MultiArray"
+ }
+ ],
+ "modelParameters" : [
+
+ ],
+ "specificationVersion" : 9,
+ "mlProgramOperationTypeHistogram" : {
+ "Pad" : 2,
+ "Ios18.batchNorm" : 13,
+ "Ios18.conv" : 50,
+ "Ios18.gelu" : 8,
+ "Ios18.concat" : 56,
+ "Ios16.einsum" : 384,
+ "Ios18.add" : 13,
+ "Ios18.softmax" : 192,
+ "Ios18.sliceByIndex" : 336,
+ "Ios18.layerNorm" : 13,
+ "Ios18.transpose" : 6,
+ "Ios18.mul" : 192
+ },
+ "computePrecision" : "Mixed (Float16, Int32)",
+ "isUpdatable" : "0",
+ "stateSchema" : [
+
+ ],
+ "availability" : {
+ "macOS" : "15.0",
+ "tvOS" : "18.0",
+ "visionOS" : "2.0",
+ "watchOS" : "11.0",
+ "iOS" : "18.0",
+ "macCatalyst" : "18.0"
+ },
+ "modelType" : {
+ "name" : "MLModelType_mlProgram"
+ },
+ "userDefinedMetadata" : {
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
+ "com.github.apple.coremltools.source" : "torch==2.5.1",
+ "com.github.apple.coremltools.version" : "8.0"
+ },
+ "inputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 80 × 1 × 3000)",
+ "shortDescription" : "",
+ "shape" : "[1, 80, 1, 3000]",
+ "name" : "melspectrogram_features",
+ "type" : "MultiArray"
+ }
+ ],
+ "generatedClassName" : "AudioEncoderStateful",
+ "method" : "predict"
+ }
+ ]
openai_whisper-base.en/AudioEncoder.mlmodelc/model.mil ADDED
The diff for this file is too large to render. See raw diff
openai_whisper-base.en/AudioEncoder.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cf7338475244e4dfd0df94300cf8449d6df39c592d5f9488360b59265a9cc80d
+ size 47488384
openai_whisper-base.en/LICENSE_NOTICE.txt ADDED
@@ -0,0 +1,7 @@
+ Argmax proprietary and confidential. Under NDA.
+
+ Copyright 2024 Argmax, Inc. All rights reserved.
+
+ Unauthorized access, copying, use, distribution, and or commercialization of this file, via any medium or means is strictly prohibited.
+
+ Please contact Argmax for licensing information at [email protected].
openai_whisper-base.en/MelSpectrogram.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:efc05e563ee0c556e3f578e04be5fb67b4e7520124403f2561f39102f0f2b33d
+ size 243
openai_whisper-base.en/MelSpectrogram.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e4ef11ea703011eab03287ec661f999e19c2c78cf67d531b5e6afa02e18f913d
+ size 328
openai_whisper-base.en/MelSpectrogram.mlmodelc/metadata.json ADDED
@@ -0,0 +1,74 @@
+ [
+ {
+ "metadataOutputVersion" : "3.0",
+ "storagePrecision" : "Float16",
+ "outputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 80 × 1 × 3000)",
+ "shortDescription" : "",
+ "shape" : "[1, 80, 1, 3000]",
+ "name" : "melspectrogram_features",
+ "type" : "MultiArray"
+ }
+ ],
+ "modelParameters" : [
+
+ ],
+ "specificationVersion" : 9,
+ "mlProgramOperationTypeHistogram" : {
+ "Ios18.mul" : 2,
+ "Ios18.square" : 2,
+ "Ios18.conv" : 2,
+ "Ios18.matmul" : 1,
+ "Ios18.expandDims" : 4,
+ "Ios18.sub" : 1,
+ "Ios18.log" : 1,
+ "Ios18.add" : 3,
+ "Ios18.sliceByIndex" : 1,
+ "Ios18.maximum" : 1,
+ "Ios18.squeeze" : 2,
+ "Ios18.reshape" : 2,
+ "Ios16.reduceMax" : 1,
+ "Identity" : 1,
+ "Pad" : 1
+ },
+ "computePrecision" : "Mixed (Float16, Float32, Int32)",
+ "isUpdatable" : "0",
+ "stateSchema" : [
+
+ ],
+ "availability" : {
+ "macOS" : "15.0",
+ "tvOS" : "18.0",
+ "visionOS" : "2.0",
+ "watchOS" : "11.0",
+ "iOS" : "18.0",
+ "macCatalyst" : "18.0"
+ },
+ "modelType" : {
+ "name" : "MLModelType_mlProgram"
+ },
+ "userDefinedMetadata" : {
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
+ "com.github.apple.coremltools.source" : "torch==2.5.1",
+ "com.github.apple.coremltools.version" : "8.0"
+ },
+ "inputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 480000)",
+ "shortDescription" : "",
+ "shape" : "[480000]",
+ "name" : "audio",
+ "type" : "MultiArray"
+ }
+ ],
+ "generatedClassName" : "MelSpectrogram",
+ "method" : "predict"
+ }
+ ]