# from tvm.script import ir as I # from tvm.script import tir as T # from tvm.script import relax as R @I.ir_module class Module: @T.prim_func def apply_bitmask_inplace(var_logits: T.handle, var_seq_ids: T.handle, var_bitmask: T.handle): T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.is_scheduled": T.bool(True), "tir.noalias": T.bool(True)}) batch_size, vocab_size = T.int32(is_size_var=True), T.int32(is_size_var=True) logits = T.match_buffer(var_logits, (batch_size, vocab_size)) num_seq = T.int32(is_size_var=True) seq_ids = T.match_buffer(var_seq_ids, (num_seq,), "int32") bitmask = T.match_buffer(var_bitmask, (batch_size, (vocab_size + 31) // 32), "int32") # with T.block("root"): for fused_s_v_0 in T.thread_binding((num_seq * vocab_size + 1023) // 1024, thread="blockIdx.x"): for fused_s_v_1 in T.thread_binding(1024, thread="threadIdx.x"): with T.block("block"): vs = T.axis.spatial(num_seq, (fused_s_v_0 * 1024 + fused_s_v_1) // vocab_size) vv = T.axis.spatial(vocab_size, (fused_s_v_0 * 1024 + fused_s_v_1) % vocab_size) T.where(fused_s_v_0 * 1024 + fused_s_v_1 < num_seq * vocab_size) T.reads(bitmask[seq_ids[vs], vv // 32], seq_ids[vs], logits[seq_ids[vs], vv]) T.writes(logits[seq_ids[vs], vv]) logits[seq_ids[vs], vv] = T.if_then_else(T.bitwise_and(T.shift_right(bitmask[seq_ids[vs], vv // 32], vv % 32), 1) == 1, logits[seq_ids[vs], vv], T.float32(-3.4028234663852886e+38)) @T.prim_func def apply_logit_bias_inplace(var_logits: T.handle, var_pos2seq_id: T.handle, var_token_ids: T.handle, var_logit_bias: T.handle): T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, 
"keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.is_scheduled": T.bool(True), "tir.noalias": T.bool(True)}) batch_size, vocab_size = T.int32(is_size_var=True), T.int32(is_size_var=True) logits = T.match_buffer(var_logits, (batch_size, vocab_size)) num_token = T.int32(is_size_var=True) pos2seq_id = T.match_buffer(var_pos2seq_id, (num_token,), "int32") token_ids = T.match_buffer(var_token_ids, (num_token,), "int32") logit_bias = T.match_buffer(var_logit_bias, (num_token,)) # with T.block("root"): for p0 in T.thread_binding((num_token + 1023) // 1024, thread="blockIdx.x"): for p1 in T.thread_binding(1024, thread="threadIdx.x"): with T.block("block"): vp = T.axis.spatial(num_token, p0 * 1024 + p1) T.where(p0 * 1024 + p1 < num_token) T.reads(logits[pos2seq_id[vp], token_ids[vp]], pos2seq_id[vp], token_ids[vp], logit_bias[vp]) T.writes(logits[pos2seq_id[vp], token_ids[vp]]) logits[pos2seq_id[vp], token_ids[vp]] = logits[pos2seq_id[vp], token_ids[vp]] + logit_bias[vp] @T.prim_func def apply_penalty_inplace(var_logits: T.handle, var_seq_ids: T.handle, var_pos2seq_id: T.handle, var_token_ids: T.handle, var_token_cnt: T.handle, var_penalties: T.handle): T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.is_scheduled": T.bool(True), "tir.noalias": T.bool(True)}) batch_size, vocab_size = T.int32(is_size_var=True), T.int32(is_size_var=True) logits = T.match_buffer(var_logits, (batch_size, vocab_size)) num_seq = T.int32(is_size_var=True) seq_ids = T.match_buffer(var_seq_ids, (num_seq,), "int32") num_token = T.int32(is_size_var=True) 
pos2seq_id = T.match_buffer(var_pos2seq_id, (num_token,), "int32") token_ids = T.match_buffer(var_token_ids, (num_token,), "int32") token_cnt = T.match_buffer(var_token_cnt, (num_token,), "int32") penalties = T.match_buffer(var_penalties, (num_seq, 3)) # with T.block("root"): for p0 in T.thread_binding((num_token + 1023) // 1024, thread="blockIdx.x"): for p1 in T.thread_binding(1024, thread="threadIdx.x"): with T.block("block"): vp = T.axis.spatial(num_token, p0 * 1024 + p1) T.where(p0 * 1024 + p1 < num_token) T.reads(logits[seq_ids[pos2seq_id[vp]], token_ids[vp]], seq_ids[pos2seq_id[vp]], pos2seq_id[vp], token_ids[vp], penalties[pos2seq_id[vp], 0:3], token_cnt[vp]) T.writes(logits[seq_ids[pos2seq_id[vp]], token_ids[vp]]) logits[seq_ids[pos2seq_id[vp]], token_ids[vp]] = logits[seq_ids[pos2seq_id[vp]], token_ids[vp]] - (penalties[pos2seq_id[vp], 0] + T.Cast("float32", token_cnt[vp]) * penalties[pos2seq_id[vp], 1]) logits[seq_ids[pos2seq_id[vp]], token_ids[vp]] = T.if_then_else(logits[seq_ids[pos2seq_id[vp]], token_ids[vp]] > T.float32(0), logits[seq_ids[pos2seq_id[vp]], token_ids[vp]] * penalties[pos2seq_id[vp], 2], logits[seq_ids[pos2seq_id[vp]], token_ids[vp]] / penalties[pos2seq_id[vp], 2]) @T.prim_func def batch_decode_paged_kv(_0: T.int32, Q_handle: T.handle, pages_handle: T.handle, page_table_indptr_handle: T.handle, page_table_values_handle: T.handle, var_length_info: T.handle, k_rope_pos_offset_handle: T.handle, q_rope_position_handle: T.handle, output_handle: T.handle, lse_handle: T.handle, rotary_mode: T.int32, rope_scale: T.float32, rope_theta: T.float32, attn_score_scaling_factor: T.float32): T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.is_scheduled": 
1}) B = T.int32(is_size_var=True) Q = T.match_buffer(Q_handle, (B, 20, 64), "float16") max_num_pages = T.int32(is_size_var=True) pages = T.match_buffer(pages_handle, (max_num_pages, 2, 20, 16, 64), "float16") page_table_indptr = T.match_buffer(page_table_indptr_handle, (B + 1,), "int32", offset_factor=1) nnz_pages = T.int32(is_size_var=True) page_table_values = T.match_buffer(page_table_values_handle, (nnz_pages,), "int32", offset_factor=1) length_info = T.match_buffer(var_length_info, (B,), "int32", offset_factor=1) k_rope_pos_offset = T.match_buffer(k_rope_pos_offset_handle, (B,), "int32", offset_factor=1) q_rope_position = T.match_buffer(q_rope_position_handle, (B,), "int32", offset_factor=1) output = T.match_buffer(output_handle, (B, 20, 64), "float16") lse = T.match_buffer(lse_handle, (B, 20)) # with T.block("root"): sm_scale: T.float32 = T.float32(0.18033688011112042) for bx in T.thread_binding(B, thread="blockIdx.x"): for fused_by_bz in T.thread_binding(20, thread="blockIdx.y"): for ty in T.thread_binding(1, thread="threadIdx.y"): for tx in T.thread_binding(16, thread="threadIdx.x"): for tz in T.thread_binding(32, thread="threadIdx.z"): with T.block("attn"): T.reads(page_table_indptr[bx:bx + 2], length_info[bx], q_rope_position[bx], Q[bx, fused_by_bz // 20 + ty + fused_by_bz % 20, tx * 4 - 32:tx * 4 - 32 + 68]) T.writes(output[bx, fused_by_bz % 20 + fused_by_bz // 20 + ty, tx * 4:tx * 4 + 4], lse[bx, fused_by_bz % 20 + fused_by_bz // 20 + ty]) Q_local = T.alloc_buffer((4,), "float16", scope="local") kv_chunk_len = T.alloc_buffer((1,), "int32", scope="local") K_smem = T.alloc_buffer((64, 64), "float16", scope="shared") V_smem = T.alloc_buffer((64, 64), "float16", scope="shared") O_allreduce = T.alloc_buffer((32, 1, 64), scope="shared") md_allreduce = T.alloc_buffer((32, 1, 2), scope="shared") S_reduce_local = T.alloc_buffer((1,), scope="local") t0 = T.alloc_buffer((1,), scope="local") S_local = T.alloc_buffer((2,), scope="local") QK_local = 
T.alloc_buffer((4,), scope="local") V_local = T.alloc_buffer((4,), "float16", scope="local") m_prev = T.alloc_buffer((1,), scope="local") d_prev = T.alloc_buffer((1,), scope="local") other_m = T.alloc_buffer((1,), scope="local") other_d = T.alloc_buffer((1,), scope="local") exp_mprev = T.alloc_buffer((1,), scope="local") exp_otherm = T.alloc_buffer((1,), scope="local") other_o = T.alloc_buffer((4,), scope="local") st_m = T.alloc_buffer((1,), scope="local") st_d = T.alloc_buffer((1,), scope="local") O_local = T.alloc_buffer((4,), scope="local") by: T.int32 = fused_by_bz % 20 bz: T.int32 = fused_by_bz // 20 batch_idx: T.int32 = bx cur_page_indptr_begin: T.int32 = page_table_indptr[batch_idx] cur_page_indptr_end: T.int32 = page_table_indptr[batch_idx + 1] kv_chunk_len[0] = T.if_then_else(cur_page_indptr_begin != cur_page_indptr_end, (cur_page_indptr_end - cur_page_indptr_begin - 1) * 16 + length_info[batch_idx], 0) st_m[0] = T.float32(-50000) st_d[0] = T.float32(1) for vec in T.vectorized(4): O_local[vec] = T.float32(0) for vec in T.vectorized(4): Q_local[vec] = T.if_then_else(rotary_mode == 1, T.Cast("float16", T.cos(T.Cast("float32", q_rope_position[batch_idx]) * rope_scale / T.pow(rope_theta, T.Cast("float32", (tx * 4 + vec) * 2 % 64) / T.float32(64))) * T.Cast("float32", Q[bx, by + bz + ty, tx * 4 + vec]) + T.sin(T.Cast("float32", q_rope_position[batch_idx]) * rope_scale / T.pow(rope_theta, T.Cast("float32", (tx * 4 + vec) * 2 % 64) / T.float32(64))) * T.Cast("float32", T.if_then_else(tx * 4 + vec < 32, Q[bx, by + bz + ty, tx * 4 + vec + 32] * T.float16(-1), Q[bx, by + bz + ty, tx * 4 + vec - 32]))), Q[bx, by + bz + ty, tx * 4 + vec]) for iterator in range((kv_chunk_len[0] + 63) // 64): tile_start_s: T.int32 = (tz + ty) * 2 tile_start_g: T.int32 = (iterator * 32 + tz + ty) * 2 for j in range(2): with T.block("KV_load"): T.reads() T.writes() row_g: T.int32 = tile_start_g + j if row_g < kv_chunk_len[0]: seq_offset: T.int32 = row_g page_no: T.int32 = 
page_table_values[cur_page_indptr_begin + seq_offset // 16] page_offset: T.int32 = seq_offset % 16 for vec in T.vectorized(4): K_smem[tile_start_s + j, tx * 4 + vec] = T.if_then_else(rotary_mode == 1, T.Cast("float16", T.cos(T.Cast("float32", k_rope_pos_offset[batch_idx] + row_g) * rope_scale / T.pow(rope_theta, T.Cast("float32", (tx * 4 + vec) * 2 % 64) / T.float32(64))) * T.Cast("float32", pages[page_no, 0, by, page_offset, tx * 4 + vec]) + T.sin(T.Cast("float32", k_rope_pos_offset[batch_idx] + row_g) * rope_scale / T.pow(rope_theta, T.Cast("float32", (tx * 4 + vec) * 2 % 64) / T.float32(64))) * T.Cast("float32", T.if_then_else(tx * 4 + vec < 32, pages[page_no, 0, by, page_offset, tx * 4 + vec + 32] * T.float16(-1), pages[page_no, 0, by, page_offset, tx * 4 + vec - 32]))), pages[page_no, 0, by, page_offset, tx * 4 + vec]) V_smem[tile_start_s + j, tx * 4 + vec] = pages[page_no, 1, by, page_offset, tx * 4 + vec] else: for vec in T.vectorized(4): K_smem[tile_start_s + j, tx * 4 + vec] = T.float16(0) V_smem[tile_start_s + j, tx * 4 + vec] = T.float16(0) T.tvm_storage_sync("shared") m_prev[0] = st_m[0] for j in range(2): for vec in T.vectorized(4): QK_local[vec] = T.Cast("float32", Q_local[vec]) * T.Cast("float32", K_smem[tz * 2 + j, tx * 4 + vec]) * attn_score_scaling_factor * sm_scale S_reduce_local[0] = T.float32(0) for vec in T.unroll(4): S_reduce_local[0] = S_reduce_local[0] + QK_local[vec] with T.block("block_cross_thread"): T.reads(S_reduce_local[0]) T.writes(t0[0]) T.attr(T.comm_reducer(lambda x0, y0: x0 + y0, [T.float32(0)]), "reduce_scope", T.reinterpret("handle", T.uint64(0))) T.tvm_thread_allreduce(T.uint32(1), S_reduce_local[0], T.bool(True), t0[0], tx) S_local[j] = T.float32(-50000) if (iterator * 32 + tz) * 2 + j < kv_chunk_len[0]: S_local[j] = t0[0] st_m[0] = T.max(st_m[0], S_local[j]) o_scale: T.float32 = T.exp2(m_prev[0] - st_m[0]) st_d[0] = st_d[0] * o_scale for j in range(2): S_local[j] = T.exp2(S_local[j] - st_m[0]) st_d[0] = st_d[0] + S_local[j] 
for j in T.vectorized(4): O_local[j] = O_local[j] * o_scale for j in range(2): for vec in T.vectorized(4): V_local[vec] = V_smem[tz * 2 + j, tx * 4 + vec] for vec in T.vectorized(4): O_local[vec] = O_local[vec] + T.Cast("float32", V_local[vec]) * S_local[j] for vec in T.vectorized(4): O_allreduce[tz, ty, tx * 4 + vec] = O_local[vec] md_allreduce[tz, ty, 0] = st_m[0] md_allreduce[tz, ty, 1] = st_d[0] T.tvm_storage_sync("shared") st_m[0] = T.float32(-50000) st_d[0] = T.float32(1) for vec in T.vectorized(4): O_local[vec] = T.float32(0) for j in range(32): m_prev[0] = st_m[0] d_prev[0] = st_d[0] other_m[0] = md_allreduce[j, ty, 0] other_d[0] = md_allreduce[j, ty, 1] for vec in T.vectorized(4): other_o[vec] = O_allreduce[j, ty, tx * 4 + vec] st_m[0] = T.max(st_m[0], other_m[0]) st_d[0] = d_prev[0] * T.exp2(m_prev[0] - st_m[0]) + other_d[0] * T.exp2(other_m[0] - st_m[0]) exp_mprev[0] = T.exp2(m_prev[0] - st_m[0]) exp_otherm[0] = T.exp2(other_m[0] - st_m[0]) for vec in T.vectorized(4): O_local[vec] = O_local[vec] * exp_mprev[0] + other_o[vec] * exp_otherm[0] for vec in T.vectorized(4): O_local[vec] = O_local[vec] / st_d[0] for vec in T.vectorized(4): output[batch_idx, by + bz + ty, tx * 4 + vec] = T.Cast("float16", O_local[vec]) lse[batch_idx, by + bz + ty] = st_m[0] + T.log2(st_d[0]) @T.prim_func def batch_decode_paged_kv_sliding_window(_0: T.int32, Q_handle: T.handle, pages_handle: T.handle, page_table_indptr_handle: T.handle, page_table_values_handle: T.handle, var_length_info: T.handle, k_rope_pos_offset_handle: T.handle, q_rope_position_handle: T.handle, output_handle: T.handle, lse_handle: T.handle, rotary_mode: T.int32, rope_scale: T.float32, rope_theta: T.float32, attn_score_scaling_factor: T.float32): T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, 
"max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.is_scheduled": 1}) B = T.int32(is_size_var=True) Q = T.match_buffer(Q_handle, (B, 20, 64), "float16") max_num_pages = T.int32(is_size_var=True) pages = T.match_buffer(pages_handle, (max_num_pages, 2, 20, 16, 64), "float16") page_table_indptr = T.match_buffer(page_table_indptr_handle, (B + 1,), "int32", offset_factor=1) nnz_pages = T.int32(is_size_var=True) page_table_values = T.match_buffer(page_table_values_handle, (nnz_pages,), "int32", offset_factor=1) length_info = T.match_buffer(var_length_info, (3, B), "int32", offset_factor=1) k_rope_pos_offset = T.match_buffer(k_rope_pos_offset_handle, (B,), "int32", offset_factor=1) q_rope_position = T.match_buffer(q_rope_position_handle, (B,), "int32", offset_factor=1) output = T.match_buffer(output_handle, (B, 20, 64), "float16") lse = T.match_buffer(lse_handle, (B, 20)) # with T.block("root"): sm_scale: T.float32 = T.float32(0.18033688011112042) for bx in T.thread_binding(B, thread="blockIdx.x"): for fused_by_bz in T.thread_binding(20, thread="blockIdx.y"): for ty in T.thread_binding(1, thread="threadIdx.y"): for tx in T.thread_binding(16, thread="threadIdx.x"): for tz in T.thread_binding(32, thread="threadIdx.z"): with T.block("attn"): T.reads(page_table_indptr[bx:bx + 2], length_info[0:3, bx], q_rope_position[bx], Q[bx, fused_by_bz // 20 + ty + fused_by_bz % 20, tx * 4 - 32:tx * 4 - 32 + 68]) T.writes(output[bx, fused_by_bz % 20 + fused_by_bz // 20 + ty, tx * 4:tx * 4 + 4], lse[bx, fused_by_bz % 20 + fused_by_bz // 20 + ty]) Q_local = T.alloc_buffer((4,), "float16", scope="local") kv_chunk_len = T.alloc_buffer((1,), "int32", scope="local") K_smem = T.alloc_buffer((64, 64), "float16", scope="shared") V_smem = T.alloc_buffer((64, 64), "float16", scope="shared") O_allreduce = T.alloc_buffer((32, 1, 64), scope="shared") md_allreduce = T.alloc_buffer((32, 1, 2), scope="shared") S_reduce_local = 
T.alloc_buffer((1,), scope="local") t0 = T.alloc_buffer((1,), scope="local") S_local = T.alloc_buffer((2,), scope="local") QK_local = T.alloc_buffer((4,), scope="local") V_local = T.alloc_buffer((4,), "float16", scope="local") m_prev = T.alloc_buffer((1,), scope="local") d_prev = T.alloc_buffer((1,), scope="local") other_m = T.alloc_buffer((1,), scope="local") other_d = T.alloc_buffer((1,), scope="local") exp_mprev = T.alloc_buffer((1,), scope="local") exp_otherm = T.alloc_buffer((1,), scope="local") other_o = T.alloc_buffer((4,), scope="local") st_m = T.alloc_buffer((1,), scope="local") st_d = T.alloc_buffer((1,), scope="local") O_local = T.alloc_buffer((4,), scope="local") by: T.int32 = fused_by_bz % 20 bz: T.int32 = fused_by_bz // 20 batch_idx: T.int32 = bx cur_page_indptr_begin: T.int32 = page_table_indptr[batch_idx] cur_page_indptr_end: T.int32 = page_table_indptr[batch_idx + 1] kv_chunk_len[0] = T.if_then_else(cur_page_indptr_begin != cur_page_indptr_end, (cur_page_indptr_end - cur_page_indptr_begin - 1) * 16 + length_info[0, batch_idx] - length_info[1, batch_idx] + length_info[2, batch_idx], 0) st_m[0] = T.float32(-50000) st_d[0] = T.float32(1) for vec in T.vectorized(4): O_local[vec] = T.float32(0) for vec in T.vectorized(4): Q_local[vec] = T.if_then_else(rotary_mode == 1, T.Cast("float16", T.cos(T.Cast("float32", q_rope_position[batch_idx]) * rope_scale / T.pow(rope_theta, T.Cast("float32", (tx * 4 + vec) * 2 % 64) / T.float32(64))) * T.Cast("float32", Q[bx, by + bz + ty, tx * 4 + vec]) + T.sin(T.Cast("float32", q_rope_position[batch_idx]) * rope_scale / T.pow(rope_theta, T.Cast("float32", (tx * 4 + vec) * 2 % 64) / T.float32(64))) * T.Cast("float32", T.if_then_else(tx * 4 + vec < 32, Q[bx, by + bz + ty, tx * 4 + vec + 32] * T.float16(-1), Q[bx, by + bz + ty, tx * 4 + vec - 32]))), Q[bx, by + bz + ty, tx * 4 + vec]) for iterator in range((kv_chunk_len[0] + 63) // 64): tile_start_s: T.int32 = (tz + ty) * 2 tile_start_g: T.int32 = (iterator * 32 + tz + ty) * 
2 for j in range(2): with T.block("KV_load"): T.reads() T.writes() row_g: T.int32 = tile_start_g + j if row_g < kv_chunk_len[0]: seq_offset: T.int32 = T.if_then_else(row_g < length_info[2, batch_idx], row_g, row_g - length_info[2, batch_idx] + length_info[1, batch_idx]) page_no: T.int32 = page_table_values[cur_page_indptr_begin + seq_offset // 16] page_offset: T.int32 = seq_offset % 16 for vec in T.vectorized(4): K_smem[tile_start_s + j, tx * 4 + vec] = T.if_then_else(rotary_mode == 1, T.Cast("float16", T.cos(T.Cast("float32", k_rope_pos_offset[batch_idx] + row_g) * rope_scale / T.pow(rope_theta, T.Cast("float32", (tx * 4 + vec) * 2 % 64) / T.float32(64))) * T.Cast("float32", pages[page_no, 0, by, page_offset, tx * 4 + vec]) + T.sin(T.Cast("float32", k_rope_pos_offset[batch_idx] + row_g) * rope_scale / T.pow(rope_theta, T.Cast("float32", (tx * 4 + vec) * 2 % 64) / T.float32(64))) * T.Cast("float32", T.if_then_else(tx * 4 + vec < 32, pages[page_no, 0, by, page_offset, tx * 4 + vec + 32] * T.float16(-1), pages[page_no, 0, by, page_offset, tx * 4 + vec - 32]))), pages[page_no, 0, by, page_offset, tx * 4 + vec]) V_smem[tile_start_s + j, tx * 4 + vec] = pages[page_no, 1, by, page_offset, tx * 4 + vec] else: for vec in T.vectorized(4): K_smem[tile_start_s + j, tx * 4 + vec] = T.float16(0) V_smem[tile_start_s + j, tx * 4 + vec] = T.float16(0) T.tvm_storage_sync("shared") m_prev[0] = st_m[0] for j in range(2): for vec in T.vectorized(4): QK_local[vec] = T.Cast("float32", Q_local[vec]) * T.Cast("float32", K_smem[tz * 2 + j, tx * 4 + vec]) * attn_score_scaling_factor * sm_scale S_reduce_local[0] = T.float32(0) for vec in T.unroll(4): S_reduce_local[0] = S_reduce_local[0] + QK_local[vec] with T.block("block_cross_thread"): T.reads(S_reduce_local[0]) T.writes(t0[0]) T.attr(T.comm_reducer(lambda x0, y0: x0 + y0, [T.float32(0)]), "reduce_scope", T.reinterpret("handle", T.uint64(0))) T.tvm_thread_allreduce(T.uint32(1), S_reduce_local[0], T.bool(True), t0[0], tx) S_local[j] = 
T.float32(-50000) if (iterator * 32 + tz) * 2 + j < kv_chunk_len[0]: S_local[j] = t0[0] st_m[0] = T.max(st_m[0], S_local[j]) o_scale: T.float32 = T.exp2(m_prev[0] - st_m[0]) st_d[0] = st_d[0] * o_scale for j in range(2): S_local[j] = T.exp2(S_local[j] - st_m[0]) st_d[0] = st_d[0] + S_local[j] for j in T.vectorized(4): O_local[j] = O_local[j] * o_scale for j in range(2): for vec in T.vectorized(4): V_local[vec] = V_smem[tz * 2 + j, tx * 4 + vec] for vec in T.vectorized(4): O_local[vec] = O_local[vec] + T.Cast("float32", V_local[vec]) * S_local[j] for vec in T.vectorized(4): O_allreduce[tz, ty, tx * 4 + vec] = O_local[vec] md_allreduce[tz, ty, 0] = st_m[0] md_allreduce[tz, ty, 1] = st_d[0] T.tvm_storage_sync("shared") st_m[0] = T.float32(-50000) st_d[0] = T.float32(1) for vec in T.vectorized(4): O_local[vec] = T.float32(0) for j in range(32): m_prev[0] = st_m[0] d_prev[0] = st_d[0] other_m[0] = md_allreduce[j, ty, 0] other_d[0] = md_allreduce[j, ty, 1] for vec in T.vectorized(4): other_o[vec] = O_allreduce[j, ty, tx * 4 + vec] st_m[0] = T.max(st_m[0], other_m[0]) st_d[0] = d_prev[0] * T.exp2(m_prev[0] - st_m[0]) + other_d[0] * T.exp2(other_m[0] - st_m[0]) exp_mprev[0] = T.exp2(m_prev[0] - st_m[0]) exp_otherm[0] = T.exp2(other_m[0] - st_m[0]) for vec in T.vectorized(4): O_local[vec] = O_local[vec] * exp_mprev[0] + other_o[vec] * exp_otherm[0] for vec in T.vectorized(4): O_local[vec] = O_local[vec] / st_d[0] for vec in T.vectorized(4): output[batch_idx, by + bz + ty, tx * 4 + vec] = T.Cast("float16", O_local[vec]) lse[batch_idx, by + bz + ty] = st_m[0] + T.log2(st_d[0]) @T.prim_func def batch_prefill_paged_kv(_0: T.int32, var_q: T.handle, var_q_indptr: T.handle, var_pages: T.handle, var_page_indptr: T.handle, var_page_values: T.handle, var_length_info: T.handle, var_k_rope_pos_offset: T.handle, var_q_rope_position: T.handle, var_output: T.handle, var_lse: T.handle, causal: T.int32, rotary_mode: T.int32, rope_scale: T.float32, rope_theta: T.float32, 
attn_score_scaling_factor: T.float32): T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.is_scheduled": 1}) total_len = T.int32(is_size_var=True) q = T.match_buffer(var_q, (total_len, 20, 64), "float16") batch_size = T.int32(is_size_var=True) q_indptr = T.match_buffer(var_q_indptr, (batch_size + 1,), "int32", offset_factor=1) max_num_pages = T.int32(is_size_var=True) pages = T.match_buffer(var_pages, (max_num_pages, 2, 20, 16, 64), "float16") page_indptr = T.match_buffer(var_page_indptr, (batch_size + 1,), "int32", offset_factor=1) nnz_pages = T.int32(is_size_var=True) page_values = T.match_buffer(var_page_values, (nnz_pages,), "int32", offset_factor=1) length_info = T.match_buffer(var_length_info, (batch_size,), "int32", offset_factor=1) k_rope_pos_offset = T.match_buffer(var_k_rope_pos_offset, (batch_size,), "int32", offset_factor=1) q_rope_position = T.match_buffer(var_q_rope_position, (total_len,), "int32", offset_factor=1) output = T.match_buffer(var_output, (total_len, 20, 64), "float16") lse = T.match_buffer(var_lse, (total_len, 20)) # with T.block("root"): for lbx in T.thread_binding(16, thread="blockIdx.x"): for lby in T.thread_binding(20, thread="blockIdx.y"): for lty in T.thread_binding(4, thread="threadIdx.y"): for ltx in T.thread_binding(32, thread="threadIdx.x"): with T.block("attn"): bx, by, ty, tx = T.axis.remap("SSSS", [lbx, lby, lty, ltx]) T.reads() T.writes() tile_id = T.alloc_buffer((1,), "int32", scope="local") batch_idx = T.alloc_buffer((1,), "int32", scope="local") batch_tiles = T.alloc_buffer((1,), "int32", scope="local") batch_rows = T.alloc_buffer((1,), "int32", scope="local") iterator = T.alloc_buffer((1,), "int32", scope="local") 
kv_chunk_len = T.alloc_buffer((1,), "int32", scope="local") Q_smem = T.alloc_buffer((32, 64), "float16", scope="shared") K_smem = T.alloc_buffer((16, 64), "float16", scope="shared") V_smem = T.alloc_buffer((16, 64), "float16", scope="shared") S_smem = T.alloc_buffer((32, 16), scope="shared") S_local = T.alloc_buffer((32, 16), scope="local") O_local = T.alloc_buffer((32, 64), scope="local") m_smem = T.alloc_buffer((32,), scope="shared") m_prev_smem = T.alloc_buffer((32,), scope="shared") d_smem = T.alloc_buffer((32,), scope="shared") m_new = T.alloc_buffer((1,), scope="local") m_prev = T.alloc_buffer((1,), scope="local") d_new = T.alloc_buffer((1,), scope="local") tile_id[0] = bx batch_idx[0] = 0 batch_rows[0] = q_indptr[1] - q_indptr[0] batch_tiles[0] = (batch_rows[0] + 32 - 1) // 32 while T.tvm_thread_invariant(batch_idx[0] < batch_size): while tile_id[0] >= batch_tiles[0] and batch_idx[0] < batch_size: tile_id[0] = tile_id[0] - batch_tiles[0] batch_idx[0] = batch_idx[0] + 1 if batch_idx[0] < batch_size: b_idx: T.int32 = batch_idx[0] batch_rows[0] = q_indptr[b_idx + 1] - q_indptr[b_idx] batch_tiles[0] = (batch_rows[0] + 32 - 1) // 32 if T.tvm_thread_invariant(batch_idx[0] < batch_size): b_idx: T.int32 = batch_idx[0] LH_start: T.int32 = tile_id[0] * 32 q_indptr_val: T.int32 = q_indptr[b_idx] cur_page_indptr_begin: T.int32 = page_indptr[b_idx] cur_page_indptr_end: T.int32 = page_indptr[b_idx + 1] kv_chunk_len[0] = T.if_then_else(cur_page_indptr_begin != cur_page_indptr_end, (cur_page_indptr_end - cur_page_indptr_begin - 1) * 16 + length_info[b_idx], 0) T.tvm_storage_sync("shared") for i in range(1): row: T.int32 = i * 32 * 4 + ty * 32 + tx if row < 32: m_smem[row] = T.float32(-50000) d_smem[row] = T.float32(1) for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"): for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"): for li_1, lj_1 in T.grid(4, 4): with T.block("O_init"): i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) 
// 16 * 4 + li_1) j = T.axis.spatial(64, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 16 * 4 + lj_1) T.reads() T.writes(O_local[i, j]) O_local[i, j] = T.float32(0) T.tvm_storage_sync("shared") for li_lj_fused_0 in range(4): for li_lj_fused_1 in T.thread_binding(4, thread="threadIdx.y"): for li_lj_fused_2 in T.thread_binding(32, thread="threadIdx.x"): for li_lj_fused_3 in T.vectorized(4): with T.block("Q_load"): i = T.axis.spatial(32, (li_lj_fused_0 * 512 + li_lj_fused_1 * 128 + li_lj_fused_2 * 4 + li_lj_fused_3) // 64) j = T.axis.spatial(64, (li_lj_fused_0 * 512 + li_lj_fused_1 * 128 + li_lj_fused_2 * 4 + li_lj_fused_3) % 64) T.reads() T.writes() cur_L: T.int32 = q_indptr_val + (LH_start + i) cur_H_qo: T.int32 = by if cur_L < q_indptr[b_idx + 1]: Q_smem[i, j] = T.if_then_else(rotary_mode == 1, T.Cast("float16", T.cos(T.Cast("float32", q_rope_position[cur_L]) * rope_scale / T.pow(rope_theta, T.Cast("float32", j * 2 % 64) / T.float32(64))) * T.Cast("float32", q[cur_L, cur_H_qo, j]) + T.sin(T.Cast("float32", q_rope_position[cur_L]) * rope_scale / T.pow(rope_theta, T.Cast("float32", j * 2 % 64) / T.float32(64))) * T.Cast("float32", T.if_then_else(j < 32, q[cur_L, cur_H_qo, j + 32] * T.float16(-1), q[cur_L, cur_H_qo, j - 32]))), q[cur_L, cur_H_qo, j]) else: Q_smem[i, j] = T.float16(0) T.tvm_storage_sync("shared") for iterator_1 in range((kv_chunk_len[0] + 15) // 16): L_kv_start: T.int32 = iterator_1 * 16 for lz_ly_fused_0 in range(2): for lz_ly_fused_1 in T.thread_binding(4, thread="threadIdx.y"): for lz_ly_fused_2 in T.thread_binding(32, thread="threadIdx.x"): for lz_ly_fused_3 in T.vectorized(4): with T.block("K_load"): i = T.axis.spatial(16, (lz_ly_fused_0 * 512 + lz_ly_fused_1 * 128 + lz_ly_fused_2 * 4 + lz_ly_fused_3) // 64) j = T.axis.spatial(64, (lz_ly_fused_0 * 512 + lz_ly_fused_1 * 128 + lz_ly_fused_2 * 4 + lz_ly_fused_3) % 64) T.reads() T.writes() cur_L: T.int32 = L_kv_start + i if cur_L < kv_chunk_len[0]: seq_offset: T.int32 = cur_L page_no: T.int32 = 
page_values[cur_page_indptr_begin + seq_offset // 16] page_offset: T.int32 = seq_offset % 16 K_smem[i, j] = T.if_then_else(rotary_mode == 1, T.Cast("float16", T.cos(T.Cast("float32", k_rope_pos_offset[b_idx] + cur_L) * rope_scale / T.pow(rope_theta, T.Cast("float32", j * 2 % 64) / T.float32(64))) * T.Cast("float32", pages[page_no, 0, by, page_offset, j]) + T.sin(T.Cast("float32", k_rope_pos_offset[b_idx] + cur_L) * rope_scale / T.pow(rope_theta, T.Cast("float32", j * 2 % 64) / T.float32(64))) * T.Cast("float32", T.if_then_else(j < 32, pages[page_no, 0, by, page_offset, j + 32] * T.float16(-1), pages[page_no, 0, by, page_offset, j - 32]))), pages[page_no, 0, by, page_offset, j]) else: K_smem[i, j] = T.float16(0) T.tvm_storage_sync("shared") for lz_ly_fused_0 in range(2): for lz_ly_fused_1 in T.thread_binding(4, thread="threadIdx.y"): for lz_ly_fused_2 in T.thread_binding(32, thread="threadIdx.x"): for lz_ly_fused_3 in T.vectorized(4): with T.block("V_load"): i = T.axis.spatial(16, (lz_ly_fused_0 * 512 + lz_ly_fused_1 * 128 + lz_ly_fused_2 * 4 + lz_ly_fused_3) // 64) j = T.axis.spatial(64, (lz_ly_fused_0 * 512 + lz_ly_fused_1 * 128 + lz_ly_fused_2 * 4 + lz_ly_fused_3) % 64) T.reads() T.writes() cur_L: T.int32 = L_kv_start + i if cur_L < kv_chunk_len[0]: seq_offset: T.int32 = cur_L page_no: T.int32 = page_values[cur_page_indptr_begin + seq_offset // 16] page_offset: T.int32 = seq_offset % 16 V_smem[i, j] = pages[page_no, 1, by, page_offset, j] else: V_smem[i, j] = T.float16(0) T.tvm_storage_sync("shared") with T.block(""): T.reads(Q_smem[0:32, 0:64], K_smem[0:16, 0:64]) T.writes(S_local[0:32, 0:16]) for li_0_lj_0_fused_0_init in T.thread_binding(4, thread="threadIdx.y"): for li_0_lj_0_fused_1_init in T.thread_binding(32, thread="threadIdx.x"): for li_1_init, lj_1_init in T.grid(2, 2): with T.block("S_gemm_init"): i = T.axis.spatial(32, (li_0_lj_0_fused_0_init * 32 + li_0_lj_0_fused_1_init) // 8 * 2 + li_1_init) j = T.axis.spatial(16, (li_0_lj_0_fused_0_init * 32 + 
li_0_lj_0_fused_1_init) % 8 * 2 + lj_1_init) T.reads() T.writes(S_local[i, j]) S_local[i, j] = T.float32(0) for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"): for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"): for lk_0, li_1, lj_1, lk_1 in T.grid(8, 2, 2, 8): with T.block("S_gemm_update"): i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 8 * 2 + li_1) j = T.axis.spatial(16, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 8 * 2 + lj_1) k = T.axis.reduce(64, lk_0 * 8 + lk_1) T.reads(S_local[i, j], Q_smem[i, k], K_smem[j, k]) T.writes(S_local[i, j]) S_local[i, j] = S_local[i, j] + T.Cast("float32", Q_smem[i, k]) * T.Cast("float32", K_smem[j, k]) * attn_score_scaling_factor * T.float32(0.18033688011112042) T.tvm_storage_sync("shared") for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"): for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"): for li_1, lj_1 in T.grid(2, 2): with T.block("S_store"): i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 8 * 2 + li_1) j = T.axis.spatial(16, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 8 * 2 + lj_1) T.reads(S_local[i, j]) T.writes(S_smem[i, j]) S_smem[i, j] = S_local[i, j] T.tvm_storage_sync("shared") for i in range(1): row: T.int32 = i * 32 * 4 + ty * 32 + tx if row < 32: with T.block("update1"): T.reads(m_smem[row], kv_chunk_len[0], q_indptr[b_idx:b_idx + 2], m_new[i], S_smem[row, 0:16], d_smem[row], m_prev[i]) T.writes(m_prev[i], m_new[i], d_new[i]) m_prev[i] = m_smem[row] m_new[i] = m_smem[row] row_: T.int32 = LH_start + row for j in range(16): if T.if_then_else(causal > 0, L_kv_start + j < kv_chunk_len[0] - (q_indptr[b_idx + 1] - q_indptr[b_idx]) + row_ + 1, L_kv_start + j < kv_chunk_len[0]): m_new[i] = T.max(m_new[i], S_smem[row, j]) d_new[i] = d_smem[row] * T.exp2(m_prev[i] - m_new[i]) for i in range(1): row: T.int32 = i * 32 * 4 + ty * 32 + tx with T.block("update"): T.reads(kv_chunk_len[0], q_indptr[b_idx:b_idx + 
2], S_smem[row, 0:16], m_new[i]) T.writes(S_smem[row, 0:16]) for j in range(16): if row < 32: row_: T.int32 = LH_start + row if T.if_then_else(causal > 0, L_kv_start + j < kv_chunk_len[0] - (q_indptr[b_idx + 1] - q_indptr[b_idx]) + row_ + 1, L_kv_start + j < kv_chunk_len[0]): S_smem[row, j] = T.exp2(S_smem[row, j] - m_new[i]) else: S_smem[row, j] = T.exp2(T.float32(-50000) - m_new[i]) for i in range(1): row: T.int32 = i * 32 * 4 + ty * 32 + tx if row < 32: with T.block("update"): T.reads(d_new[i], S_smem[row, 0:16], m_new[i], m_prev[i]) T.writes(d_new[i], m_smem[row], d_smem[row], m_prev_smem[row]) for j in range(16): d_new[i] = d_new[i] + S_smem[row, j] m_smem[row] = m_new[i] d_smem[row] = d_new[i] m_prev_smem[row] = m_prev[i] T.tvm_storage_sync("shared") with T.block(""): T.reads(m_prev_smem[0:32], m_smem[0:32], S_smem[0:32, 0:16], V_smem[0:16, 0:64]) T.writes(O_local[0:32, 0:64]) for li_0_lj_0_fused_0_init in T.thread_binding(4, thread="threadIdx.y"): for li_0_lj_0_fused_1_init in T.thread_binding(32, thread="threadIdx.x"): for li_1_init, lj_1_init in T.grid(4, 4): with T.block("O_gemm_init"): i = T.axis.spatial(32, (li_0_lj_0_fused_0_init * 32 + li_0_lj_0_fused_1_init) // 16 * 4 + li_1_init) j = T.axis.spatial(64, (li_0_lj_0_fused_0_init * 32 + li_0_lj_0_fused_1_init) % 16 * 4 + lj_1_init) T.reads() T.writes(O_local[i, j]) O_local[i, j] = O_local[i, j] * T.exp2(m_prev_smem[i] - m_smem[i]) for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"): for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"): for lk_0, lk_1, li_1, lj_1 in T.grid(2, 8, 4, 4): with T.block("O_gemm_update"): i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 16 * 4 + li_1) j = T.axis.spatial(64, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 16 * 4 + lj_1) k = T.axis.reduce(16, lk_0 * 8 + lk_1) T.reads(O_local[i, j], m_prev_smem[i], m_smem[i], S_smem[i, k], V_smem[k, j]) T.writes(O_local[i, j]) O_local[i, j] = O_local[i, j] + S_smem[i, k] * 
T.Cast("float32", V_smem[k, j])
# NOTE(review): everything above/below down to the next @T.prim_func is the tail of a
# prim_func whose definition starts before this chunk; tokens left unchanged, layout only reflowed.
for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"):
    for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"):
        for li_1, lj_1 in T.grid(4, 4):
            with T.block("O_store"):
                i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 16 * 4 + li_1)
                j = T.axis.spatial(64, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 16 * 4 + lj_1)
                T.reads(q_indptr[b_idx:b_idx + 2], O_local[i, j], d_smem[i])
                T.writes(output[q_indptr[b_idx] + (LH_start + i), by, j])
                cur_L: T.int32 = q_indptr[b_idx] + (LH_start + i)
                cur_H_qo: T.int32 = by
                if cur_L < q_indptr[b_idx + 1]:
                    output[cur_L, cur_H_qo, j] = T.Cast("float16", O_local[i, j] / d_smem[i])
for li_0 in range(1):
    for li_1 in T.thread_binding(4, thread="threadIdx.y"):
        for li_2 in T.thread_binding(32, thread="threadIdx.x"):
            with T.block("lse_store"):
                i = T.axis.spatial(32, li_0 * 128 + li_1 * 32 + li_2)
                T.where((li_0 * 4 + li_1) * 32 + li_2 < 32)
                T.reads(q_indptr[b_idx:b_idx + 2], m_smem[i], d_smem[i])
                T.writes(lse[q_indptr[b_idx] + (LH_start + i), by])
                cur_L: T.int32 = q_indptr[b_idx] + (LH_start + i)
                cur_H_qo: T.int32 = by
                if cur_L < q_indptr[b_idx + 1]:
                    lse[cur_L, cur_H_qo] = m_smem[i] + T.log2(d_smem[i])
tile_id[0] = tile_id[0] + 16
# Generated TVMScript (TIR) flash-attention prefill kernel over a paged KV cache with
# sliding-window support: 20 heads, head_dim 64, page size 16, q/k/v in float16,
# softmax state accumulated in float32.
# NOTE(review): auto-generated, pre-scheduled IR. Layout reflowed for readability only;
# every non-comment token is unchanged from the dump.
@T.prim_func
def batch_prefill_paged_kv_sliding_window(_0: T.int32, var_q: T.handle, var_q_indptr: T.handle, var_pages: T.handle, var_page_indptr: T.handle, var_page_values: T.handle, var_length_info: T.handle, var_k_rope_pos_offset: T.handle, var_q_rope_position: T.handle, var_output: T.handle, var_lse: T.handle, causal: T.int32, rotary_mode: T.int32, rope_scale: T.float32, rope_theta: T.float32, attn_score_scaling_factor: T.float32):
    T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.is_scheduled": 1})
    total_len = T.int32(is_size_var=True)
    q = T.match_buffer(var_q, (total_len, 20, 64), "float16")
    batch_size = T.int32(is_size_var=True)
    q_indptr = T.match_buffer(var_q_indptr, (batch_size + 1,), "int32", offset_factor=1)
    max_num_pages = T.int32(is_size_var=True)
    pages = T.match_buffer(var_pages, (max_num_pages, 2, 20, 16, 64), "float16")
    page_indptr = T.match_buffer(var_page_indptr, (batch_size + 1,), "int32", offset_factor=1)
    nnz_pages = T.int32(is_size_var=True)
    page_values = T.match_buffer(var_page_values, (nnz_pages,), "int32", offset_factor=1)
    # length_info rows: presumably [0]=last-page length, [1]=sliding-window start,
    # [2]=attention-sink length — TODO confirm against the KV-cache host code.
    length_info = T.match_buffer(var_length_info, (3, batch_size), "int32", offset_factor=1)
    k_rope_pos_offset = T.match_buffer(var_k_rope_pos_offset, (batch_size,), "int32", offset_factor=1)
    q_rope_position = T.match_buffer(var_q_rope_position, (total_len,), "int32", offset_factor=1)
    output = T.match_buffer(var_output, (total_len, 20, 64), "float16")
    lse = T.match_buffer(var_lse, (total_len, 20))
    # with T.block("root"):
    for lbx in T.thread_binding(16, thread="blockIdx.x"):
        for lby in T.thread_binding(20, thread="blockIdx.y"):
            for lty in T.thread_binding(4, thread="threadIdx.y"):
                for ltx in T.thread_binding(32, thread="threadIdx.x"):
                    with T.block("attn"):
                        bx, by, ty, tx = T.axis.remap("SSSS", [lbx, lby, lty, ltx])
                        T.reads()
                        T.writes()
                        tile_id = T.alloc_buffer((1,), "int32", scope="local")
                        batch_idx = T.alloc_buffer((1,), "int32", scope="local")
                        batch_tiles = T.alloc_buffer((1,), "int32", scope="local")
                        batch_rows = T.alloc_buffer((1,), "int32", scope="local")
                        iterator = T.alloc_buffer((1,), "int32", scope="local")  # NOTE(review): allocated but never referenced below
                        kv_chunk_len = T.alloc_buffer((1,), "int32", scope="local")
                        Q_smem = T.alloc_buffer((32, 64), "float16", scope="shared")
                        K_smem = T.alloc_buffer((16, 64), "float16", scope="shared")
                        V_smem = T.alloc_buffer((16, 64), "float16", scope="shared")
                        S_smem = T.alloc_buffer((32, 16), scope="shared")
                        S_local = T.alloc_buffer((32, 16), scope="local")
                        O_local = T.alloc_buffer((32, 64), scope="local")
                        m_smem = T.alloc_buffer((32,), scope="shared")
                        m_prev_smem = T.alloc_buffer((32,), scope="shared")
                        d_smem = T.alloc_buffer((32,), scope="shared")
                        m_new = T.alloc_buffer((1,), scope="local")
                        m_prev = T.alloc_buffer((1,), scope="local")
                        d_new = T.alloc_buffer((1,), scope="local")
                        # Persistent-kernel tile scheduling: each of the 16 thread blocks walks
                        # the (request, 32-query-row tile) pairs with stride 16.
                        tile_id[0] = bx
                        batch_idx[0] = 0
                        batch_rows[0] = q_indptr[1] - q_indptr[0]
                        batch_tiles[0] = (batch_rows[0] + 32 - 1) // 32
                        while T.tvm_thread_invariant(batch_idx[0] < batch_size):
                            # Advance to the request that owns the current tile id.
                            while tile_id[0] >= batch_tiles[0] and batch_idx[0] < batch_size:
                                tile_id[0] = tile_id[0] - batch_tiles[0]
                                batch_idx[0] = batch_idx[0] + 1
                                if batch_idx[0] < batch_size:
                                    b_idx: T.int32 = batch_idx[0]
                                    batch_rows[0] = q_indptr[b_idx + 1] - q_indptr[b_idx]
                                    batch_tiles[0] = (batch_rows[0] + 32 - 1) // 32
                            if T.tvm_thread_invariant(batch_idx[0] < batch_size):
                                b_idx: T.int32 = batch_idx[0]
                                LH_start: T.int32 = tile_id[0] * 32
                                q_indptr_val: T.int32 = q_indptr[b_idx]
                                cur_page_indptr_begin: T.int32 = page_indptr[b_idx]
                                cur_page_indptr_end: T.int32 = page_indptr[b_idx + 1]
                                # Effective KV length: full pages plus the partial last page,
                                # adjusted by the length_info sliding-window bookkeeping.
                                kv_chunk_len[0] = T.if_then_else(cur_page_indptr_begin != cur_page_indptr_end, (cur_page_indptr_end - cur_page_indptr_begin - 1) * 16 + length_info[0, b_idx] - length_info[1, b_idx] + length_info[2, b_idx], 0)
                                T.tvm_storage_sync("shared")
                                # Reset the online-softmax running max / denominator per row.
                                for i in range(1):
                                    row: T.int32 = i * 32 * 4 + ty * 32 + tx
                                    if row < 32:
                                        m_smem[row] = T.float32(-50000)
                                        d_smem[row] = T.float32(1)
                                for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"):
                                    for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"):
                                        for li_1, lj_1 in T.grid(4, 4):
                                            with T.block("O_init"):
                                                i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 16 * 4 + li_1)
                                                j = T.axis.spatial(64, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 16 * 4 + lj_1)
                                                T.reads()
                                                T.writes(O_local[i, j])
                                                O_local[i, j] = T.float32(0)
                                T.tvm_storage_sync("shared")
                                # Stage the 32x64 query tile in shared memory, applying rotary
                                # position embedding on the fly when rotary_mode == 1.
                                for li_lj_fused_0 in range(4):
                                    for li_lj_fused_1 in T.thread_binding(4, thread="threadIdx.y"):
                                        for li_lj_fused_2 in T.thread_binding(32, thread="threadIdx.x"):
                                            for li_lj_fused_3 in T.vectorized(4):
                                                with T.block("Q_load"):
                                                    i = T.axis.spatial(32, (li_lj_fused_0 * 512 + li_lj_fused_1 * 128 + li_lj_fused_2 * 4 + li_lj_fused_3) // 64)
                                                    j = T.axis.spatial(64, (li_lj_fused_0 * 512 + li_lj_fused_1 * 128 + li_lj_fused_2 * 4 + li_lj_fused_3) % 64)
                                                    T.reads()
                                                    T.writes()
                                                    cur_L: T.int32 = q_indptr_val + (LH_start + i)
                                                    cur_H_qo: T.int32 = by
                                                    if cur_L < q_indptr[b_idx + 1]:
                                                        Q_smem[i, j] = T.if_then_else(rotary_mode == 1, T.Cast("float16", T.cos(T.Cast("float32", q_rope_position[cur_L]) * rope_scale / T.pow(rope_theta, T.Cast("float32", j * 2 % 64) / T.float32(64))) * T.Cast("float32", q[cur_L, cur_H_qo, j]) + T.sin(T.Cast("float32", q_rope_position[cur_L]) * rope_scale / T.pow(rope_theta, T.Cast("float32", j * 2 % 64) / T.float32(64))) * T.Cast("float32", T.if_then_else(j < 32, q[cur_L, cur_H_qo, j + 32] * T.float16(-1), q[cur_L, cur_H_qo, j - 32]))), q[cur_L, cur_H_qo, j])
                                                    else:
                                                        Q_smem[i, j] = T.float16(0)
                                T.tvm_storage_sync("shared")
                                # Sweep the KV sequence in 16-token chunks.
                                for iterator_1 in range((kv_chunk_len[0] + 15) // 16):
                                    L_kv_start: T.int32 = iterator_1 * 16
                                    for lz_ly_fused_0 in range(2):
                                        for lz_ly_fused_1 in T.thread_binding(4, thread="threadIdx.y"):
                                            for lz_ly_fused_2 in T.thread_binding(32, thread="threadIdx.x"):
                                                for lz_ly_fused_3 in T.vectorized(4):
                                                    with T.block("K_load"):
                                                        i = T.axis.spatial(16, (lz_ly_fused_0 * 512 + lz_ly_fused_1 * 128 + lz_ly_fused_2 * 4 + lz_ly_fused_3) // 64)
                                                        j = T.axis.spatial(64, (lz_ly_fused_0 * 512 + lz_ly_fused_1 * 128 + lz_ly_fused_2 * 4 + lz_ly_fused_3) % 64)
                                                        T.reads()
                                                        T.writes()
                                                        cur_L: T.int32 = L_kv_start + i
                                                        if cur_L < kv_chunk_len[0]:
                                                            # Remap the logical KV position into the sliding-window cache
                                                            # layout, then into a (page, slot) pair.
                                                            seq_offset: T.int32 = T.if_then_else(cur_L < length_info[2, b_idx], cur_L, cur_L - length_info[2, b_idx] + length_info[1, b_idx])
                                                            page_no: T.int32 = page_values[cur_page_indptr_begin + seq_offset // 16]
                                                            page_offset: T.int32 = seq_offset % 16
                                                            K_smem[i, j] = T.if_then_else(rotary_mode == 1, T.Cast("float16", T.cos(T.Cast("float32", k_rope_pos_offset[b_idx] + cur_L) * rope_scale / T.pow(rope_theta, T.Cast("float32", j * 2 % 64) / T.float32(64))) * T.Cast("float32", pages[page_no, 0, by, page_offset, j]) + T.sin(T.Cast("float32", k_rope_pos_offset[b_idx] + cur_L) * rope_scale / T.pow(rope_theta, T.Cast("float32", j * 2 % 64) / T.float32(64))) * T.Cast("float32", T.if_then_else(j < 32, pages[page_no, 0, by, page_offset, j + 32] * T.float16(-1), pages[page_no, 0, by, page_offset, j - 32]))), pages[page_no, 0, by, page_offset, j])
                                                        else:
                                                            K_smem[i, j] = T.float16(0)
                                    T.tvm_storage_sync("shared")
                                    for lz_ly_fused_0 in range(2):
                                        for lz_ly_fused_1 in T.thread_binding(4, thread="threadIdx.y"):
                                            for lz_ly_fused_2 in T.thread_binding(32, thread="threadIdx.x"):
                                                for lz_ly_fused_3 in T.vectorized(4):
                                                    with T.block("V_load"):
                                                        i = T.axis.spatial(16, (lz_ly_fused_0 * 512 + lz_ly_fused_1 * 128 + lz_ly_fused_2 * 4 + lz_ly_fused_3) // 64)
                                                        j = T.axis.spatial(64, (lz_ly_fused_0 * 512 + lz_ly_fused_1 * 128 + lz_ly_fused_2 * 4 + lz_ly_fused_3) % 64)
                                                        T.reads()
                                                        T.writes()
                                                        cur_L: T.int32 = L_kv_start + i
                                                        if cur_L < kv_chunk_len[0]:
                                                            seq_offset: T.int32 = T.if_then_else(cur_L < length_info[2, b_idx], cur_L, cur_L - length_info[2, b_idx] + length_info[1, b_idx])
                                                            page_no: T.int32 = page_values[cur_page_indptr_begin + seq_offset // 16]
                                                            page_offset: T.int32 = seq_offset % 16
                                                            V_smem[i, j] = pages[page_no, 1, by, page_offset, j]
                                                        else:
                                                            V_smem[i, j] = T.float16(0)
                                    T.tvm_storage_sync("shared")
                                    # S = Q @ K^T scaled; 0.18033688011112042 == log2(e)/sqrt(64),
                                    # so the exp2/log2 pair below implements softmax in base 2.
                                    with T.block(""):
                                        T.reads(Q_smem[0:32, 0:64], K_smem[0:16, 0:64])
                                        T.writes(S_local[0:32, 0:16])
                                        for li_0_lj_0_fused_0_init in T.thread_binding(4, thread="threadIdx.y"):
                                            for li_0_lj_0_fused_1_init in T.thread_binding(32, thread="threadIdx.x"):
                                                for li_1_init, lj_1_init in T.grid(2, 2):
                                                    with T.block("S_gemm_init"):
                                                        i = T.axis.spatial(32, (li_0_lj_0_fused_0_init * 32 + li_0_lj_0_fused_1_init) // 8 * 2 + li_1_init)
                                                        j = T.axis.spatial(16, (li_0_lj_0_fused_0_init * 32 + li_0_lj_0_fused_1_init) % 8 * 2 + lj_1_init)
                                                        T.reads()
                                                        T.writes(S_local[i, j])
                                                        S_local[i, j] = T.float32(0)
                                        for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"):
                                            for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"):
                                                for lk_0, li_1, lj_1, lk_1 in T.grid(8, 2, 2, 8):
                                                    with T.block("S_gemm_update"):
                                                        i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 8 * 2 + li_1)
                                                        j = T.axis.spatial(16, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 8 * 2 + lj_1)
                                                        k = T.axis.reduce(64, lk_0 * 8 + lk_1)
                                                        T.reads(S_local[i, j], Q_smem[i, k], K_smem[j, k])
                                                        T.writes(S_local[i, j])
                                                        S_local[i, j] = S_local[i, j] + T.Cast("float32", Q_smem[i, k]) * T.Cast("float32", K_smem[j, k]) * attn_score_scaling_factor * T.float32(0.18033688011112042)
                                    T.tvm_storage_sync("shared")
                                    for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"):
                                        for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"):
                                            for li_1, lj_1 in T.grid(2, 2):
                                                with T.block("S_store"):
                                                    i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 8 * 2 + li_1)
                                                    j = T.axis.spatial(16, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 8 * 2 + lj_1)
                                                    T.reads(S_local[i, j])
                                                    T.writes(S_smem[i, j])
                                                    S_smem[i, j] = S_local[i, j]
                                    T.tvm_storage_sync("shared")
                                    # Online-softmax update: new row max (causally masked when
                                    # causal > 0), then rescale the running denominator.
                                    for i in range(1):
                                        row: T.int32 = i * 32 * 4 + ty * 32 + tx
                                        if row < 32:
                                            with T.block("update1"):
                                                T.reads(m_smem[row], kv_chunk_len[0], q_indptr[b_idx:b_idx + 2], m_new[i], S_smem[row, 0:16], d_smem[row], m_prev[i])
                                                T.writes(m_prev[i], m_new[i], d_new[i])
                                                m_prev[i] = m_smem[row]
                                                m_new[i] = m_smem[row]
                                                row_: T.int32 = LH_start + row
                                                for j in range(16):
                                                    if T.if_then_else(causal > 0, L_kv_start + j < kv_chunk_len[0] - (q_indptr[b_idx + 1] - q_indptr[b_idx]) + row_ + 1, L_kv_start + j < kv_chunk_len[0]):
                                                        m_new[i] = T.max(m_new[i], S_smem[row, j])
                                                d_new[i] = d_smem[row] * T.exp2(m_prev[i] - m_new[i])
                                    for i in range(1):
                                        row: T.int32 = i * 32 * 4 + ty * 32 + tx
                                        with T.block("update"):
                                            T.reads(kv_chunk_len[0], q_indptr[b_idx:b_idx + 2], S_smem[row, 0:16], m_new[i])
                                            T.writes(S_smem[row, 0:16])
                                            for j in range(16):
                                                if row < 32:
                                                    row_: T.int32 = LH_start + row
                                                    if T.if_then_else(causal > 0, L_kv_start + j < kv_chunk_len[0] - (q_indptr[b_idx + 1] - q_indptr[b_idx]) + row_ + 1, L_kv_start + j < kv_chunk_len[0]):
                                                        S_smem[row, j] = T.exp2(S_smem[row, j] - m_new[i])
                                                    else:
                                                        S_smem[row, j] = T.exp2(T.float32(-50000) - m_new[i])
                                    for i in range(1):
                                        row: T.int32 = i * 32 * 4 + ty * 32 + tx
                                        if row < 32:
                                            with T.block("update"):
                                                T.reads(d_new[i], S_smem[row, 0:16], m_new[i], m_prev[i])
                                                T.writes(d_new[i], m_smem[row], d_smem[row], m_prev_smem[row])
                                                for j in range(16):
                                                    d_new[i] = d_new[i] + S_smem[row, j]
                                                m_smem[row] = m_new[i]
                                                d_smem[row] = d_new[i]
                                                m_prev_smem[row] = m_prev[i]
                                    T.tvm_storage_sync("shared")
                                    # O := O * 2^(m_prev - m) + S @ V  (flash-attention accumulate).
                                    with T.block(""):
                                        T.reads(m_prev_smem[0:32], m_smem[0:32], S_smem[0:32, 0:16], V_smem[0:16, 0:64])
                                        T.writes(O_local[0:32, 0:64])
                                        for li_0_lj_0_fused_0_init in T.thread_binding(4, thread="threadIdx.y"):
                                            for li_0_lj_0_fused_1_init in T.thread_binding(32, thread="threadIdx.x"):
                                                for li_1_init, lj_1_init in T.grid(4, 4):
                                                    with T.block("O_gemm_init"):
                                                        i = T.axis.spatial(32, (li_0_lj_0_fused_0_init * 32 + li_0_lj_0_fused_1_init) // 16 * 4 + li_1_init)
                                                        j = T.axis.spatial(64, (li_0_lj_0_fused_0_init * 32 + li_0_lj_0_fused_1_init) % 16 * 4 + lj_1_init)
                                                        T.reads()
                                                        T.writes(O_local[i, j])
                                                        O_local[i, j] = O_local[i, j] * T.exp2(m_prev_smem[i] - m_smem[i])
                                        for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"):
                                            for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"):
                                                for lk_0, lk_1, li_1, lj_1 in T.grid(2, 8, 4, 4):
                                                    with T.block("O_gemm_update"):
                                                        i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 16 * 4 + li_1)
                                                        j = T.axis.spatial(64, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 16 * 4 + lj_1)
                                                        k = T.axis.reduce(16, lk_0 * 8 + lk_1)
                                                        T.reads(O_local[i, j], m_prev_smem[i], m_smem[i], S_smem[i, k], V_smem[k, j])
                                                        T.writes(O_local[i, j])
                                                        O_local[i, j] = O_local[i, j] + S_smem[i, k] * T.Cast("float32", V_smem[k, j])
                                # Normalize by the softmax denominator and write the tile out.
                                for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"):
                                    for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"):
                                        for li_1, lj_1 in T.grid(4, 4):
                                            with T.block("O_store"):
                                                i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 16 * 4 + li_1)
                                                j = T.axis.spatial(64, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 16 * 4 + lj_1)
                                                T.reads(q_indptr[b_idx:b_idx + 2], O_local[i, j], d_smem[i])
                                                T.writes(output[q_indptr[b_idx] + (LH_start + i), by, j])
                                                cur_L: T.int32 = q_indptr[b_idx] + (LH_start + i)
                                                cur_H_qo: T.int32 = by
                                                if cur_L < q_indptr[b_idx + 1]:
                                                    output[cur_L, cur_H_qo, j] = T.Cast("float16", O_local[i, j] / d_smem[i])
                                # Log-sum-exp (base-2 max plus log2 of denominator) per query row.
                                for li_0 in range(1):
                                    for li_1 in T.thread_binding(4, thread="threadIdx.y"):
                                        for li_2 in T.thread_binding(32, thread="threadIdx.x"):
                                            with T.block("lse_store"):
                                                i = T.axis.spatial(32, li_0 * 128 + li_1 * 32 + li_2)
                                                T.where((li_0 * 4 + li_1) * 32 + li_2 < 32)
                                                T.reads(q_indptr[b_idx:b_idx + 2], m_smem[i], d_smem[i])
                                                T.writes(lse[q_indptr[b_idx] + (LH_start + i), by])
                                                cur_L: T.int32 = q_indptr[b_idx] + (LH_start + i)
                                                cur_H_qo: T.int32 = by
                                                if cur_L < q_indptr[b_idx + 1]:
                                                    lse[cur_L, cur_H_qo] = m_smem[i] + T.log2(d_smem[i])
                            tile_id[0] = tile_id[0] + 16
# NOTE(review): head of the next prim_func (batch_prefill_ragged_kv); its buffer
# declarations continue on the following source lines — tokens unchanged.
@T.prim_func
def batch_prefill_ragged_kv(var_q: T.handle, var_q_indptr: T.handle, var_k: T.handle, var_v: T.handle, var_kv_indptr: T.handle, var_q_rope_position: T.handle, var_k_rope_pos_offset: T.handle, var_output: T.handle, var_lse: T.handle, causal: T.int32, rotary_mode: T.int32, rope_scale: T.float32, rope_theta: T.float32, attn_score_scaling_factor: T.float32):
    T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.is_scheduled": 1})
    qo_len = T.int32(is_size_var=True)
    q = T.match_buffer(var_q, (qo_len, 20, 64), 
"float16")
    # Generated TVMScript (TIR) flash-attention prefill kernel over ragged
    # (densely packed, kv_indptr-delimited) K/V buffers — the non-paged variant:
    # 20 heads, head_dim 64, float16 q/k/v, float32 softmax state.
    # NOTE(review): auto-generated, pre-scheduled IR. Layout reflowed for
    # readability only; every non-comment token is unchanged from the dump.
    batch_size = T.int32(is_size_var=True)
    q_indptr = T.match_buffer(var_q_indptr, (batch_size + 1,), "int32", offset_factor=1)
    kv_len = T.int32(is_size_var=True)
    k = T.match_buffer(var_k, (kv_len, 20, 64), "float16")
    v = T.match_buffer(var_v, (kv_len, 20, 64), "float16")
    kv_indptr = T.match_buffer(var_kv_indptr, (batch_size + 1,), "int32", offset_factor=1)
    q_rope_position = T.match_buffer(var_q_rope_position, (qo_len,), "int32", offset_factor=1)
    k_rope_pos_offset = T.match_buffer(var_k_rope_pos_offset, (batch_size,), "int32", offset_factor=1)
    output = T.match_buffer(var_output, (qo_len, 20, 64), "float16")
    lse = T.match_buffer(var_lse, (qo_len, 20))
    # with T.block("root"):
    for lbx in T.thread_binding(16, thread="blockIdx.x"):
        for lby in T.thread_binding(20, thread="blockIdx.y"):
            for lty in T.thread_binding(4, thread="threadIdx.y"):
                for ltx in T.thread_binding(32, thread="threadIdx.x"):
                    with T.block("attn"):
                        bx, by, ty, tx = T.axis.remap("SSSS", [lbx, lby, lty, ltx])
                        T.reads()
                        T.writes()
                        tile_id = T.alloc_buffer((1,), "int32", scope="local")
                        batch_idx = T.alloc_buffer((1,), "int32", scope="local")
                        batch_tiles = T.alloc_buffer((1,), "int32", scope="local")
                        batch_rows = T.alloc_buffer((1,), "int32", scope="local")
                        iterator = T.alloc_buffer((1,), "int32", scope="local")  # NOTE(review): allocated but never referenced below
                        kv_chunk_len = T.alloc_buffer((1,), "int32", scope="local")
                        Q_smem = T.alloc_buffer((32, 64), "float16", scope="shared")
                        K_smem = T.alloc_buffer((16, 64), "float16", scope="shared")
                        V_smem = T.alloc_buffer((16, 64), "float16", scope="shared")
                        S_smem = T.alloc_buffer((32, 16), scope="shared")
                        S_local = T.alloc_buffer((32, 16), scope="local")
                        O_local = T.alloc_buffer((32, 64), scope="local")
                        m_smem = T.alloc_buffer((32,), scope="shared")
                        m_prev_smem = T.alloc_buffer((32,), scope="shared")
                        d_smem = T.alloc_buffer((32,), scope="shared")
                        m_new = T.alloc_buffer((1,), scope="local")
                        m_prev = T.alloc_buffer((1,), scope="local")
                        d_new = T.alloc_buffer((1,), scope="local")
                        # Persistent-kernel tile scheduling: each of the 16 thread blocks walks
                        # the (request, 32-query-row tile) pairs with stride 16.
                        tile_id[0] = bx
                        batch_idx[0] = 0
                        batch_rows[0] = q_indptr[1] - q_indptr[0]
                        batch_tiles[0] = (batch_rows[0] + 32 - 1) // 32
                        while T.tvm_thread_invariant(batch_idx[0] < batch_size):
                            # Advance to the request that owns the current tile id.
                            while tile_id[0] >= batch_tiles[0] and batch_idx[0] < batch_size:
                                tile_id[0] = tile_id[0] - batch_tiles[0]
                                batch_idx[0] = batch_idx[0] + 1
                                if batch_idx[0] < batch_size:
                                    b_idx: T.int32 = batch_idx[0]
                                    batch_rows[0] = q_indptr[b_idx + 1] - q_indptr[b_idx]
                                    batch_tiles[0] = (batch_rows[0] + 32 - 1) // 32
                            if T.tvm_thread_invariant(batch_idx[0] < batch_size):
                                b_idx: T.int32 = batch_idx[0]
                                q_indptr_val: T.int32 = q_indptr[b_idx]
                                LH_start: T.int32 = tile_id[0] * 32
                                # KV length comes straight from the ragged indptr (no paging).
                                kv_chunk_len[0] = kv_indptr[b_idx + 1] - kv_indptr[b_idx]
                                T.tvm_storage_sync("shared")
                                # Reset the online-softmax running max / denominator per row.
                                for i in range(1):
                                    row: T.int32 = i * 32 * 4 + ty * 32 + tx
                                    if row < 32:
                                        m_smem[row] = T.float32(-50000)
                                        d_smem[row] = T.float32(1)
                                for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"):
                                    for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"):
                                        for li_1, lj_1 in T.grid(4, 4):
                                            with T.block("O_init"):
                                                i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 16 * 4 + li_1)
                                                j = T.axis.spatial(64, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 16 * 4 + lj_1)
                                                T.reads()
                                                T.writes(O_local[i, j])
                                                O_local[i, j] = T.float32(0)
                                T.tvm_storage_sync("shared")
                                # Stage the 32x64 query tile in shared memory, applying rotary
                                # position embedding on the fly when rotary_mode == 1.
                                for li_lj_fused_0 in range(4):
                                    for li_lj_fused_1 in T.thread_binding(4, thread="threadIdx.y"):
                                        for li_lj_fused_2 in T.thread_binding(32, thread="threadIdx.x"):
                                            for li_lj_fused_3 in T.vectorized(4):
                                                with T.block("Q_load"):
                                                    i = T.axis.spatial(32, (li_lj_fused_0 * 512 + li_lj_fused_1 * 128 + li_lj_fused_2 * 4 + li_lj_fused_3) // 64)
                                                    j = T.axis.spatial(64, (li_lj_fused_0 * 512 + li_lj_fused_1 * 128 + li_lj_fused_2 * 4 + li_lj_fused_3) % 64)
                                                    T.reads()
                                                    T.writes()
                                                    cur_L: T.int32 = q_indptr_val + (LH_start + i)
                                                    cur_H_qo: T.int32 = by
                                                    if cur_L < q_indptr[b_idx + 1]:
                                                        Q_smem[i, j] = T.if_then_else(rotary_mode == 1, T.Cast("float16", T.cos(T.Cast("float32", q_rope_position[cur_L]) * rope_scale / T.pow(rope_theta, T.Cast("float32", j * 2 % 64) / T.float32(64))) * T.Cast("float32", q[cur_L, cur_H_qo, j]) + T.sin(T.Cast("float32", q_rope_position[cur_L]) * rope_scale / T.pow(rope_theta, T.Cast("float32", j * 2 % 64) / T.float32(64))) * T.Cast("float32", T.if_then_else(j < 32, q[cur_L, cur_H_qo, j + 32] * T.float16(-1), q[cur_L, cur_H_qo, j - 32]))), q[cur_L, cur_H_qo, j])
                                                    else:
                                                        Q_smem[i, j] = T.float16(0)
                                T.tvm_storage_sync("shared")
                                # Sweep this request's KV span in 16-token chunks.
                                for iterator_1 in range((kv_chunk_len[0] + 15) // 16):
                                    L_kv_start: T.int32 = iterator_1 * 16
                                    L_kv_base: T.int32 = kv_indptr[b_idx]
                                    for lz_ly_fused_0 in range(2):
                                        for lz_ly_fused_1 in T.thread_binding(4, thread="threadIdx.y"):
                                            for lz_ly_fused_2 in T.thread_binding(32, thread="threadIdx.x"):
                                                for lz_ly_fused_3 in T.vectorized(4):
                                                    with T.block("K_load"):
                                                        i = T.axis.spatial(16, (lz_ly_fused_0 * 512 + lz_ly_fused_1 * 128 + lz_ly_fused_2 * 4 + lz_ly_fused_3) // 64)
                                                        j = T.axis.spatial(64, (lz_ly_fused_0 * 512 + lz_ly_fused_1 * 128 + lz_ly_fused_2 * 4 + lz_ly_fused_3) % 64)
                                                        T.reads()
                                                        T.writes()
                                                        cur_L: T.int32 = L_kv_start + i
                                                        if cur_L < kv_chunk_len[0]:
                                                            K_smem[i, j] = T.if_then_else(rotary_mode == 1, T.Cast("float16", T.cos(T.Cast("float32", k_rope_pos_offset[b_idx] + cur_L) * rope_scale / T.pow(rope_theta, T.Cast("float32", j * 2 % 64) / T.float32(64))) * T.Cast("float32", k[L_kv_base + cur_L, by, j]) + T.sin(T.Cast("float32", k_rope_pos_offset[b_idx] + cur_L) * rope_scale / T.pow(rope_theta, T.Cast("float32", j * 2 % 64) / T.float32(64))) * T.Cast("float32", T.if_then_else(j < 32, k[L_kv_base + cur_L, by, j + 32] * T.float16(-1), k[L_kv_base + cur_L, by, j - 32]))), k[L_kv_base + cur_L, by, j])
                                                        else:
                                                            K_smem[i, j] = T.float16(0)
                                    T.tvm_storage_sync("shared")
                                    for lz_ly_fused_0 in range(2):
                                        for lz_ly_fused_1 in T.thread_binding(4, thread="threadIdx.y"):
                                            for lz_ly_fused_2 in T.thread_binding(32, thread="threadIdx.x"):
                                                for lz_ly_fused_3 in T.vectorized(4):
                                                    with T.block("V_load"):
                                                        i = T.axis.spatial(16, (lz_ly_fused_0 * 512 + lz_ly_fused_1 * 128 + lz_ly_fused_2 * 4 + lz_ly_fused_3) // 64)
                                                        j = T.axis.spatial(64, (lz_ly_fused_0 * 512 + lz_ly_fused_1 * 128 + lz_ly_fused_2 * 4 + lz_ly_fused_3) % 64)
                                                        T.reads()
                                                        T.writes()
                                                        cur_L: T.int32 = L_kv_start + i
                                                        if cur_L < kv_chunk_len[0]:
                                                            V_smem[i, j] = v[L_kv_base + cur_L, by, j]
                                                        else:
                                                            V_smem[i, j] = T.float16(0)
                                    T.tvm_storage_sync("shared")
                                    # S = Q @ K^T scaled; 0.18033688011112042 == log2(e)/sqrt(64),
                                    # so the exp2/log2 pair below implements softmax in base 2.
                                    with T.block(""):
                                        T.reads(Q_smem[0:32, 0:64], K_smem[0:16, 0:64])
                                        T.writes(S_local[0:32, 0:16])
                                        for li_0_lj_0_fused_0_init in T.thread_binding(4, thread="threadIdx.y"):
                                            for li_0_lj_0_fused_1_init in T.thread_binding(32, thread="threadIdx.x"):
                                                for li_1_init, lj_1_init in T.grid(2, 2):
                                                    with T.block("S_gemm_init"):
                                                        i = T.axis.spatial(32, (li_0_lj_0_fused_0_init * 32 + li_0_lj_0_fused_1_init) // 8 * 2 + li_1_init)
                                                        j = T.axis.spatial(16, (li_0_lj_0_fused_0_init * 32 + li_0_lj_0_fused_1_init) % 8 * 2 + lj_1_init)
                                                        T.reads()
                                                        T.writes(S_local[i, j])
                                                        S_local[i, j] = T.float32(0)
                                        # Reduce axis is named k_1 here because `k` is the K buffer.
                                        for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"):
                                            for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"):
                                                for lk_0, li_1, lj_1, lk_1 in T.grid(8, 2, 2, 8):
                                                    with T.block("S_gemm_update"):
                                                        i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 8 * 2 + li_1)
                                                        j = T.axis.spatial(16, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 8 * 2 + lj_1)
                                                        k_1 = T.axis.reduce(64, lk_0 * 8 + lk_1)
                                                        T.reads(S_local[i, j], Q_smem[i, k_1], K_smem[j, k_1])
                                                        T.writes(S_local[i, j])
                                                        S_local[i, j] = S_local[i, j] + T.Cast("float32", Q_smem[i, k_1]) * T.Cast("float32", K_smem[j, k_1]) * attn_score_scaling_factor * T.float32(0.18033688011112042)
                                    T.tvm_storage_sync("shared")
                                    for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"):
                                        for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"):
                                            for li_1, lj_1 in T.grid(2, 2):
                                                with T.block("S_store"):
                                                    i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 8 * 2 + li_1)
                                                    j = T.axis.spatial(16, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 8 * 2 + lj_1)
                                                    T.reads(S_local[i, j])
                                                    T.writes(S_smem[i, j])
                                                    S_smem[i, j] = S_local[i, j]
                                    T.tvm_storage_sync("shared")
                                    # Online-softmax update: new row max (causally masked when
                                    # causal > 0), then rescale the running denominator.
                                    for i in range(1):
                                        row: T.int32 = i * 32 * 4 + ty * 32 + tx
                                        if row < 32:
                                            with T.block("update1"):
                                                T.reads(m_smem[row], kv_chunk_len[0], q_indptr[b_idx:b_idx + 2], m_new[i], S_smem[row, 0:16], d_smem[row], m_prev[i])
                                                T.writes(m_prev[i], m_new[i], d_new[i])
                                                m_prev[i] = m_smem[row]
                                                m_new[i] = m_smem[row]
                                                row_: T.int32 = LH_start + row
                                                for j in range(16):
                                                    if T.if_then_else(causal > 0, L_kv_start + j < kv_chunk_len[0] - (q_indptr[b_idx + 1] - q_indptr[b_idx]) + row_ + 1, L_kv_start + j < kv_chunk_len[0]):
                                                        m_new[i] = T.max(m_new[i], S_smem[row, j])
                                                d_new[i] = d_smem[row] * T.exp2(m_prev[i] - m_new[i])
                                    for i in range(1):
                                        row: T.int32 = i * 32 * 4 + ty * 32 + tx
                                        with T.block("update"):
                                            T.reads(kv_chunk_len[0], q_indptr[b_idx:b_idx + 2], S_smem[row, 0:16], m_new[i])
                                            T.writes(S_smem[row, 0:16])
                                            for j in range(16):
                                                if row < 32:
                                                    row_: T.int32 = LH_start + row
                                                    if T.if_then_else(causal > 0, L_kv_start + j < kv_chunk_len[0] - (q_indptr[b_idx + 1] - q_indptr[b_idx]) + row_ + 1, L_kv_start + j < kv_chunk_len[0]):
                                                        S_smem[row, j] = T.exp2(S_smem[row, j] - m_new[i])
                                                    else:
                                                        S_smem[row, j] = T.exp2(T.float32(-50000) - m_new[i])
                                    for i in range(1):
                                        row: T.int32 = i * 32 * 4 + ty * 32 + tx
                                        if row < 32:
                                            with T.block("update"):
                                                T.reads(d_new[i], S_smem[row, 0:16], m_new[i], m_prev[i])
                                                T.writes(d_new[i], m_smem[row], d_smem[row], m_prev_smem[row])
                                                for j in range(16):
                                                    d_new[i] = d_new[i] + S_smem[row, j]
                                                m_smem[row] = m_new[i]
                                                d_smem[row] = d_new[i]
                                                m_prev_smem[row] = m_prev[i]
                                    T.tvm_storage_sync("shared")
                                    # O := O * 2^(m_prev - m) + S @ V  (flash-attention accumulate).
                                    with T.block(""):
                                        T.reads(m_prev_smem[0:32], m_smem[0:32], S_smem[0:32, 0:16], V_smem[0:16, 0:64])
                                        T.writes(O_local[0:32, 0:64])
                                        for li_0_lj_0_fused_0_init in T.thread_binding(4, thread="threadIdx.y"):
                                            for li_0_lj_0_fused_1_init in T.thread_binding(32, thread="threadIdx.x"):
                                                for li_1_init, lj_1_init in T.grid(4, 4):
                                                    with T.block("O_gemm_init"):
                                                        i = T.axis.spatial(32, (li_0_lj_0_fused_0_init * 32 + li_0_lj_0_fused_1_init) // 16 * 4 + li_1_init)
                                                        j = T.axis.spatial(64, (li_0_lj_0_fused_0_init * 32 + li_0_lj_0_fused_1_init) % 16 * 4 + lj_1_init)
                                                        T.reads()
                                                        T.writes(O_local[i, j])
                                                        O_local[i, j] = O_local[i, j] * T.exp2(m_prev_smem[i] - m_smem[i])
                                        for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"):
                                            for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"):
                                                for lk_0, lk_1, li_1, lj_1 in T.grid(2, 8, 4, 4):
                                                    with T.block("O_gemm_update"):
                                                        i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 16 * 4 + li_1)
                                                        j = T.axis.spatial(64, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 16 * 4 + lj_1)
                                                        k_1 = T.axis.reduce(16, lk_0 * 8 + lk_1)
                                                        T.reads(O_local[i, j], m_prev_smem[i], m_smem[i], S_smem[i, k_1], V_smem[k_1, j])
                                                        T.writes(O_local[i, j])
                                                        O_local[i, j] = O_local[i, j] + S_smem[i, k_1] * T.Cast("float32", V_smem[k_1, j])
                                # Normalize by the softmax denominator and write the tile out.
                                for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"):
                                    for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"):
                                        for li_1, lj_1 in T.grid(4, 4):
                                            with T.block("O_store"):
                                                i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 16 * 4 + li_1)
                                                j = T.axis.spatial(64, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 16 * 4 + lj_1)
                                                T.reads(q_indptr[b_idx:b_idx + 2], O_local[i, j], d_smem[i])
                                                T.writes(output[q_indptr[b_idx] + (LH_start + i), by, j])
                                                cur_L: T.int32 = q_indptr[b_idx] + (LH_start + i)
                                                cur_H_qo: T.int32 = by
                                                if cur_L < q_indptr[b_idx + 1]:
                                                    output[cur_L, cur_H_qo, j] = T.Cast("float16", O_local[i, j] / d_smem[i])
                                # Log-sum-exp (base-2 max plus log2 of denominator) per query row.
                                for li_0 in range(1):
                                    for li_1 in T.thread_binding(4, thread="threadIdx.y"):
                                        for li_2 in T.thread_binding(32, thread="threadIdx.x"):
                                            with T.block("lse_store"):
                                                i = T.axis.spatial(32, li_0 * 128 + li_1 * 32 + li_2)
                                                T.where((li_0 * 4 + li_1) * 32 + li_2 < 32)
                                                T.reads(q_indptr[b_idx:b_idx + 2], m_smem[i], d_smem[i])
                                                T.writes(lse[q_indptr[b_idx] + (LH_start + i), by])
                                                cur_L: T.int32 = q_indptr[b_idx] + (LH_start + i)
                                                cur_H_qo: T.int32 = by
                                                if cur_L < q_indptr[b_idx + 1]:
                                                    lse[cur_L, cur_H_qo] = m_smem[i] + T.log2(d_smem[i])
                            tile_id[0] = tile_id[0] + 16
# NOTE(review): head of the next prim_func (batch_tree_attn); its body continues on
# the following source lines — tokens unchanged, definition incomplete in this chunk.
@T.prim_func
def batch_tree_attn(var_q: T.handle, var_q_indptr: T.handle, var_k: T.handle, var_v: T.handle, var_kv_indptr: T.handle, var_q_rope_position: T.handle, var_mn_indptr: T.handle, var_mask: T.handle, var_output: T.handle, var_lse: T.handle, rotary_mode: T.int32, rope_scale: T.float32, rope_theta: T.float32, attn_score_scaling_factor: T.float32, batch_size: T.int32):
    T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.is_scheduled": 1})
    qo_len = T.int32(is_size_var=True)
    q = T.match_buffer(var_q, (qo_len, 20, 64), "float16")
    q_indptr = T.match_buffer(var_q_indptr, (batch_size + 1,), "int32", offset_factor=1)
    kv_len = T.int32(is_size_var=True)
    k = T.match_buffer(var_k, (kv_len, 20, 64), "float16")
    v = T.match_buffer(var_v, (kv_len, 20, 64), "float16")
    kv_indptr = T.match_buffer(var_kv_indptr, (batch_size + 1,), "int32", offset_factor=1)
    q_rope_position = T.match_buffer(var_q_rope_position, (qo_len,), "int32", offset_factor=1)
    mn_indptr = T.match_buffer(var_mn_indptr, (batch_size + 1,), "int32", offset_factor=1)
    tree_size = T.int32(is_size_var=True)
    mask = T.match_buffer(var_mask, (tree_size,), "int32", offset_factor=1)
    output = T.match_buffer(var_output, (qo_len, 20, 64), "float16")
    lse = T.match_buffer(var_lse, (qo_len, 20))
    # with T.block("root"):
    for lbx in T.thread_binding(16, thread="blockIdx.x"):
        for lby in T.thread_binding(20, thread="blockIdx.y"):
            for lty in T.thread_binding(4, thread="threadIdx.y"):
                for ltx in T.thread_binding(32, thread="threadIdx.x"):
                    with T.block("attn"):
                        bx, by, ty, tx = T.axis.remap("SSSS", [lbx, lby, lty, ltx])
                        T.reads()
                        T.writes()
                        tile_id = 
T.alloc_buffer((1,), "int32", scope="local") batch_idx = T.alloc_buffer((1,), "int32", scope="local") batch_tiles = T.alloc_buffer((1,), "int32", scope="local") batch_rows = T.alloc_buffer((1,), "int32", scope="local") iterator = T.alloc_buffer((1,), "int32", scope="local") kv_chunk_len = T.alloc_buffer((1,), "int32", scope="local") Q_smem = T.alloc_buffer((32, 64), "float16", scope="shared") K_smem = T.alloc_buffer((16, 64), "float16", scope="shared") V_smem = T.alloc_buffer((16, 64), "float16", scope="shared") S_smem = T.alloc_buffer((32, 16), scope="shared") S_local = T.alloc_buffer((32, 16), scope="local") O_local = T.alloc_buffer((32, 64), scope="local") m_smem = T.alloc_buffer((32,), scope="shared") m_prev_smem = T.alloc_buffer((32,), scope="shared") d_smem = T.alloc_buffer((32,), scope="shared") m_new = T.alloc_buffer((1,), scope="local") m_prev = T.alloc_buffer((1,), scope="local") d_new = T.alloc_buffer((1,), scope="local") tile_id[0] = bx batch_idx[0] = 0 batch_rows[0] = q_indptr[1] - q_indptr[0] batch_tiles[0] = (batch_rows[0] + 32 - 1) // 32 while T.tvm_thread_invariant(batch_idx[0] < batch_size): while tile_id[0] >= batch_tiles[0] and batch_idx[0] < batch_size: tile_id[0] = tile_id[0] - batch_tiles[0] batch_idx[0] = batch_idx[0] + 1 if batch_idx[0] < batch_size: b_idx: T.int32 = batch_idx[0] batch_rows[0] = q_indptr[b_idx + 1] - q_indptr[b_idx] batch_tiles[0] = (batch_rows[0] + 32 - 1) // 32 if T.tvm_thread_invariant(batch_idx[0] < batch_size): b_idx: T.int32 = batch_idx[0] LH_start: T.int32 = tile_id[0] * 32 q_indptr_val: T.int32 = q_indptr[b_idx] kv_chunk_len[0] = kv_indptr[b_idx + 1] - kv_indptr[b_idx] T.tvm_storage_sync("shared") for i in range(1): row: T.int32 = i * 32 * 4 + ty * 32 + tx if row < 32: m_smem[row] = T.float32(-50000) d_smem[row] = T.float32(1) for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"): for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"): for li_1, lj_1 in T.grid(4, 4): with T.block("O_init"): 
i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 16 * 4 + li_1) j = T.axis.spatial(64, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 16 * 4 + lj_1) T.reads() T.writes(O_local[i, j]) O_local[i, j] = T.float32(0) T.tvm_storage_sync("shared") for li_lj_fused_0 in range(4): for li_lj_fused_1 in T.thread_binding(4, thread="threadIdx.y"): for li_lj_fused_2 in T.thread_binding(32, thread="threadIdx.x"): for li_lj_fused_3 in T.vectorized(4): with T.block("Q_load"): i = T.axis.spatial(32, (li_lj_fused_0 * 512 + li_lj_fused_1 * 128 + li_lj_fused_2 * 4 + li_lj_fused_3) // 64) j = T.axis.spatial(64, (li_lj_fused_0 * 512 + li_lj_fused_1 * 128 + li_lj_fused_2 * 4 + li_lj_fused_3) % 64) T.reads() T.writes() cur_L: T.int32 = q_indptr_val + (LH_start + i) cur_H_qo: T.int32 = by if cur_L < q_indptr[b_idx + 1]: Q_smem[i, j] = T.if_then_else(rotary_mode == 1, T.Cast("float16", T.cos(T.Cast("float32", q_rope_position[cur_L]) * rope_scale / T.pow(rope_theta, T.Cast("float32", j * 2 % 64) / T.float32(64)))) * q[cur_L, cur_H_qo, j] + T.Cast("float16", T.sin(T.Cast("float32", q_rope_position[cur_L]) * rope_scale / T.pow(rope_theta, T.Cast("float32", j * 2 % 64) / T.float32(64)))) * T.if_then_else(j < 32, q[cur_L, cur_H_qo, j + 32] * T.float16(-1), q[cur_L, cur_H_qo, j - 32]), q[cur_L, cur_H_qo, j]) else: Q_smem[i, j] = T.float16(0) T.tvm_storage_sync("shared") for iterator_1 in range((kv_chunk_len[0] + 15) // 16): L_kv_start: T.int32 = iterator_1 * 16 L_kv_base: T.int32 = kv_indptr[b_idx] for lz_ly_fused_0 in range(2): for lz_ly_fused_1 in T.thread_binding(4, thread="threadIdx.y"): for lz_ly_fused_2 in T.thread_binding(32, thread="threadIdx.x"): for lz_ly_fused_3 in T.vectorized(4): with T.block("KV_load"): i = T.axis.spatial(16, (lz_ly_fused_0 * 512 + lz_ly_fused_1 * 128 + lz_ly_fused_2 * 4 + lz_ly_fused_3) // 64) j = T.axis.spatial(64, (lz_ly_fused_0 * 512 + lz_ly_fused_1 * 128 + lz_ly_fused_2 * 4 + lz_ly_fused_3) % 64) T.reads() T.writes() cur_L: T.int32 = 
L_kv_base + L_kv_start + i if L_kv_start + i < kv_chunk_len[0]: K_smem[i, j] = T.if_then_else(rotary_mode == 1, T.Cast("float16", T.cos(T.Cast("float32", q_rope_position[cur_L]) * rope_scale / T.pow(rope_theta, T.Cast("float32", j * 2 % 64) / T.float32(64)))) * k[cur_L, by, j] + T.Cast("float16", T.sin(T.Cast("float32", q_rope_position[cur_L]) * rope_scale / T.pow(rope_theta, T.Cast("float32", j * 2 % 64) / T.float32(64)))) * T.if_then_else(j < 32, k[cur_L, by, j + 32] * T.float16(-1), k[cur_L, by, j - 32]), k[cur_L, by, j]) V_smem[i, j] = v[cur_L, by, j] else: K_smem[i, j] = T.float16(0) V_smem[i, j] = T.float16(0) T.tvm_storage_sync("shared") with T.block(""): T.reads(Q_smem[0:32, 0:64], K_smem[0:16, 0:64]) T.writes(S_local[0:32, 0:16]) for li_0_lj_0_fused_0_init in T.thread_binding(4, thread="threadIdx.y"): for li_0_lj_0_fused_1_init in T.thread_binding(32, thread="threadIdx.x"): for li_1_init, lj_1_init in T.grid(2, 2): with T.block("S_gemm_init"): i = T.axis.spatial(32, (li_0_lj_0_fused_0_init * 32 + li_0_lj_0_fused_1_init) // 8 * 2 + li_1_init) j = T.axis.spatial(16, (li_0_lj_0_fused_0_init * 32 + li_0_lj_0_fused_1_init) % 8 * 2 + lj_1_init) T.reads() T.writes(S_local[i, j]) S_local[i, j] = T.float32(0) for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"): for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"): for lk_0, li_1, lj_1, lk_1 in T.grid(8, 2, 2, 8): with T.block("S_gemm_update"): i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 8 * 2 + li_1) j = T.axis.spatial(16, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 8 * 2 + lj_1) k_1 = T.axis.reduce(64, lk_0 * 8 + lk_1) T.reads(S_local[i, j], Q_smem[i, k_1], K_smem[j, k_1]) T.writes(S_local[i, j]) S_local[i, j] = S_local[i, j] + T.Cast("float32", Q_smem[i, k_1]) * T.Cast("float32", K_smem[j, k_1]) * attn_score_scaling_factor * T.float32(0.18033688011112042) T.tvm_storage_sync("shared") for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"): for 
li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"): for li_1, lj_1 in T.grid(2, 2): with T.block("S_store"): i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 8 * 2 + li_1) j = T.axis.spatial(16, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 8 * 2 + lj_1) T.reads(S_local[i, j]) T.writes(S_smem[i, j]) S_smem[i, j] = S_local[i, j] T.tvm_storage_sync("shared") for i in range(1): row: T.int32 = i * 32 * 4 + ty * 32 + tx if row < 32: with T.block("update1"): T.reads(m_smem[row], kv_chunk_len[0], mask[mn_indptr[b_idx] + (LH_start + row) * (q_indptr[b_idx + 1] - q_indptr[b_idx]) + L_kv_start:mn_indptr[b_idx] + (LH_start + row) * (q_indptr[b_idx + 1] - q_indptr[b_idx]) + L_kv_start + 16], mn_indptr[b_idx], q_indptr[b_idx:b_idx + 2], m_new[i], S_smem[row, 0:16], d_smem[row], m_prev[i]) T.writes(m_prev[i], m_new[i], d_new[i]) m_prev[i] = m_smem[row] m_new[i] = m_smem[row] row_: T.int32 = LH_start + row for j in range(16): if L_kv_start + j < kv_chunk_len[0] and mask[mn_indptr[b_idx] + row_ * (q_indptr[b_idx + 1] - q_indptr[b_idx]) + (L_kv_start + j)] == 1: m_new[i] = T.max(m_new[i], S_smem[row, j]) d_new[i] = d_smem[row] * T.exp2(m_prev[i] - m_new[i]) for i in range(1): row: T.int32 = i * 32 * 4 + ty * 32 + tx with T.block("update"): T.reads(kv_chunk_len[0], mask[mn_indptr[b_idx] + (LH_start + row) * (q_indptr[b_idx + 1] - q_indptr[b_idx]) + L_kv_start:mn_indptr[b_idx] + (LH_start + row) * (q_indptr[b_idx + 1] - q_indptr[b_idx]) + L_kv_start + 16], mn_indptr[b_idx], q_indptr[b_idx:b_idx + 2], S_smem[row, 0:16], m_new[i]) T.writes(S_smem[row, 0:16]) for j in range(16): if row < 32: row_: T.int32 = LH_start + row if L_kv_start + j < kv_chunk_len[0] and mask[mn_indptr[b_idx] + row_ * (q_indptr[b_idx + 1] - q_indptr[b_idx]) + (L_kv_start + j)] == 1: S_smem[row, j] = T.exp2(S_smem[row, j] - m_new[i]) else: S_smem[row, j] = T.exp2(T.float32(-50000) - m_new[i]) for i in range(1): row: T.int32 = i * 32 * 4 + ty * 32 + tx if row < 32: with 
T.block("update"): T.reads(d_new[i], S_smem[row, 0:16], m_new[i], m_prev[i]) T.writes(d_new[i], m_smem[row], d_smem[row], m_prev_smem[row]) for j in range(16): d_new[i] = d_new[i] + S_smem[row, j] m_smem[row] = m_new[i] d_smem[row] = d_new[i] m_prev_smem[row] = m_prev[i] T.tvm_storage_sync("shared") with T.block(""): T.reads(m_prev_smem[0:32], m_smem[0:32], S_smem[0:32, 0:16], V_smem[0:16, 0:64]) T.writes(O_local[0:32, 0:64]) for li_0_lj_0_fused_0_init in T.thread_binding(4, thread="threadIdx.y"): for li_0_lj_0_fused_1_init in T.thread_binding(32, thread="threadIdx.x"): for li_1_init, lj_1_init in T.grid(4, 4): with T.block("O_gemm_init"): i = T.axis.spatial(32, (li_0_lj_0_fused_0_init * 32 + li_0_lj_0_fused_1_init) // 16 * 4 + li_1_init) j = T.axis.spatial(64, (li_0_lj_0_fused_0_init * 32 + li_0_lj_0_fused_1_init) % 16 * 4 + lj_1_init) T.reads() T.writes(O_local[i, j]) O_local[i, j] = O_local[i, j] * T.exp2(m_prev_smem[i] - m_smem[i]) for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"): for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"): for lk_0, lk_1, li_1, lj_1 in T.grid(2, 8, 4, 4): with T.block("O_gemm_update"): i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 16 * 4 + li_1) j = T.axis.spatial(64, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 16 * 4 + lj_1) k_1 = T.axis.reduce(16, lk_0 * 8 + lk_1) T.reads(O_local[i, j], m_prev_smem[i], m_smem[i], S_smem[i, k_1], V_smem[k_1, j]) T.writes(O_local[i, j]) O_local[i, j] = O_local[i, j] + S_smem[i, k_1] * T.Cast("float32", V_smem[k_1, j]) for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"): for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"): for li_1, lj_1 in T.grid(4, 4): with T.block("O_store"): i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 16 * 4 + li_1) j = T.axis.spatial(64, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 16 * 4 + lj_1) T.reads(q_indptr[b_idx:b_idx + 2], O_local[i, j], d_smem[i]) 
T.writes(output[q_indptr[b_idx] + (LH_start + i), by, j]) cur_L: T.int32 = q_indptr[b_idx] + (LH_start + i) cur_H_qo: T.int32 = by if cur_L < q_indptr[b_idx + 1]: output[cur_L, cur_H_qo, j] = T.Cast("float16", O_local[i, j] / d_smem[i]) for li_0 in range(1): for li_1 in T.thread_binding(4, thread="threadIdx.y"): for li_2 in T.thread_binding(32, thread="threadIdx.x"): with T.block("lse_store"): i = T.axis.spatial(32, li_0 * 128 + li_1 * 32 + li_2) T.where((li_0 * 4 + li_1) * 32 + li_2 < 32) T.reads(q_indptr[b_idx:b_idx + 2], m_smem[i], d_smem[i]) T.writes(lse[q_indptr[b_idx] + (LH_start + i), by]) cur_L: T.int32 = q_indptr[b_idx] + (LH_start + i) cur_H_qo: T.int32 = by if cur_L < q_indptr[b_idx + 1]: lse[cur_L, cur_H_qo] = m_smem[i] + T.log2(d_smem[i]) tile_id[0] = tile_id[0] + 16 @T.prim_func(private=True) def batch_verify_on_gpu_single_kernel(var_draft_probs: T.handle, var_draft_tokens: T.handle, var_model_probs: T.handle, var_token_tree_first_child: T.handle, var_token_tree_next_sibling: T.handle, var_uniform_samples: T.handle, var_token_tree_parent_ptr: T.handle): T.func_attr({"target": T.target({"arch": "sm_89", "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.is_scheduled": 1, "tir.noalias": T.bool(True)}) num_nodes, vocab_size = T.int32(is_size_var=True), T.int64() draft_probs = T.match_buffer(var_draft_probs, (num_nodes, vocab_size)) draft_tokens = T.match_buffer(var_draft_tokens, (num_nodes,), "int32") model_probs = T.match_buffer(var_model_probs, (num_nodes, vocab_size)) token_tree_first_child = T.match_buffer(var_token_tree_first_child, (num_nodes,), "int32") token_tree_next_sibling = T.match_buffer(var_token_tree_next_sibling, (num_nodes,), "int32") uniform_samples = T.match_buffer(var_uniform_samples, (num_nodes,)) nbatch = T.int32(is_size_var=True) token_tree_parent_ptr = 
T.match_buffer(var_token_tree_parent_ptr, (nbatch,), "int32") # with T.block("root"): child_ptr = T.alloc_buffer((1,), "int32", scope="local") parent_ptr = T.alloc_buffer((1,), "int32", scope="local") child_token = T.alloc_buffer((1,), "int32", scope="local") done = T.alloc_buffer((1,), "bool", scope="local") psum = T.alloc_buffer((1,), scope="local") t0 = T.alloc_buffer((1,), scope="local") model_prob_local = T.alloc_buffer((1,), scope="local") draft_prob_local = T.alloc_buffer((1,), scope="local") p_child = T.alloc_buffer((1,), scope="local") q_child = T.alloc_buffer((1,), scope="local") uniform_sample = T.alloc_buffer((1,), scope="local") pred_shared = T.alloc_buffer((1,), "bool", scope="shared") pred_local = T.alloc_buffer((1,), "bool", scope="local") for _bx in T.thread_binding(nbatch, thread="blockIdx.x"): for _tx in T.thread_binding(1024, thread="threadIdx.x"): with T.block("CTA"): b, tx = T.axis.remap("SS", [_bx, _tx]) T.reads(token_tree_parent_ptr[b], token_tree_first_child[T.min(parent_ptr[0], child_ptr[0]):T.min(parent_ptr[0], child_ptr[0]) + (T.max(parent_ptr[0], child_ptr[0]) + 1 - T.min(parent_ptr[0], child_ptr[0]))], parent_ptr[0], done[0], child_ptr[0], draft_tokens[child_ptr[0]], model_probs[parent_ptr[0], T.min(T.Cast("int64", child_token[0]), T.Cast("int64", tx)):T.min(T.Cast("int64", child_token[0]), T.Cast("int64", tx)) + (T.max(T.Cast("int64", child_token[0]), (vocab_size + T.int64(1023)) // T.int64(1024) * T.int64(1024) + T.Cast("int64", tx) - T.int64(1024)) + T.int64(1) - T.min(T.Cast("int64", child_token[0]), T.Cast("int64", tx)))], child_token[0], draft_probs[child_ptr[0], T.min(T.Cast("int64", child_token[0]), T.Cast("int64", tx)):T.min(T.Cast("int64", child_token[0]), T.Cast("int64", tx)) + (T.max(T.Cast("int64", child_token[0]), (vocab_size + T.int64(1023)) // T.int64(1024) * T.int64(1024) + T.Cast("int64", tx) - T.int64(1024)) + T.int64(1) - T.min(T.Cast("int64", child_token[0]), T.Cast("int64", tx)))], uniform_samples[child_ptr[0]], 
p_child[0], uniform_sample[0], q_child[0], pred_shared[0], pred_local[0], model_prob_local[0], draft_prob_local[0], psum[0], t0[0], token_tree_next_sibling[child_ptr[0]]) T.writes(parent_ptr[0], child_ptr[0], done[0], child_token[0], p_child[0], q_child[0], uniform_sample[0], pred_shared[0], pred_local[0], psum[0], model_prob_local[0], draft_prob_local[0], t0[0], model_probs[parent_ptr[0], T.Cast("int64", tx):T.Cast("int64", tx) + ((vocab_size + T.int64(1023)) // T.int64(1024) * T.int64(1024) - T.int64(1023))], token_tree_parent_ptr[b]) parent_ptr[0] = token_tree_parent_ptr[b] child_ptr[0] = token_tree_first_child[parent_ptr[0]] done[0] = T.bool(False) while not done[0]: T.tvm_storage_sync("shared") if child_ptr[0] == -1: done[0] = T.bool(True) T.tvm_storage_sync("shared") else: if tx == 0: child_token[0] = draft_tokens[child_ptr[0]] p_child[0] = model_probs[parent_ptr[0], child_token[0]] q_child[0] = draft_probs[child_ptr[0], child_token[0]] uniform_sample[0] = uniform_samples[child_ptr[0]] pred_shared[0] = p_child[0] >= uniform_sample[0] * q_child[0] T.tvm_storage_sync("shared") pred_local[0] = pred_shared[0] if pred_local[0]: parent_ptr[0] = child_ptr[0] child_ptr[0] = token_tree_first_child[child_ptr[0]] else: psum[0] = T.float32(0) for i in range((vocab_size + T.int64(1023)) // T.int64(1024)): if i * T.int64(1024) + T.Cast("int64", tx) < vocab_size: model_prob_local[0] = model_probs[parent_ptr[0], i * T.int64(1024) + T.Cast("int64", tx)] draft_prob_local[0] = draft_probs[child_ptr[0], i * T.int64(1024) + T.Cast("int64", tx)] model_prob_local[0] = T.max(model_prob_local[0] - draft_prob_local[0], T.float32(0)) psum[0] = psum[0] + model_prob_local[0] with T.block("block_cross_thread"): T.reads(psum[0]) T.writes(t0[0]) T.attr(T.comm_reducer(lambda x0, y0: x0 + y0, [T.float32(0)]), "reduce_scope", T.reinterpret("handle", T.uint64(0))) T.tvm_thread_allreduce(T.uint32(1), psum[0], T.bool(True), t0[0], tx) if t0[0] < T.float32(9.9999999999999995e-08): parent_ptr[0] = 
child_ptr[0]
                                    child_ptr[0] = token_tree_first_child[child_ptr[0]]
                                else:
                                    # NOTE(review): continuation of batch_verify_on_gpu_single_kernel —
                                    # rejected draft child: renormalize the residual distribution
                                    # max(model - draft, 0) / t0 in place, then move to the next sibling.
                                    for i in range((vocab_size + T.int64(1023)) // T.int64(1024)):
                                        if i * T.int64(1024) + T.Cast("int64", tx) < vocab_size:
                                            model_prob_local[0] = model_probs[parent_ptr[0], i * T.int64(1024) + T.Cast("int64", tx)]
                                            draft_prob_local[0] = draft_probs[child_ptr[0], i * T.int64(1024) + T.Cast("int64", tx)]
                                            # clamp the residual probability mass at zero
                                            model_prob_local[0] = T.max(model_prob_local[0] - draft_prob_local[0], T.float32(0))
                                            model_probs[parent_ptr[0], i * T.int64(1024) + T.Cast("int64", tx)] = model_prob_local[0] / t0[0]
                                    child_ptr[0] = token_tree_next_sibling[child_ptr[0]]
                    if tx == 0:
                        # thread 0 publishes the last accepted node for this sequence
                        token_tree_parent_ptr[b] = parent_ptr[0]

    @T.prim_func
    def chunk_lse(var_A: T.handle, var_temperature: T.handle, var_chunked_sum: T.handle, var_chunked_max: T.handle):
        # First pass of a chunked log-sum-exp over the vocab axis: for each 4096-wide
        # chunk of the (temperature-scaled) logits, emit the chunk max and the chunk
        # log-sum-exp (or, when temperature <= 1e-05, the count of entries equal to the
        # chunk max — see the T.Cast(... == temp_max) branch below).
        T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.noalias": T.bool(True)})
        batch_size, vocab_size = T.int64(is_size_var=True), T.int64(is_size_var=True)
        A = T.match_buffer(var_A, (batch_size, vocab_size))
        temperature = T.match_buffer(var_temperature, (batch_size,))
        num_chunks = T.int64(is_size_var=True)
        chunked_sum = T.match_buffer(var_chunked_sum, (batch_size, num_chunks))
        chunked_max = T.match_buffer(var_chunked_max, (batch_size, num_chunks))
        # with T.block("root"):
        A_pad = T.alloc_buffer((batch_size, num_chunks, T.int64(4096)))
        temp_max = T.alloc_buffer((batch_size, num_chunks))
        temp_sum = T.alloc_buffer((batch_size, num_chunks))
        for l0, l1, l2 in T.grid(batch_size, num_chunks, T.int64(4096)):
            with T.block("pad"):
                v0, v1, v2 = T.axis.remap("SSS", [l0, l1, l2])
                T.reads(temperature[v0], A[v0, v1 * T.int64(4096) + v2])
                T.writes(A_pad[v0, v1, v2])
                # Scale by 1/temperature when temperature is non-negligible; pad the
                # out-of-vocab tail of the last chunk with -FLT_MAX so it never wins the max.
                A_pad[v0, v1, v2] = T.if_then_else(v1 * T.int64(4096) + v2 < vocab_size, T.if_then_else(temperature[v0] > T.float32(1.0000000000000001e-05), A[v0, v1 * T.int64(4096) + v2] / temperature[v0], A[v0, v1 * T.int64(4096) + v2]), T.float32(-3.4028234663852886e+38))
        for l0, l1, l2 in T.grid(batch_size, num_chunks, T.int64(4096)):
            with T.block("max"):
                v0, v1, v2 = T.axis.remap("SSR", [l0, l1, l2])
                T.reads(A_pad[v0, v1, v2])
                T.writes(temp_max[v0, v1])
                with T.init():
                    temp_max[v0, v1] = T.float32(-3.4028234663852886e+38)
                temp_max[v0, v1] = T.max(temp_max[v0, v1], A_pad[v0, v1, v2])
        for l0, l1, l2 in T.grid(batch_size, num_chunks, T.int64(4096)):
            with T.block("sum_exp"):
                v0, v1, v2 = T.axis.remap("SSR", [l0, l1, l2])
                T.reads(temperature[v0], A_pad[v0, v1, v2], temp_max[v0, v1])
                T.writes(temp_sum[v0, v1])
                with T.init():
                    temp_sum[v0, v1] = T.float32(0)
                # Softmax path: sum exp(x - chunk_max). Near-zero temperature: count the
                # entries that attain the chunk max (greedy/argmax normalization).
                temp_sum[v0, v1] = temp_sum[v0, v1] + T.if_then_else(v1 * T.int64(4096) + v2 < vocab_size, T.Select(temperature[v0] > T.float32(1.0000000000000001e-05), T.exp(A_pad[v0, v1, v2] - temp_max[v0, v1]), T.Cast("float32", A_pad[v0, v1, v2] == temp_max[v0, v1])), T.float32(0))
        for l0, l1, l2 in T.grid(batch_size, num_chunks, T.int64(1)):
            with T.block("log"):
                v0, v1, v2 = T.axis.remap("SSS", [l0, l1, l2])
                T.reads(temperature[v0], temp_sum[v0, v1], temp_max[v0, v1])
                T.writes(chunked_sum[v0, v1], chunked_max[v0, v1])
                # Take log only on the softmax path; the count is passed through as-is.
                chunked_sum[v0, v1] = T.Select(temperature[v0] > T.float32(1.0000000000000001e-05), T.log(temp_sum[v0, v1]), temp_sum[v0, v1])
                chunked_max[v0, v1] = temp_max[v0, v1]

    @T.prim_func
    def compact_kv_copy(var_pages: T.handle, var_copy_length_indptr: T.handle, var_copy_src_dst_pos: T.handle, batch_size: T.int32):
        # Compact the paged KV cache: move K/V entries from source slots to destination
        # slots as listed per sequence in copy_src_dst_pos (row 0 = src, row 1 = dst).
        T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.is_scheduled":
1}) num_pages = T.int32() pages = T.match_buffer(var_pages, (num_pages, 2, 20, 16, 64), "float16") copy_length_indptr = T.match_buffer(var_copy_length_indptr, (batch_size + 1,), "int32", offset_factor=1) total_copy_length = T.int32() copy_src_dst_pos = T.match_buffer(var_copy_src_dst_pos, (2, total_copy_length), "int32", offset_factor=1) with T.block("root"): T.reads() T.writes() for bhd_o in T.thread_binding((batch_size * 1280 + 1023) // 1024, thread="blockIdx.x"): for bhd_i in T.thread_binding(1024, thread="threadIdx.x"): b: T.int32 = (bhd_o * 1024 + bhd_i) // 1280 h: T.int32 = (bhd_o * 1024 + bhd_i) // 64 % 20 d: T.int32 = (bhd_o * 1024 + bhd_i) % 64 if bhd_o * 1024 + bhd_i < batch_size * 20 * 64: for i in range(copy_length_indptr[b + 1] - copy_length_indptr[b]): src_pos: T.int32 = copy_src_dst_pos[0, copy_length_indptr[b] + i] dst_pos: T.int32 = copy_src_dst_pos[1, copy_length_indptr[b] + i] pages[dst_pos // 16, 0, h, dst_pos % 16, d] = pages[src_pos // 16, 0, h, src_pos % 16, d] pages[dst_pos // 16, 1, h, dst_pos % 16, d] = pages[src_pos // 16, 1, h, src_pos % 16, d] @T.prim_func def copy_single_page(var_pages: T.handle, src_page_id: T.int64, tgt_page_id: T.int64, copy_length: T.int64): T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.is_scheduled": 1}) num_pages, page_size = T.int32(), T.int64() pages = T.match_buffer(var_pages, (num_pages, 2, 20, page_size, 64), "float16") # with T.block("root"): for b in T.thread_binding((copy_length * T.int64(1280) + T.int64(1023)) // T.int64(1024), thread="blockIdx.x"): for t in T.thread_binding(1024, thread="threadIdx.x"): with T.block("copy"): vh = T.axis.spatial(20, T.Cast("int32", (b * T.int64(1024) + T.Cast("int64", 
t)) // (copy_length * T.int64(64))))
                    vp = T.axis.spatial(copy_length, (b * T.int64(1024) + T.Cast("int64", t)) % (copy_length * T.int64(64)) // T.int64(64))
                    vd = T.axis.spatial(64, T.Cast("int32", (b * T.int64(1024) + T.Cast("int64", t)) % T.int64(64)))
                    T.reads(pages[src_page_id, 0:2, vh, vp, vd])
                    T.writes(pages[tgt_page_id, 0:2, vh, vp, vd])
                    # Copy both the K (index 0) and V (index 1) planes of the slot.
                    pages[tgt_page_id, 0, vh, vp, vd] = pages[src_page_id, 0, vh, vp, vd]
                    pages[tgt_page_id, 1, vh, vp, vd] = pages[src_page_id, 1, vh, vp, vd]

    @T.prim_func
    def full(var_result: T.handle, value: T.int32):
        # Fill a (batch_size, 1) int32 buffer with the scalar `value`.
        T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32})})
        batch_size = T.int32(is_size_var=True)
        result = T.match_buffer(var_result, (batch_size, 1), "int32")
        # with T.block("root"):
        for i in range(batch_size):
            with T.block("block"):
                vi = T.axis.spatial(batch_size, i)
                T.reads()
                T.writes(result[vi, 0])
                result[vi, 0] = value

    @T.prim_func
    def fused_rope(var_qkv: T.handle, var_position_map: T.handle, var_q: T.handle, var_k: T.handle, var_v: T.handle, apply_rope: T.int32):
        # Split the fused QKV projection (60 heads = 20 Q + 20 K + 20 V) into separate
        # q/k/v buffers, applying rotary position embedding to Q and K when
        # apply_rope > 0. Rotation pairs feature d with d±32 (half-dim 32 of 64).
        # NOTE(review): the rotary frequency base here is T.float32(1)
        # (T.pow(T.float32(1), ...) == 1 for all exponents), which looks degenerate —
        # confirm against the exporter whether rope_theta was folded out intentionally.
        T.func_attr({"op_pattern": 8, "target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.noalias": T.bool(True)})
        seq_len = T.int64()
        qkv = T.match_buffer(var_qkv, (seq_len, 60, 64), "float16")
        position_map = T.match_buffer(var_position_map, (seq_len,), "int32", offset_factor=1)
        q = T.match_buffer(var_q, (seq_len, 20, 64), "float16")
        k = T.match_buffer(var_k, (seq_len, 20, 64), "float16")
        v = T.match_buffer(var_v, (seq_len, 20, 64), "float16")
        # with T.block("root"):
        for iters_0, iters_1, iters_2 in T.grid(seq_len, 60, 64):
            with T.block("llama_fused_rope"):
                s, h, d = T.axis.remap("SSS", [iters_0, iters_1, iters_2])
                T.reads(position_map[s], qkv[s, h, d - 32:d - 32 + 65])
                T.writes(q[s, h, d], k[s, h - 20, d], v[s, h - 40, d])
                # Heads 0-19 -> q (rotated), 20-39 -> k (rotated), 40-59 -> v (copied).
                if h < 20:
                    q[s, h, d] = T.if_then_else(apply_rope > 0 and d < 64, T.Cast("float16", T.cos(T.Cast("float32", position_map[s]) / T.pow(T.float32(1), T.Cast("float32", d * 2 % 64) / T.float32(64))) * T.Cast("float32", qkv[s, h, d]) + T.sin(T.Cast("float32", position_map[s]) / T.pow(T.float32(1), T.Cast("float32", d * 2 % 64) / T.float32(64))) * T.Cast("float32", T.if_then_else(d < 32, qkv[s, h, d + 32] * T.float16(-1), qkv[s, h, d - 32]))), qkv[s, h, d])
                else:
                    if h < 40:
                        k[s, h - 20, d] = T.if_then_else(apply_rope > 0 and d < 64, T.Cast("float16", T.cos(T.Cast("float32", position_map[s]) / T.pow(T.float32(1), T.Cast("float32", d * 2 % 64) / T.float32(64))) * T.Cast("float32", qkv[s, h, d]) + T.sin(T.Cast("float32", position_map[s]) / T.pow(T.float32(1), T.Cast("float32", d * 2 % 64) / T.float32(64))) * T.Cast("float32", T.if_then_else(d < 32, qkv[s, h, d + 32] * T.float16(-1), qkv[s, h, d - 32]))), qkv[s, h, d])
                    else:
                        v[s, h - 40, d] = qkv[s, h, d]

    @T.prim_func
    def gather_probs(var_src: T.handle, var_indices: T.handle, var_dst: T.handle):
        # Row gather: dst[b, :] = src[indices[b], :] for each of batch_size rows.
        T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.noalias": T.bool(True)})
        m, n = T.int32(is_size_var=True), T.int32(is_size_var=True)
        src = T.match_buffer(var_src, (m, n))
        batch_size = T.int32(is_size_var=True)
        indices = T.match_buffer(var_indices, (batch_size,), "int32")
        dst = T.match_buffer(var_dst,
(batch_size, n))
        # with T.block("root"):
        for b, j in T.grid(batch_size, n):
            with T.block("gather_2d"):
                vb, vj = T.axis.remap("SS", [b, j])
                T.reads(src[indices[vb], vj], indices[vb])
                T.writes(dst[vb, vj])
                # dst[b, j] = src[indices[b], j]
                dst[vb, vj] = src[indices[vb], vj]

    @T.prim_func(private=True)
    def get_index_from_sorted(A: T.handle, B: T.handle, C: T.handle, D: T.handle, E: T.handle, F: T.handle):
        # Inverse-CDF sampling over sorted probabilities: for each output sample, find
        # the first position whose renormalized cumulative probability exceeds the
        # uniform sample, and emit the original vocab index at that position.
        # sample_indices maps each output sample to its source row in cumsum_sorted.
        T.func_attr({"target": T.target({"arch": "sm_89", "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32})})
        batch, vocab_size = T.int64(), T.int64()
        cumsum_sorted = T.match_buffer(A, (batch, vocab_size))
        indices = T.match_buffer(B, (batch, vocab_size), "int32")
        renorm_prob = T.match_buffer(C, (batch, 1))
        out_batch = T.int64()
        usample = T.match_buffer(D, (out_batch, 1))
        sample_indices = T.match_buffer(E, (out_batch, 1), "int32")
        output_index = T.match_buffer(F, (out_batch, 1), "int32")
        # with T.block("root"):
        for ax0, ax1 in T.grid(out_batch, vocab_size):
            with T.block("T_get_index_from_sorted"):
                v_ax0, v_ax1 = T.axis.remap("SS", [ax0, ax1])
                T.reads(usample[v_ax0, T.int64(0)], cumsum_sorted[sample_indices[v_ax0, T.int64(0)], v_ax1 - T.int64(1):v_ax1 - T.int64(1) + T.int64(2)], sample_indices[v_ax0, T.int64(0)], renorm_prob[sample_indices[v_ax0, T.int64(0)], 0], indices[sample_indices[v_ax0, T.int64(0)], T.min(T.int64(0), v_ax1):T.min(T.int64(0), v_ax1) + (T.max(T.int64(0), v_ax1) + T.int64(1) - T.min(T.int64(0), v_ax1))])
                T.writes(output_index[v_ax0, 0])
                # A position is selected when the normalized CDF first crosses the
                # sample (checked via the predecessor), or unconditionally at the last
                # vocab position as a fallback.
                if usample[v_ax0, T.int64(0)] < cumsum_sorted[sample_indices[v_ax0, T.int64(0)], v_ax1] / renorm_prob[sample_indices[v_ax0, T.int64(0)], 0] or v_ax1 + T.int64(1) == vocab_size:
                    if v_ax1 == T.int64(0):
                        output_index[v_ax0, 0] = indices[sample_indices[v_ax0, T.int64(0)], 0]
                    else:
                        # Only the first crossing writes: predecessor must NOT cross.
                        if usample[v_ax0, T.int64(0)] >= cumsum_sorted[sample_indices[v_ax0, T.int64(0)], v_ax1 - T.int64(1)] / renorm_prob[sample_indices[v_ax0, T.int64(0)], 0]:
                            output_index[v_ax0, 0] = indices[sample_indices[v_ax0, T.int64(0)], v_ax1]

    @T.prim_func(private=True)
    def get_renorm_prob(A: T.handle, B: T.handle, C: T.handle, D: T.handle):
        # Compute the renormalization constant for combined top-p / top-k sampling:
        # the cumulative probability mass at the cutoff position where either the
        # top-p mass or the top-k count is first satisfied.
        T.func_attr({"target": T.target({"arch": "sm_89", "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32})})
        batch, vocab_size = T.int64(), T.int64()
        cumsum_sorted = T.match_buffer(A, (batch, vocab_size))
        top_p = T.match_buffer(B, (batch, 1))
        top_k = T.match_buffer(C, (batch, 1), "int32")
        renorm_prob = T.match_buffer(D, (batch, 1))
        # with T.block("root"):
        for ax0, ax1 in T.grid(batch, vocab_size):
            with T.block("T_get_renorm_prob"):
                v_ax0, v_ax1 = T.axis.remap("SS", [ax0, ax1])
                T.reads(cumsum_sorted[v_ax0, T.min(T.min(T.int64(0), v_ax1), v_ax1 + T.int64(1)):T.min(T.min(T.int64(0), v_ax1), v_ax1 + T.int64(1)) + (T.max(T.max(T.int64(0), v_ax1), v_ax1 + T.int64(1)) + T.int64(1) - T.min(T.min(T.int64(0), v_ax1), v_ax1 + T.int64(1)))], top_p[v_ax0, 0], top_k[v_ax0, 0])
                T.writes(renorm_prob[v_ax0, 0])
                # If even the first entry already satisfies top-p/top-k, the cutoff is
                # the first cumulative value; otherwise find the last position still
                # inside both limits and take its successor's cumulative value.
                if not (cumsum_sorted[v_ax0, 0] < top_p[v_ax0, 0] and top_k[v_ax0, 0] > 1):
                    renorm_prob[v_ax0, 0] = cumsum_sorted[v_ax0, 0]
                else:
                    if cumsum_sorted[v_ax0, v_ax1] < top_p[v_ax0, 0] and v_ax1 + T.int64(1) < T.Cast("int64", top_k[v_ax0, 0]):
                        if v_ax1 + T.int64(1) == vocab_size:
                            renorm_prob[v_ax0, 0] = cumsum_sorted[v_ax0, v_ax1]
                        else:
                            # Successor leaves the limits -> this is the boundary.
                            if not (cumsum_sorted[v_ax0, v_ax1 + T.int64(1)] < top_p[v_ax0, 0] and v_ax1 + T.int64(1) + T.int64(1) < T.Cast("int64", top_k[v_ax0, 0])):
                                renorm_prob[v_ax0, 0] = cumsum_sorted[v_ax0, v_ax1 + T.int64(1)]

    @T.prim_func(private=True)
    def index(var_layer_norm355: T.handle, index: T.Buffer((T.int64(1), T.int64(1), T.int64(1280)), "float16")):
        # Select the last sequence position of the input hidden states.
        T.func_attr({"target": T.target({"arch": "sm_89", "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152,
"max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.noalias": T.bool(True)}) seq_len = T.int64() layer_norm355 = T.match_buffer(var_layer_norm355, (T.int64(1), seq_len, T.int64(1280)), "float16") # with T.block("root"): for i, _, k in T.grid(T.int64(1), T.int64(1), T.int64(1280)): with T.block("index"): v_i, v__, v_k = T.axis.remap("SSS", [i, _, k]) T.reads(layer_norm355[v_i, seq_len - T.int64(1), v_k]) T.writes(index[v_i, v__, v_k]) index[v_i, v__, v_k] = layer_norm355[v_i, seq_len - T.int64(1), v_k] @T.prim_func def merge_state_inplace(v: T.handle, s: T.handle, v_other: T.handle, s_other: T.handle): T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.is_scheduled": 1}) N, H, D = T.int32(is_size_var=True), T.int32(is_size_var=True), T.int32(is_size_var=True) V = T.match_buffer(v, (N, H, D), "float16") S = T.match_buffer(s, (N, H)) V_other = T.match_buffer(v_other, (N, H, D), "float16") S_other = T.match_buffer(s_other, (N, H)) # with T.block("root"): for bx in T.thread_binding(N, thread="blockIdx.x"): for by in T.thread_binding(1, thread="blockIdx.y"): for ty in T.thread_binding(20, thread="threadIdx.y"): for tx in T.thread_binding(16, thread="threadIdx.x"): with T.block("merge"): T.reads(S[bx, ty + by * 20], S_other[bx, ty + by * 20], V[bx, ty + by * 20, tx * 4:tx * 4 + 4], V_other[bx, ty + by * 20, tx * 4:tx * 4 + 4]) T.writes(V[bx, ty + by * 20, tx * 4:tx * 4 + 4], S[bx, ty + by * 20]) s_val = T.alloc_buffer((1,), scope="local") s_other_val = T.alloc_buffer((1,), scope="local") s_max = T.alloc_buffer((1,), scope="local") scale = T.alloc_buffer((1,), scope="local") other_scale = T.alloc_buffer((1,), scope="local") v_vec = T.alloc_buffer((4,), 
"float16", scope="local") v_other_vec = T.alloc_buffer((4,), "float16", scope="local") s_val[0] = S[bx, ty + by * 20] s_other_val[0] = S_other[bx, ty + by * 20] s_max[0] = T.max(s_val[0], s_other_val[0]) s_val[0] = T.exp2(s_val[0] - s_max[0]) s_other_val[0] = T.exp2(s_other_val[0] - s_max[0]) scale[0] = s_val[0] / (s_val[0] + s_other_val[0]) other_scale[0] = s_other_val[0] / (s_val[0] + s_other_val[0]) for vec in T.vectorized(4): v_vec[vec] = V[bx, ty + by * 20, tx * 4 + vec] for vec in T.vectorized(4): v_other_vec[vec] = V_other[bx, ty + by * 20, tx * 4 + vec] for vec in range(4): v_vec[vec] = T.Cast("float16", T.Cast("float32", v_vec[vec]) * scale[0] + T.Cast("float32", v_other_vec[vec]) * other_scale[0]) for vec in T.vectorized(4): V[bx, ty + by * 20, tx * 4 + vec] = v_vec[vec] S[bx, ty + by * 20] = T.log2(s_val[0] + s_other_val[0]) + s_max[0] @T.prim_func def sampler_take_probs_tir(var_unsorted_probs: T.handle, var_sorted_indices: T.handle, var_sample_indices: T.handle, var_sampling_results: T.handle, var_top_prob_offsets: T.handle, var_sampled_values: T.handle, var_top_prob_probs: T.handle, var_top_prob_indices: T.handle): T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32})}) batch_size, vocab_size = T.int32(is_size_var=True), T.int32(is_size_var=True) unsorted_probs = T.match_buffer(var_unsorted_probs, (batch_size, vocab_size)) sorted_indices = T.match_buffer(var_sorted_indices, (batch_size, vocab_size), "int32") num_samples = T.int32(is_size_var=True) sample_indices = T.match_buffer(var_sample_indices, (num_samples,), "int32") sampling_results = T.match_buffer(var_sampling_results, (num_samples,), "int32") num_positions = T.int32(is_size_var=True) top_prob_offsets = 
T.match_buffer(var_top_prob_offsets, (num_positions,), "int32") sampled_values = T.match_buffer(var_sampled_values, (num_samples,)) top_prob_probs = T.match_buffer(var_top_prob_probs, (num_positions,)) top_prob_indices = T.match_buffer(var_top_prob_indices, (num_positions,), "int32") # with T.block("root"): for i in range(num_positions + num_samples): with T.block("block"): vi = T.axis.spatial(num_positions + num_samples, i) T.reads(top_prob_offsets[vi], sorted_indices[top_prob_offsets[vi] // vocab_size, top_prob_offsets[vi] % vocab_size], unsorted_probs[T.min(top_prob_offsets[vi] // vocab_size, sample_indices[vi - num_positions]):T.min(top_prob_offsets[vi] // vocab_size, sample_indices[vi - num_positions]) + (T.max(top_prob_offsets[vi] // vocab_size, sample_indices[vi - num_positions]) + 1 - T.min(top_prob_offsets[vi] // vocab_size, sample_indices[vi - num_positions])), T.min(sorted_indices[top_prob_offsets[vi] // vocab_size, top_prob_offsets[vi] % vocab_size], sampling_results[vi - num_positions]):T.min(sorted_indices[top_prob_offsets[vi] // vocab_size, top_prob_offsets[vi] % vocab_size], sampling_results[vi - num_positions]) + (T.max(sorted_indices[top_prob_offsets[vi] // vocab_size, top_prob_offsets[vi] % vocab_size], sampling_results[vi - num_positions]) + 1 - T.min(sorted_indices[top_prob_offsets[vi] // vocab_size, top_prob_offsets[vi] % vocab_size], sampling_results[vi - num_positions]))], sample_indices[vi - num_positions], sampling_results[vi - num_positions]) T.writes(top_prob_indices[vi], top_prob_probs[vi], sampled_values[vi - num_positions]) if vi < num_positions: row: T.int32 = top_prob_offsets[vi] // vocab_size col: T.int32 = top_prob_offsets[vi] % vocab_size top_prob_indices[vi] = sorted_indices[row, col] top_prob_probs[vi] = unsorted_probs[row, sorted_indices[row, col]] else: vj: T.int32 = vi - num_positions sampled_values[vj] = unsorted_probs[sample_indices[vj], sampling_results[vj]] @T.prim_func def scatter_probs(var_src: T.handle, var_indices:
# NOTE(review): this chunk is a collapsed dump of machine-generated TVMScript
# (`tvm.script` printer output); code lines are kept byte-identical and only
# standalone comment lines are inserted at points that fall inside brackets.
# `scatter_probs` (signature split across this point): for every (b, j) it
# does dst[indices[b], j] = src[b, j] — a row scatter keyed by `indices`.
# `softmax_with_chunked_sum` (further below): second pass of a chunked
# softmax — reduces per-chunk max/sum (blocks "max" / "sum_exp") in shared
# memory, then writes exp(A/temperature - logsumexp) per element; when
# temperature <= ~1e-05 it instead emits an indicator of the max element
# divided by the accumulated count — presumably a greedy/argmax fallback,
# TODO confirm against the producer of chunked_sum/chunked_max.
T.handle, var_dst: T.handle): T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.noalias": T.bool(True)}) batch_size, n = T.int32(is_size_var=True), T.int32(is_size_var=True) src = T.match_buffer(var_src, (batch_size, n)) indices = T.match_buffer(var_indices, (batch_size,), "int32") m = T.int32(is_size_var=True) dst = T.match_buffer(var_dst, (m, n)) # with T.block("root"): for b, j in T.grid(batch_size, n): with T.block("scatter_2d"): vb, vj = T.axis.remap("SS", [b, j]) T.reads(src[vb, vj], indices[vb]) T.writes(dst[indices[vb], vj]) dst[indices[vb], vj] = src[vb, vj] @T.prim_func def softmax_with_chunked_sum(var_A: T.handle, var_temperature: T.handle, var_chunked_sum: T.handle, var_chunked_max: T.handle, var_softmax: T.handle): T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.is_scheduled": 1, "tir.noalias": T.bool(True)}) batch_size, vocab_size = T.int64(is_size_var=True), T.int64(is_size_var=True) A = T.match_buffer(var_A, (batch_size, vocab_size)) temperature = T.match_buffer(var_temperature, (batch_size,)) num_chunks = T.int64(is_size_var=True) chunked_sum = T.match_buffer(var_chunked_sum, (batch_size, num_chunks)) chunked_max = T.match_buffer(var_chunked_max, (batch_size, num_chunks)) softmax = T.match_buffer(var_softmax, (batch_size, vocab_size)) # with T.block("root"): temp_max_shared = T.alloc_buffer((batch_size,), scope="shared") temp_sum_shared = 
T.alloc_buffer((batch_size,), scope="shared") for l0_l1_fused in T.thread_binding(batch_size * num_chunks, thread="blockIdx.x"): for ax0_1 in T.thread_binding(T.int64(32), thread="threadIdx.x"): for ax0_0 in T.serial((num_chunks + T.int64(31)) // T.int64(32), annotations={"pragma_auto_unroll_max_step": 64, "pragma_unroll_explicit": 1}): with T.block("max"): v0 = T.axis.spatial(batch_size, l0_l1_fused % (num_chunks * batch_size) // num_chunks) v1 = T.axis.reduce(num_chunks, ax0_0 * T.int64(32) + ax0_1) T.where(ax0_0 * T.int64(32) + ax0_1 < num_chunks) T.reads(chunked_max[v0, v1]) T.writes(temp_max_shared[v0]) with T.init(): temp_max_shared[v0] = T.float32(-3.4028234663852886e+38) temp_max_shared[v0] = T.max(temp_max_shared[v0], chunked_max[v0, v1]) for ax0_1 in T.thread_binding(T.int64(32), thread="threadIdx.x"): for ax0_0 in T.serial((num_chunks + T.int64(31)) // T.int64(32), annotations={"pragma_auto_unroll_max_step": 64, "pragma_unroll_explicit": 1}): with T.block("sum_exp"): v0 = T.axis.spatial(batch_size, l0_l1_fused % (num_chunks * batch_size) // num_chunks) v1 = T.axis.reduce(num_chunks, ax0_0 * T.int64(32) + ax0_1) T.where(ax0_0 * T.int64(32) + ax0_1 < num_chunks) T.reads(temperature[v0], chunked_sum[v0, v1], chunked_max[v0, v1], temp_max_shared[v0]) T.writes(temp_sum_shared[v0]) with T.init(): temp_sum_shared[v0] = T.float32(0) temp_sum_shared[v0] = temp_sum_shared[v0] + T.Select(temperature[v0] > T.float32(1.0000000000000001e-05), T.exp(chunked_sum[v0, v1] + chunked_max[v0, v1] - temp_max_shared[v0]), T.Cast("float32", chunked_max[v0, v1] == temp_max_shared[v0]) * chunked_sum[v0, v1]) for l2_0 in T.serial(T.int64(4), annotations={"pragma_auto_unroll_max_step": 64, "pragma_unroll_explicit": 1}): for l2_1 in T.thread_binding(T.int64(32), thread="threadIdx.y"): for l2_2 in T.thread_binding(T.int64(32), thread="threadIdx.x"): with T.block("log_pad"): v0 = T.axis.spatial(batch_size, l0_l1_fused % (num_chunks * batch_size) // num_chunks) v1 = 
T.axis.spatial(num_chunks, l0_l1_fused % num_chunks) v2 = T.axis.spatial(T.int64(4096), l2_0 * T.int64(1024) + l2_1 * T.int64(32) + l2_2) T.reads(temperature[v0], A[v0, v1 * T.int64(4096) + v2], temp_sum_shared[v0], temp_max_shared[v0]) T.writes(softmax[v0, v1 * T.int64(4096) + v2]) if v1 * T.int64(4096) + v2 < vocab_size: softmax[v0, v1 * T.int64(4096) + v2] = T.if_then_else(temperature[v0] > T.float32(1.0000000000000001e-05), T.exp(A[v0, v1 * T.int64(4096) + v2] / temperature[v0] - (T.log(temp_sum_shared[v0]) + temp_max_shared[v0])), T.Cast("float32", A[v0, v1 * T.int64(4096) + v2] == temp_max_shared[v0]) / temp_sum_shared[v0]) @T.prim_func(private=True) def take_sorted_probs(var_probs: T.handle, var_lv1: T.handle, var_take_sorted_probs: T.handle): T.func_attr({"target": T.target({"arch": "sm_89", "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.noalias": T.bool(True)}) batch_size, vocab_size = T.int64(), T.int64() probs = T.match_buffer(var_probs, (batch_size, vocab_size)) lv1 = T.match_buffer(var_lv1, (batch_size, vocab_size), "int32") batch_size_1, vocab_size_1 = T.int64(), T.int64() take_sorted_probs = T.match_buffer(var_take_sorted_probs, (batch_size_1, vocab_size_1)) # with T.block("root"): for i, j in T.grid(batch_size_1, vocab_size_1): with T.block("take_sorted_probs"): v_i, v_j = T.axis.remap("SS", [i, j]) T.reads(probs[v_i, lv1[v_i, v_j]], lv1[v_i, v_j]) T.writes(take_sorted_probs[v_i, v_j]) take_sorted_probs[v_i, v_j] = probs[v_i, lv1[v_i, v_j]] @T.prim_func def tir_kv_cache_debug_get_kv(var_pages: T.handle, var_position_map: T.handle, var_k_data: T.handle, var_v_data: T.handle, layer_id: T.int64): T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": 
# NOTE(review): `take_sorted_probs` above gathers probs through the argsort
# index matrix lv1 (take_sorted_probs[i, j] = probs[i, lv1[i, j]]).
# `tir_kv_cache_debug_get_kv` (its target dict continues on the next line)
# copies K/V rows for the positions listed in `position_map` out of the
# paged cache into dense per-layer k_data/v_data buffers.
# NOTE(review): byte-identical continuation of the collapsed TVMScript dump.
# Below: the tail of `tir_kv_cache_debug_get_kv` — page index is
# position // page_size, slot is position % page_size, axis index 0 selects
# K and 1 selects V in the pages buffer — followed by
# `tir_kv_cache_transpose_append`, which writes per-token K/V rows into
# 16-slot pages and skips tokens whose position_map entry is -1.
["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.noalias": T.bool(True)}) num_pages, page_size = T.int64(), T.int64(is_size_var=True) pages = T.match_buffer(var_pages, (num_pages, 2, 20, page_size, 64), "float16") seqlen = T.int64(is_size_var=True) position_map = T.match_buffer(var_position_map, (seqlen,), "int32", offset_factor=1) k_data = T.match_buffer(var_k_data, (32, seqlen, 20, 64), "float16") v_data = T.match_buffer(var_v_data, (32, seqlen, 20, 64), "float16") # with T.block("root"): for p, h, d in T.grid(seqlen, 20, 64): with T.block("copy0"): vp, vh, vd = T.axis.remap("SSS", [p, h, d]) T.reads(position_map[vp], pages[T.Cast("int64", position_map[vp]) // page_size, 0:2, vh, T.Cast("int64", position_map[vp]) % page_size, vd]) T.writes(k_data[layer_id, vp, vh, vd], v_data[layer_id, vp, vh, vd]) position: T.int32 = position_map[vp] k_data[layer_id, vp, vh, vd] = pages[T.Cast("int64", position) // page_size, 0, vh, T.Cast("int64", position) % page_size, vd] v_data[layer_id, vp, vh, vd] = pages[T.Cast("int64", position) // page_size, 1, vh, T.Cast("int64", position) % page_size, vd] @T.prim_func def tir_kv_cache_transpose_append(var_pages: T.handle, var_k_data: T.handle, var_v_data: T.handle, var_position_map: T.handle): T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.noalias": T.bool(True)}) num_pages = T.int64() pages = T.match_buffer(var_pages, (num_pages, 2, 20, 16, 64), "float16") ntoken = T.int64(is_size_var=True) k_data = T.match_buffer(var_k_data, (ntoken, 20, 64), "float16") v_data = T.match_buffer(var_v_data, (ntoken, 20, 64), "float16") position_map = 
T.match_buffer(var_position_map, (ntoken,), "int32", offset_factor=1) # with T.block("root"): for global_pos, h, f in T.grid(ntoken, 20, 64): if position_map[global_pos] != -1: with T.block("k_transpose_append"): vgpos, vh, vf = T.axis.remap("SSS", [global_pos, h, f]) T.reads(position_map[vgpos], k_data[vgpos, vh, vf]) T.writes(pages[position_map[vgpos] // 16, 0, vh, position_map[vgpos] % 16, vf]) position: T.int32 = position_map[vgpos] pages[position // 16, 0, vh, position % 16, vf] = k_data[vgpos, vh, vf] with T.block("v_transpose_append"): vgpos, vh, vf = T.axis.remap("SSS", [global_pos, h, f]) T.reads(position_map[vgpos], v_data[vgpos, vh, vf]) T.writes(pages[position_map[vgpos] // 16, 1, vh, position_map[vgpos] % 16, vf]) position: T.int32 = position_map[vgpos] pages[position // 16, 1, vh, position % 16, vf] = v_data[vgpos, vh, vf] @T.prim_func(private=True) def top_p_pivot_cutoff(var_prob: T.handle, var_top_p_arr: T.handle, var_init_pivots: T.handle, var_final_pivot: T.handle, var_final_lsum: T.handle): T.func_attr({"target": T.target({"arch": "sm_89", "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.is_scheduled": 1, "tir.noalias": T.bool(True)}) B, N = T.int32(), T.int32() prob = T.match_buffer(var_prob, (B, N)) top_p_arr = T.match_buffer(var_top_p_arr, (B,)) init_pivots = T.match_buffer(var_init_pivots, (B, 3)) final_pivot = T.match_buffer(var_final_pivot, (B,)) final_lsum = T.match_buffer(var_final_lsum, (B,)) # with T.block("root"): pivot = T.alloc_buffer((3,), scope="local") top_p = T.alloc_buffer((1,), scope="local") L = T.alloc_buffer((1,), scope="shared") R_1 = T.alloc_buffer((1,), scope="shared") L_local = T.alloc_buffer((1,), scope="local") R_local = T.alloc_buffer((1,), scope="local") q = T.alloc_buffer((1,), scope="local") lsum = T.alloc_buffer((3,), scope="local") lmin_broadcast = 
T.alloc_buffer((1,), scope="shared") lmin_broadcast_local = T.alloc_buffer((1,), scope="local") lmin = T.alloc_buffer((3,), scope="local") cmin = T.alloc_buffer((3,), "int32", scope="local") total_sum = T.alloc_buffer((1,), scope="local") it = T.alloc_buffer((1,), "int32", scope="local") es_local = T.alloc_buffer((1,), "bool", scope="local") es = T.alloc_buffer((1,), "bool", scope="shared") find_pivot_local = T.alloc_buffer((1,), "bool", scope="local") find_pivot = T.alloc_buffer((1,), "bool", scope="shared") total_sum_reduce = T.alloc_buffer((1,), scope="local") lsum_reduce = T.alloc_buffer((1,), scope="local") lmin_reduce = T.alloc_buffer((1,), scope="local") cmin_reduce = T.alloc_buffer((1,), "int32", scope="local") for _bx in T.thread_binding(B, thread="blockIdx.x"): for _tx in T.thread_binding(1024, thread="threadIdx.x"): with T.block("CTA"): b, tx = T.axis.remap("SS", [_bx, _tx]) T.reads(top_p_arr[b], top_p[0], L[0], R_1[0], init_pivots[b, 0:3], L_local[0], R_local[0], find_pivot_local[0], it[0], es_local[0], prob[b, it[0] * 1024 + tx], total_sum[0], q[0], pivot[T.min(0, it[0]):T.min(0, it[0]) + (T.max(2, it[0]) + 1 - T.min(0, it[0]))], lsum[T.min(0, it[0]):T.min(0, it[0]) + (T.max(2, it[0]) + 1 - T.min(0, it[0]))], lmin[T.min(0, it[0]):T.min(0, it[0]) + (T.max(2, it[0]) + 1 - T.min(0, it[0]))], cmin[T.min(0, it[0]):T.min(0, it[0]) + (T.max(2, it[0]) + 1 - T.min(0, it[0]))], total_sum_reduce[0], es[0], lmin_reduce[0], lmin_broadcast[0], lmin_broadcast_local[0], lsum_reduce[0], cmin_reduce[0], find_pivot[0]) T.writes(top_p[0], L[0], R_1[0], find_pivot[0], L_local[0], R_local[0], pivot[0:3], find_pivot_local[0], final_lsum[b], final_pivot[b], lsum[0:3], lmin[0:3], cmin[0:3], total_sum[0], it[0], es_local[0], q[0], total_sum_reduce[0], es[0], lsum_reduce[0], lmin_reduce[0], lmin_broadcast[0], lmin_broadcast_local[0], cmin_reduce[0]) top_p[0] = top_p_arr[b] if tx == 0: L[0] = T.float32(1) - top_p[0] R_1[0] = T.float32(9.9999999999999995e-08) find_pivot[0] = 
T.bool(False) T.tvm_storage_sync("shared") L_local[0] = L[0] R_local[0] = R_1[0] for i in T.unroll(3): pivot[i] = init_pivots[b, i] find_pivot_local[0] = T.bool(False) if L_local[0] - R_local[0] <= T.float32(9.9999999999999995e-08): if tx == 0: final_lsum[b] = T.float32(1) final_pivot[b] = T.float32(0) find_pivot_local[0] = T.bool(True) while T.tvm_thread_invariant(L_local[0] - R_local[0] > T.float32(9.9999999999999995e-08) and not find_pivot_local[0]): T.tvm_storage_sync("shared") for pidx in T.unroll(3): lsum[pidx] = T.float32(0) lmin[pidx] = T.float32(3.4028234663852886e+38) cmin[pidx] = 0 total_sum[0] = T.float32(0) it[0] = 0 es_local[0] = T.bool(False) while it[0] < (N + 1024 - 1) // 1024 and not es_local[0]: q[0] = T.if_then_else(it[0] * 1024 + tx < N, prob[b, it[0] * 1024 + tx], T.float32(0)) total_sum[0] = total_sum[0] + q[0] for pidx in T.unroll(3): if q[0] >= pivot[pidx]: lsum[pidx] = lsum[pidx] + q[0] if lmin[pidx] > q[0]: lmin[pidx] = q[0] cmin[pidx] = 1 else: if lmin[pidx] == q[0]: cmin[pidx] = cmin[pidx] + 1 it[0] = it[0] + 1 if it[0] % 32 == 0: with T.block("block_cross_thread"): T.reads(total_sum[0]) T.writes(total_sum_reduce[0]) T.attr(T.comm_reducer(lambda x0, y0: x0 + y0, [T.float32(0)]), "reduce_scope", T.reinterpret("handle", T.uint64(0))) T.tvm_thread_allreduce(T.uint32(1), total_sum[0], T.bool(True), total_sum_reduce[0], tx) if tx == 0: es[0] = T.float32(1) - total_sum_reduce[0] < pivot[2] T.tvm_storage_sync("shared") es_local[0] = es[0] T.tvm_storage_sync("shared") for pidx in range(3): with T.block("block_cross_thread"): T.reads(lsum[pidx]) T.writes(lsum_reduce[0]) T.attr(T.comm_reducer(lambda x0, y0: x0 + y0, [T.float32(0)]), "reduce_scope", T.reinterpret("handle", T.uint64(0))) T.tvm_thread_allreduce(T.uint32(1), lsum[pidx], T.bool(True), lsum_reduce[0], tx) with T.block("block_cross_thread"): T.reads(lmin[pidx]) T.writes(lmin_reduce[0]) T.attr(T.comm_reducer(lambda x0, y0: T.min(x0, y0), [T.float32(0)]), "reduce_scope", 
# NOTE(review): cross-thread min-reduction of lmin[pidx], the smallest prob
# still kept by candidate pivot pidx; cmin counts elements equal to that
# minimum so the pivot search below can test top_p against lsum with and
# without the boundary mass (lsum - cmin * lmin).
T.reinterpret("handle", T.uint64(0))) T.tvm_thread_allreduce(T.uint32(1), lmin[pidx], T.bool(True), lmin_reduce[0], tx) if tx == 0: lmin_broadcast[0] = lmin_reduce[0] T.tvm_storage_sync("shared") lmin_broadcast_local[0] = lmin_broadcast[0] if lmin[pidx] > lmin_broadcast_local[0]: cmin[pidx] = 0 if tx == 0: lsum[pidx] = lsum_reduce[0] lmin[pidx] = lmin_reduce[0] with T.block("block_cross_thread"): T.reads(cmin[pidx]) T.writes(cmin_reduce[0]) T.attr(T.comm_reducer(lambda x0, y0: x0 + y0, [0]), "reduce_scope", T.reinterpret("handle", T.uint64(0))) T.tvm_thread_allreduce(T.uint32(1), cmin[pidx], T.bool(True), cmin_reduce[0], tx) if tx == 0: cmin[pidx] = cmin_reduce[0] T.tvm_storage_sync("shared") if tx == 0: it[0] = 0 while it[0] < 3 and not find_pivot_local[0]: if lsum[it[0]] >= top_p[0] and top_p[0] > lsum[it[0]] - T.Cast("float32", cmin[it[0]]) * lmin[it[0]]: find_pivot[0] = T.bool(True) find_pivot_local[0] = T.bool(True) final_pivot[b] = pivot[it[0]] final_lsum[b] = lsum[it[0]] else: if lsum[it[0]] - lmin[it[0]] * T.Cast("float32", cmin[it[0]]) >= top_p[0]: R_1[0] = pivot[it[0]] final_lsum[b] = lsum[it[0]] else: if lsum[it[0]] < top_p[0]: L[0] = pivot[it[0]] it[0] = it[0] + 1 T.tvm_storage_sync("shared") L_local[0] = L[0] R_local[0] = R_1[0] find_pivot_local[0] = find_pivot[0] for pidx in T.unroll(3): pivot[pidx] = L[0] - T.Cast("float32", pidx + 1) * (L_local[0] - R_local[0]) / T.float32(4) if tx == 0: if not find_pivot_local[0]: final_pivot[b] = R_local[0] if R_local[0] == T.float32(9.9999999999999995e-08): final_lsum[b] = lsum[2] @T.prim_func(private=True) def top_p_renorm_after_cutoff(var_prob: T.handle, var_final_pivot: T.handle, var_final_lsum: T.handle, var_renorm_prob: T.handle): T.func_attr({"target": T.target({"arch": "sm_89", "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.is_scheduled": 1, "tir.noalias": 
# NOTE(review): `top_p_renorm_after_cutoff` (func_attr dict continues on the
# next line) keeps each prob that is >= the per-row final_pivot, rescales it
# by 1 / final_lsum, and writes 0 for everything below the pivot.
# NOTE(review): byte-identical continuation of the collapsed TVMScript dump.
# Body of `top_p_renorm_after_cutoff`: one CTA per row (blockIdx.y), each
# thread strides over columns; renorm_prob = prob / lsum where
# prob >= pivot, else 0.
T.bool(True)}) B, N = T.int32(), T.int32() prob = T.match_buffer(var_prob, (B, N)) final_pivot = T.match_buffer(var_final_pivot, (B,)) final_lsum = T.match_buffer(var_final_lsum, (B,)) renorm_prob = T.match_buffer(var_renorm_prob, (B, N)) # with T.block("root"): pivot = T.alloc_buffer((1,), scope="local") lsum = T.alloc_buffer((1,), scope="local") for _by in T.thread_binding(B, thread="blockIdx.y"): for _bx in T.thread_binding((B + 511) // B, thread="blockIdx.x"): for _tx in T.thread_binding(1024, thread="threadIdx.x"): with T.block("CTA"): by, bx, tx = T.axis.remap("SSS", [_by, _bx, _tx]) T.reads(final_pivot[by], final_lsum[by], prob[by, T.Select(0 <= (B + 511) // B, 0, (((B + 511) // B * 1024 + N - 1) // ((B + 511) // B * 1024) - 1) * ((B + 511) // B)) * 1024 + bx * 1024 + tx:T.Select(0 <= (B + 511) // B, 0, (((B + 511) // B * 1024 + N - 1) // ((B + 511) // B * 1024) - 1) * ((B + 511) // B)) * 1024 + bx * 1024 + tx + (T.Select(0 <= (B + 511) // B, (N - 1) // ((B + 511) // B * 1024) * ((B + 511) // B), 0 - (((B + 511) // B * 1024 + N - 1) // ((B + 511) // B * 1024) - 1) * ((B + 511) // B)) * 1024 + 1)], pivot[0], lsum[0]) T.writes(pivot[0], lsum[0], renorm_prob[by, T.Select(0 <= (B + 511) // B, 0, (((B + 511) // B * 1024 + N - 1) // ((B + 511) // B * 1024) - 1) * ((B + 511) // B)) * 1024 + bx * 1024 + tx:T.Select(0 <= (B + 511) // B, 0, (((B + 511) // B * 1024 + N - 1) // ((B + 511) // B * 1024) - 1) * ((B + 511) // B)) * 1024 + bx * 1024 + tx + (T.Select(0 <= (B + 511) // B, (N - 1) // ((B + 511) // B * 1024) * ((B + 511) // B), 0 - (((B + 511) // B * 1024 + N - 1) // ((B + 511) // B * 1024) - 1) * ((B + 511) // B)) * 1024 + 1)]) pivot[0] = final_pivot[by] lsum[0] = final_lsum[by] for i in range(((B + 511) // B * 1024 + N - 1) // ((B + 511) // B * 1024)): if i * ((512 + B - 1) // B) * 1024 + bx * 1024 + tx < N: renorm_prob[by, i * ((512 + B - 1) // B) * 1024 + bx * 1024 + tx] = T.if_then_else(prob[by, i * ((512 + B - 1) // B) * 1024 + bx * 1024 + tx] >= pivot[0], 
prob[by, i * ((512 + B - 1) // B) * 1024 + bx * 1024 + tx] / lsum[0], T.float32(0)) @R.function def argsort_probs(probs: R.Tensor(("batch_size", "vocab_size"), dtype="float32")) -> R.Tuple(R.Tensor(("batch_size", "vocab_size"), dtype="float32"), R.Tensor(("batch_size", "vocab_size"), dtype="int32")): batch_size = T.int64() vocab_size = T.int64() R.func_attr({"relax.memory_plan_dynamic_func_output": 1, "tir_non_negative_var": ["vocab_size"], "tir_var_upper_bound": {"batch_size": 8, "num_positions": 48, "num_samples": 8}}) cls = Module with R.dataflow(): lv1: R.Tensor((batch_size, vocab_size), dtype="int32") = R.argsort(probs, axis=-1, descending=True, dtype="int32") lv2 = R.call_tir(cls.take_sorted_probs, (probs, lv1), out_sinfo=R.Tensor((batch_size, vocab_size), dtype="float32")) gv1: R.Tuple(R.Tensor((batch_size, vocab_size), dtype="float32"), R.Tensor((batch_size, vocab_size), dtype="int32")) = lv2, lv1 R.output(gv1) return gv1 @R.function def batch_compute_cross_attn_kv(encoder_hidden_states: R.Tensor(("batch_size", 1500, 1280), dtype="float16"), paged_kv_cache: R.Object, packed_params: R.Tuple(R.Tensor((1280, 128, 3), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280, 3), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1500, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), 
# NOTE(review): `argsort_probs` above pairs R.argsort (descending) with the
# `take_sorted_probs` gather to return (sorted probs, sorted indices).
# The `batch_compute_cross_attn_kv` signature continues past this chunk and
# is left untouched here.
dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), 
R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), 
R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), 
R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), 
R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((51866, 1280), dtype="float16"), R.Tensor((448, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), 
dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), 
R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"))) -> R.Object: batch_size = T.int64() R.func_attr({"num_input": 2, "relax.memory_plan_dynamic_func_output": 1, "tir_non_negative_var": ["vocab_size"], "tir_var_upper_bound": {"batch_size": 8, "seq_len": 15000, "total_seq_len": 1500}}) with R.dataflow(): model_encoder_conv1_weight1: R.Tensor((1280, 128, 3), dtype="float16") = packed_params[0] model_encoder_conv1_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1] model_encoder_conv2_weight1: R.Tensor((1280, 1280, 3), dtype="float16") = packed_params[2] model_encoder_conv2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[3] model_encoder_embed_positions_weight1: R.Tensor((1500, 1280), dtype="float16") = packed_params[4] model_encoder_layers_0_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[5] model_encoder_layers_0_self_attn_v_proj_weight1: 
R.Tensor((1280, 1280), dtype="float16") = packed_params[6] model_encoder_layers_0_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[7] model_encoder_layers_0_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[8] model_encoder_layers_0_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[9] model_encoder_layers_0_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[10] model_encoder_layers_0_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[11] model_encoder_layers_0_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[12] model_encoder_layers_0_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[13] model_encoder_layers_0_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[14] model_encoder_layers_0_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[15] model_encoder_layers_0_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[16] model_encoder_layers_0_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[17] model_encoder_layers_0_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[18] model_encoder_layers_0_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[19] model_encoder_layers_1_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[20] model_encoder_layers_1_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[21] model_encoder_layers_1_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[22] model_encoder_layers_1_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[23] model_encoder_layers_1_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[24] model_encoder_layers_1_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = 
packed_params[25] model_encoder_layers_1_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[26] model_encoder_layers_1_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[27] model_encoder_layers_1_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[28] model_encoder_layers_1_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[29] model_encoder_layers_1_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[30] model_encoder_layers_1_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[31] model_encoder_layers_1_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[32] model_encoder_layers_1_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[33] model_encoder_layers_1_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[34] model_encoder_layers_2_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[35] model_encoder_layers_2_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[36] model_encoder_layers_2_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[37] model_encoder_layers_2_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[38] model_encoder_layers_2_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[39] model_encoder_layers_2_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[40] model_encoder_layers_2_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[41] model_encoder_layers_2_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[42] model_encoder_layers_2_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[43] model_encoder_layers_2_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[44] 
model_encoder_layers_2_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[45] model_encoder_layers_2_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[46] model_encoder_layers_2_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[47] model_encoder_layers_2_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[48] model_encoder_layers_2_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[49] model_encoder_layers_3_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[50] model_encoder_layers_3_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[51] model_encoder_layers_3_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[52] model_encoder_layers_3_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[53] model_encoder_layers_3_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[54] model_encoder_layers_3_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[55] model_encoder_layers_3_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[56] model_encoder_layers_3_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[57] model_encoder_layers_3_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[58] model_encoder_layers_3_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[59] model_encoder_layers_3_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[60] model_encoder_layers_3_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[61] model_encoder_layers_3_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[62] model_encoder_layers_3_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[63] model_encoder_layers_3_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = 
packed_params[64] model_encoder_layers_4_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[65] model_encoder_layers_4_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[66] model_encoder_layers_4_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[67] model_encoder_layers_4_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[68] model_encoder_layers_4_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[69] model_encoder_layers_4_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[70] model_encoder_layers_4_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[71] model_encoder_layers_4_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[72] model_encoder_layers_4_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[73] model_encoder_layers_4_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[74] model_encoder_layers_4_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[75] model_encoder_layers_4_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[76] model_encoder_layers_4_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[77] model_encoder_layers_4_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[78] model_encoder_layers_4_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[79] model_encoder_layers_5_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[80] model_encoder_layers_5_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[81] model_encoder_layers_5_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[82] model_encoder_layers_5_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[83] 
model_encoder_layers_5_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[84] model_encoder_layers_5_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[85] model_encoder_layers_5_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[86] model_encoder_layers_5_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[87] model_encoder_layers_5_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[88] model_encoder_layers_5_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[89] model_encoder_layers_5_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[90] model_encoder_layers_5_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[91] model_encoder_layers_5_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[92] model_encoder_layers_5_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[93] model_encoder_layers_5_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[94] model_encoder_layers_6_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[95] model_encoder_layers_6_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[96] model_encoder_layers_6_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[97] model_encoder_layers_6_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[98] model_encoder_layers_6_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[99] model_encoder_layers_6_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[100] model_encoder_layers_6_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[101] model_encoder_layers_6_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[102] 
model_encoder_layers_6_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[103] model_encoder_layers_6_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[104] model_encoder_layers_6_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[105] model_encoder_layers_6_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[106] model_encoder_layers_6_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[107] model_encoder_layers_6_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[108] model_encoder_layers_6_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[109] model_encoder_layers_7_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[110] model_encoder_layers_7_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[111] model_encoder_layers_7_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[112] model_encoder_layers_7_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[113] model_encoder_layers_7_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[114] model_encoder_layers_7_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[115] model_encoder_layers_7_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[116] model_encoder_layers_7_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[117] model_encoder_layers_7_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[118] model_encoder_layers_7_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[119] model_encoder_layers_7_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[120] model_encoder_layers_7_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[121] model_encoder_layers_7_fc2_bias1: R.Tensor((1280,), 
dtype="float16") = packed_params[122] model_encoder_layers_7_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[123] model_encoder_layers_7_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[124] model_encoder_layers_8_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[125] model_encoder_layers_8_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[126] model_encoder_layers_8_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[127] model_encoder_layers_8_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[128] model_encoder_layers_8_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[129] model_encoder_layers_8_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[130] model_encoder_layers_8_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[131] model_encoder_layers_8_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[132] model_encoder_layers_8_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[133] model_encoder_layers_8_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[134] model_encoder_layers_8_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[135] model_encoder_layers_8_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[136] model_encoder_layers_8_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[137] model_encoder_layers_8_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[138] model_encoder_layers_8_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[139] model_encoder_layers_9_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[140] model_encoder_layers_9_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = 
packed_params[141] model_encoder_layers_9_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[142] model_encoder_layers_9_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[143] model_encoder_layers_9_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[144] model_encoder_layers_9_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[145] model_encoder_layers_9_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[146] model_encoder_layers_9_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[147] model_encoder_layers_9_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[148] model_encoder_layers_9_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[149] model_encoder_layers_9_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[150] model_encoder_layers_9_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[151] model_encoder_layers_9_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[152] model_encoder_layers_9_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[153] model_encoder_layers_9_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[154] model_encoder_layers_10_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[155] model_encoder_layers_10_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[156] model_encoder_layers_10_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[157] model_encoder_layers_10_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[158] model_encoder_layers_10_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[159] model_encoder_layers_10_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = 
packed_params[160] model_encoder_layers_10_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[161] model_encoder_layers_10_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[162] model_encoder_layers_10_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[163] model_encoder_layers_10_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[164] model_encoder_layers_10_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[165] model_encoder_layers_10_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[166] model_encoder_layers_10_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[167] model_encoder_layers_10_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[168] model_encoder_layers_10_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[169] model_encoder_layers_11_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[170] model_encoder_layers_11_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[171] model_encoder_layers_11_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[172] model_encoder_layers_11_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[173] model_encoder_layers_11_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[174] model_encoder_layers_11_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[175] model_encoder_layers_11_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[176] model_encoder_layers_11_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[177] model_encoder_layers_11_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[178] model_encoder_layers_11_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = 
packed_params[179] model_encoder_layers_11_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[180] model_encoder_layers_11_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[181] model_encoder_layers_11_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[182] model_encoder_layers_11_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[183] model_encoder_layers_11_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[184] model_encoder_layers_12_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[185] model_encoder_layers_12_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[186] model_encoder_layers_12_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[187] model_encoder_layers_12_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[188] model_encoder_layers_12_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[189] model_encoder_layers_12_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[190] model_encoder_layers_12_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[191] model_encoder_layers_12_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[192] model_encoder_layers_12_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[193] model_encoder_layers_12_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[194] model_encoder_layers_12_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[195] model_encoder_layers_12_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[196] model_encoder_layers_12_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[197] model_encoder_layers_12_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[198] 
model_encoder_layers_12_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[199] model_encoder_layers_13_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[200] model_encoder_layers_13_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[201] model_encoder_layers_13_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[202] model_encoder_layers_13_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[203] model_encoder_layers_13_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[204] model_encoder_layers_13_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[205] model_encoder_layers_13_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[206] model_encoder_layers_13_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[207] model_encoder_layers_13_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[208] model_encoder_layers_13_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[209] model_encoder_layers_13_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[210] model_encoder_layers_13_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[211] model_encoder_layers_13_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[212] model_encoder_layers_13_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[213] model_encoder_layers_13_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[214] model_encoder_layers_14_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[215] model_encoder_layers_14_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[216] model_encoder_layers_14_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[217] 
model_encoder_layers_14_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[218] model_encoder_layers_14_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[219] model_encoder_layers_14_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[220] model_encoder_layers_14_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[221] model_encoder_layers_14_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[222] model_encoder_layers_14_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[223] model_encoder_layers_14_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[224] model_encoder_layers_14_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[225] model_encoder_layers_14_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[226] model_encoder_layers_14_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[227] model_encoder_layers_14_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[228] model_encoder_layers_14_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[229] model_encoder_layers_15_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[230] model_encoder_layers_15_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[231] model_encoder_layers_15_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[232] model_encoder_layers_15_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[233] model_encoder_layers_15_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[234] model_encoder_layers_15_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[235] model_encoder_layers_15_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[236] 
model_encoder_layers_15_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[237] model_encoder_layers_15_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[238] model_encoder_layers_15_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[239] model_encoder_layers_15_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[240] model_encoder_layers_15_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[241] model_encoder_layers_15_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[242] model_encoder_layers_15_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[243] model_encoder_layers_15_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[244] model_encoder_layers_16_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[245] model_encoder_layers_16_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[246] model_encoder_layers_16_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[247] model_encoder_layers_16_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[248] model_encoder_layers_16_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[249] model_encoder_layers_16_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[250] model_encoder_layers_16_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[251] model_encoder_layers_16_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[252] model_encoder_layers_16_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[253] model_encoder_layers_16_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[254] model_encoder_layers_16_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[255] 
model_encoder_layers_16_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[256] model_encoder_layers_16_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[257] model_encoder_layers_16_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[258] model_encoder_layers_16_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[259] model_encoder_layers_17_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[260] model_encoder_layers_17_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[261] model_encoder_layers_17_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[262] model_encoder_layers_17_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[263] model_encoder_layers_17_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[264] model_encoder_layers_17_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[265] model_encoder_layers_17_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[266] model_encoder_layers_17_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[267] model_encoder_layers_17_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[268] model_encoder_layers_17_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[269] model_encoder_layers_17_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[270] model_encoder_layers_17_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[271] model_encoder_layers_17_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[272] model_encoder_layers_17_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[273] model_encoder_layers_17_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[274] 
model_encoder_layers_18_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[275] model_encoder_layers_18_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[276] model_encoder_layers_18_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[277] model_encoder_layers_18_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[278] model_encoder_layers_18_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[279] model_encoder_layers_18_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[280] model_encoder_layers_18_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[281] model_encoder_layers_18_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[282] model_encoder_layers_18_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[283] model_encoder_layers_18_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[284] model_encoder_layers_18_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[285] model_encoder_layers_18_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[286] model_encoder_layers_18_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[287] model_encoder_layers_18_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[288] model_encoder_layers_18_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[289] model_encoder_layers_19_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[290] model_encoder_layers_19_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[291] model_encoder_layers_19_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[292] model_encoder_layers_19_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = 
packed_params[293] model_encoder_layers_19_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[294] model_encoder_layers_19_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[295] model_encoder_layers_19_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[296] model_encoder_layers_19_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[297] model_encoder_layers_19_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[298] model_encoder_layers_19_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[299] model_encoder_layers_19_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[300] model_encoder_layers_19_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[301] model_encoder_layers_19_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[302] model_encoder_layers_19_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[303] model_encoder_layers_19_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[304] model_encoder_layers_20_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[305] model_encoder_layers_20_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[306] model_encoder_layers_20_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[307] model_encoder_layers_20_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[308] model_encoder_layers_20_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[309] model_encoder_layers_20_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[310] model_encoder_layers_20_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[311] model_encoder_layers_20_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = 
packed_params[312] model_encoder_layers_20_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[313] model_encoder_layers_20_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[314] model_encoder_layers_20_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[315] model_encoder_layers_20_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[316] model_encoder_layers_20_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[317] model_encoder_layers_20_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[318] model_encoder_layers_20_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[319] model_encoder_layers_21_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[320] model_encoder_layers_21_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[321] model_encoder_layers_21_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[322] model_encoder_layers_21_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[323] model_encoder_layers_21_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[324] model_encoder_layers_21_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[325] model_encoder_layers_21_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[326] model_encoder_layers_21_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[327] model_encoder_layers_21_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[328] model_encoder_layers_21_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[329] model_encoder_layers_21_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[330] model_encoder_layers_21_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[331] 
model_encoder_layers_21_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[332] model_encoder_layers_21_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[333] model_encoder_layers_21_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[334] model_encoder_layers_22_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[335] model_encoder_layers_22_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[336] model_encoder_layers_22_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[337] model_encoder_layers_22_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[338] model_encoder_layers_22_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[339] model_encoder_layers_22_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[340] model_encoder_layers_22_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[341] model_encoder_layers_22_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[342] model_encoder_layers_22_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[343] model_encoder_layers_22_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[344] model_encoder_layers_22_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[345] model_encoder_layers_22_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[346] model_encoder_layers_22_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[347] model_encoder_layers_22_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[348] model_encoder_layers_22_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[349] model_encoder_layers_23_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[350] 
model_encoder_layers_23_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[351] model_encoder_layers_23_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[352] model_encoder_layers_23_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[353] model_encoder_layers_23_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[354] model_encoder_layers_23_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[355] model_encoder_layers_23_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[356] model_encoder_layers_23_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[357] model_encoder_layers_23_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[358] model_encoder_layers_23_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[359] model_encoder_layers_23_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[360] model_encoder_layers_23_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[361] model_encoder_layers_23_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[362] model_encoder_layers_23_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[363] model_encoder_layers_23_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[364] model_encoder_layers_24_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[365] model_encoder_layers_24_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[366] model_encoder_layers_24_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[367] model_encoder_layers_24_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[368] model_encoder_layers_24_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[369] 
model_encoder_layers_24_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[370] model_encoder_layers_24_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[371] model_encoder_layers_24_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[372] model_encoder_layers_24_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[373] model_encoder_layers_24_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[374] model_encoder_layers_24_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[375] model_encoder_layers_24_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[376] model_encoder_layers_24_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[377] model_encoder_layers_24_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[378] model_encoder_layers_24_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[379] model_encoder_layers_25_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[380] model_encoder_layers_25_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[381] model_encoder_layers_25_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[382] model_encoder_layers_25_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[383] model_encoder_layers_25_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[384] model_encoder_layers_25_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[385] model_encoder_layers_25_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[386] model_encoder_layers_25_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[387] model_encoder_layers_25_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = 
packed_params[388] model_encoder_layers_25_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[389] model_encoder_layers_25_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[390] model_encoder_layers_25_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[391] model_encoder_layers_25_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[392] model_encoder_layers_25_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[393] model_encoder_layers_25_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[394] model_encoder_layers_26_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[395] model_encoder_layers_26_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[396] model_encoder_layers_26_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[397] model_encoder_layers_26_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[398] model_encoder_layers_26_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[399] model_encoder_layers_26_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[400] model_encoder_layers_26_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[401] model_encoder_layers_26_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[402] model_encoder_layers_26_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[403] model_encoder_layers_26_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[404] model_encoder_layers_26_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[405] model_encoder_layers_26_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[406] model_encoder_layers_26_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[407] 
model_encoder_layers_26_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[408] model_encoder_layers_26_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[409] model_encoder_layers_27_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[410] model_encoder_layers_27_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[411] model_encoder_layers_27_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[412] model_encoder_layers_27_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[413] model_encoder_layers_27_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[414] model_encoder_layers_27_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[415] model_encoder_layers_27_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[416] model_encoder_layers_27_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[417] model_encoder_layers_27_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[418] model_encoder_layers_27_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[419] model_encoder_layers_27_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[420] model_encoder_layers_27_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[421] model_encoder_layers_27_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[422] model_encoder_layers_27_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[423] model_encoder_layers_27_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[424] model_encoder_layers_28_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[425] model_encoder_layers_28_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[426] 
model_encoder_layers_28_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[427] model_encoder_layers_28_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[428] model_encoder_layers_28_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[429] model_encoder_layers_28_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[430] model_encoder_layers_28_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[431] model_encoder_layers_28_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[432] model_encoder_layers_28_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[433] model_encoder_layers_28_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[434] model_encoder_layers_28_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[435] model_encoder_layers_28_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[436] model_encoder_layers_28_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[437] model_encoder_layers_28_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[438] model_encoder_layers_28_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[439] model_encoder_layers_29_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[440] model_encoder_layers_29_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[441] model_encoder_layers_29_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[442] model_encoder_layers_29_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[443] model_encoder_layers_29_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[444] model_encoder_layers_29_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[445] 
model_encoder_layers_29_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[446] model_encoder_layers_29_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[447] model_encoder_layers_29_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[448] model_encoder_layers_29_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[449] model_encoder_layers_29_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[450] model_encoder_layers_29_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[451] model_encoder_layers_29_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[452] model_encoder_layers_29_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[453] model_encoder_layers_29_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[454] model_encoder_layers_30_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[455] model_encoder_layers_30_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[456] model_encoder_layers_30_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[457] model_encoder_layers_30_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[458] model_encoder_layers_30_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[459] model_encoder_layers_30_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[460] model_encoder_layers_30_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[461] model_encoder_layers_30_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[462] model_encoder_layers_30_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[463] model_encoder_layers_30_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[464] 
model_encoder_layers_30_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[465] model_encoder_layers_30_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[466] model_encoder_layers_30_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[467] model_encoder_layers_30_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[468] model_encoder_layers_30_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[469] model_encoder_layers_31_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[470] model_encoder_layers_31_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[471] model_encoder_layers_31_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[472] model_encoder_layers_31_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[473] model_encoder_layers_31_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[474] model_encoder_layers_31_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[475] model_encoder_layers_31_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[476] model_encoder_layers_31_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[477] model_encoder_layers_31_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[478] model_encoder_layers_31_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[479] model_encoder_layers_31_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[480] model_encoder_layers_31_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[481] model_encoder_layers_31_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[482] model_encoder_layers_31_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[483] model_encoder_layers_31_final_layer_norm_bias1: 
R.Tensor((1280,), dtype="float16") = packed_params[484] model_encoder_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[485] model_encoder_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[486] model_decoder_embed_tokens_weight1: R.Tensor((51866, 1280), dtype="float16") = packed_params[487] model_decoder_embed_positions_weight1: R.Tensor((448, 1280), dtype="float16") = packed_params[488] model_decoder_layers_0_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[489] model_decoder_layers_0_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[490] model_decoder_layers_0_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[491] model_decoder_layers_0_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[492] model_decoder_layers_0_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[493] model_decoder_layers_0_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[494] model_decoder_layers_0_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[495] model_decoder_layers_0_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[496] model_decoder_layers_0_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[497] model_decoder_layers_0_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[498] model_decoder_layers_0_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[499] model_decoder_layers_0_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[500] model_decoder_layers_0_encoder_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[501] model_decoder_layers_0_encoder_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[502] 
model_decoder_layers_0_encoder_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[503] model_decoder_layers_0_encoder_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[504] model_decoder_layers_0_encoder_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[505] model_decoder_layers_0_encoder_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[506] model_decoder_layers_0_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[507] model_decoder_layers_0_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[508] model_decoder_layers_0_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[509] model_decoder_layers_0_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[510] model_decoder_layers_0_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[511] model_decoder_layers_0_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[512] model_decoder_layers_1_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[513] model_decoder_layers_1_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[514] model_decoder_layers_1_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[515] model_decoder_layers_1_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[516] model_decoder_layers_1_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[517] model_decoder_layers_1_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[518] model_decoder_layers_1_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[519] model_decoder_layers_1_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[520] model_decoder_layers_1_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[521] 
model_decoder_layers_1_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[522] model_decoder_layers_1_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[523] model_decoder_layers_1_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[524] model_decoder_layers_1_encoder_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[525] model_decoder_layers_1_encoder_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[526] model_decoder_layers_1_encoder_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[527] model_decoder_layers_1_encoder_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[528] model_decoder_layers_1_encoder_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[529] model_decoder_layers_1_encoder_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[530] model_decoder_layers_1_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[531] model_decoder_layers_1_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[532] model_decoder_layers_1_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[533] model_decoder_layers_1_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[534] model_decoder_layers_1_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[535] model_decoder_layers_1_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[536] model_decoder_layers_2_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[537] model_decoder_layers_2_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[538] model_decoder_layers_2_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[539] model_decoder_layers_2_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = 
packed_params[540] model_decoder_layers_2_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[541] model_decoder_layers_2_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[542] model_decoder_layers_2_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[543] model_decoder_layers_2_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[544] model_decoder_layers_2_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[545] model_decoder_layers_2_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[546] model_decoder_layers_2_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[547] model_decoder_layers_2_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[548] model_decoder_layers_2_encoder_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[549] model_decoder_layers_2_encoder_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[550] model_decoder_layers_2_encoder_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[551] model_decoder_layers_2_encoder_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[552] model_decoder_layers_2_encoder_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[553] model_decoder_layers_2_encoder_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[554] model_decoder_layers_2_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[555] model_decoder_layers_2_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[556] model_decoder_layers_2_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[557] model_decoder_layers_2_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[558] model_decoder_layers_2_final_layer_norm_weight1: R.Tensor((1280,), 
dtype="float16") = packed_params[559] model_decoder_layers_2_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[560] model_decoder_layers_3_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[561] model_decoder_layers_3_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[562] model_decoder_layers_3_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[563] model_decoder_layers_3_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[564] model_decoder_layers_3_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[565] model_decoder_layers_3_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[566] model_decoder_layers_3_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[567] model_decoder_layers_3_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[568] model_decoder_layers_3_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[569] model_decoder_layers_3_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[570] model_decoder_layers_3_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[571] model_decoder_layers_3_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[572] model_decoder_layers_3_encoder_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[573] model_decoder_layers_3_encoder_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[574] model_decoder_layers_3_encoder_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[575] model_decoder_layers_3_encoder_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[576] model_decoder_layers_3_encoder_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[577] 
model_decoder_layers_3_encoder_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[578] model_decoder_layers_3_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[579] model_decoder_layers_3_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[580] model_decoder_layers_3_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[581] model_decoder_layers_3_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[582] model_decoder_layers_3_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[583] model_decoder_layers_3_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[584] model_decoder_layers_4_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[585] model_decoder_layers_4_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[586] model_decoder_layers_4_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[587] model_decoder_layers_4_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[588] model_decoder_layers_4_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[589] model_decoder_layers_4_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[590] model_decoder_layers_4_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[591] model_decoder_layers_4_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[592] model_decoder_layers_4_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[593] model_decoder_layers_4_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[594] model_decoder_layers_4_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[595] model_decoder_layers_4_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[596] 
model_decoder_layers_4_encoder_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[597] model_decoder_layers_4_encoder_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[598] model_decoder_layers_4_encoder_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[599] model_decoder_layers_4_encoder_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[600] model_decoder_layers_4_encoder_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[601] model_decoder_layers_4_encoder_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[602] model_decoder_layers_4_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[603] model_decoder_layers_4_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[604] model_decoder_layers_4_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[605] model_decoder_layers_4_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[606] model_decoder_layers_4_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[607] model_decoder_layers_4_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[608] model_decoder_layers_5_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[609] model_decoder_layers_5_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[610] model_decoder_layers_5_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[611] model_decoder_layers_5_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[612] model_decoder_layers_5_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[613] model_decoder_layers_5_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[614] model_decoder_layers_5_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[615] 
model_decoder_layers_5_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[616] model_decoder_layers_5_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[617] model_decoder_layers_5_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[618] model_decoder_layers_5_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[619] model_decoder_layers_5_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[620] model_decoder_layers_5_encoder_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[621] model_decoder_layers_5_encoder_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[622] model_decoder_layers_5_encoder_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[623] model_decoder_layers_5_encoder_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[624] model_decoder_layers_5_encoder_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[625] model_decoder_layers_5_encoder_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[626] model_decoder_layers_5_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[627] model_decoder_layers_5_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[628] model_decoder_layers_5_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[629] model_decoder_layers_5_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[630] model_decoder_layers_5_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[631] model_decoder_layers_5_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[632] model_decoder_layers_6_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[633] model_decoder_layers_6_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = 
packed_params[634] model_decoder_layers_6_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[635] model_decoder_layers_6_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[636] model_decoder_layers_6_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[637] model_decoder_layers_6_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[638] model_decoder_layers_6_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[639] model_decoder_layers_6_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[640] model_decoder_layers_6_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[641] model_decoder_layers_6_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[642] model_decoder_layers_6_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[643] model_decoder_layers_6_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[644] model_decoder_layers_6_encoder_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[645] model_decoder_layers_6_encoder_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[646] model_decoder_layers_6_encoder_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[647] model_decoder_layers_6_encoder_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[648] model_decoder_layers_6_encoder_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[649] model_decoder_layers_6_encoder_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[650] model_decoder_layers_6_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[651] model_decoder_layers_6_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[652] model_decoder_layers_6_fc2_weight1: 
R.Tensor((1280, 5120), dtype="float16") = packed_params[653] model_decoder_layers_6_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[654] model_decoder_layers_6_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[655] model_decoder_layers_6_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[656] model_decoder_layers_7_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[657] model_decoder_layers_7_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[658] model_decoder_layers_7_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[659] model_decoder_layers_7_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[660] model_decoder_layers_7_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[661] model_decoder_layers_7_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[662] model_decoder_layers_7_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[663] model_decoder_layers_7_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[664] model_decoder_layers_7_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[665] model_decoder_layers_7_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[666] model_decoder_layers_7_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[667] model_decoder_layers_7_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[668] model_decoder_layers_7_encoder_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[669] model_decoder_layers_7_encoder_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[670] model_decoder_layers_7_encoder_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[671] 
model_decoder_layers_7_encoder_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[672] model_decoder_layers_7_encoder_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[673] model_decoder_layers_7_encoder_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[674] model_decoder_layers_7_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[675] model_decoder_layers_7_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[676] model_decoder_layers_7_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[677] model_decoder_layers_7_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[678] model_decoder_layers_7_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[679] model_decoder_layers_7_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[680] model_decoder_layers_8_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[681] model_decoder_layers_8_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[682] model_decoder_layers_8_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[683] model_decoder_layers_8_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[684] model_decoder_layers_8_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[685] model_decoder_layers_8_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[686] model_decoder_layers_8_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[687] model_decoder_layers_8_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[688] model_decoder_layers_8_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[689] model_decoder_layers_8_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[690] 
model_decoder_layers_8_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[691] model_decoder_layers_8_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[692] model_decoder_layers_8_encoder_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[693] model_decoder_layers_8_encoder_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[694] model_decoder_layers_8_encoder_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[695] model_decoder_layers_8_encoder_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[696] model_decoder_layers_8_encoder_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[697] model_decoder_layers_8_encoder_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[698] model_decoder_layers_8_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[699] model_decoder_layers_8_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[700] model_decoder_layers_8_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[701] model_decoder_layers_8_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[702] model_decoder_layers_8_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[703] model_decoder_layers_8_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[704] model_decoder_layers_9_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[705] model_decoder_layers_9_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[706] model_decoder_layers_9_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[707] model_decoder_layers_9_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[708] model_decoder_layers_9_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = 
packed_params[709] model_decoder_layers_9_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[710] model_decoder_layers_9_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[711] model_decoder_layers_9_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[712] model_decoder_layers_9_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[713] model_decoder_layers_9_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[714] model_decoder_layers_9_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[715] model_decoder_layers_9_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[716] model_decoder_layers_9_encoder_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[717] model_decoder_layers_9_encoder_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[718] model_decoder_layers_9_encoder_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[719] model_decoder_layers_9_encoder_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[720] model_decoder_layers_9_encoder_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[721] model_decoder_layers_9_encoder_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[722] model_decoder_layers_9_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[723] model_decoder_layers_9_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[724] model_decoder_layers_9_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[725] model_decoder_layers_9_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[726] model_decoder_layers_9_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[727] model_decoder_layers_9_final_layer_norm_bias1: R.Tensor((1280,), 
dtype="float16") = packed_params[728] model_decoder_layers_10_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[729] model_decoder_layers_10_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[730] model_decoder_layers_10_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[731] model_decoder_layers_10_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[732] model_decoder_layers_10_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[733] model_decoder_layers_10_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[734] model_decoder_layers_10_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[735] model_decoder_layers_10_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[736] model_decoder_layers_10_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[737] model_decoder_layers_10_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[738] model_decoder_layers_10_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[739] model_decoder_layers_10_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[740] model_decoder_layers_10_encoder_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[741] model_decoder_layers_10_encoder_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[742] model_decoder_layers_10_encoder_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[743] model_decoder_layers_10_encoder_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[744] model_decoder_layers_10_encoder_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[745] model_decoder_layers_10_encoder_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") 
= packed_params[746] model_decoder_layers_10_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[747] model_decoder_layers_10_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[748] model_decoder_layers_10_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[749] model_decoder_layers_10_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[750] model_decoder_layers_10_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[751] model_decoder_layers_10_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[752] model_decoder_layers_11_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[753] model_decoder_layers_11_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[754] model_decoder_layers_11_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[755] model_decoder_layers_11_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[756] model_decoder_layers_11_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[757] model_decoder_layers_11_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[758] model_decoder_layers_11_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[759] model_decoder_layers_11_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[760] model_decoder_layers_11_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[761] model_decoder_layers_11_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[762] model_decoder_layers_11_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[763] model_decoder_layers_11_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[764] model_decoder_layers_11_encoder_attn_q_proj_weight1: R.Tensor((1280, 1280), 
dtype="float16") = packed_params[765] model_decoder_layers_11_encoder_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[766] model_decoder_layers_11_encoder_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[767] model_decoder_layers_11_encoder_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[768] model_decoder_layers_11_encoder_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[769] model_decoder_layers_11_encoder_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[770] model_decoder_layers_11_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[771] model_decoder_layers_11_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[772] model_decoder_layers_11_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[773] model_decoder_layers_11_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[774] model_decoder_layers_11_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[775] model_decoder_layers_11_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[776] model_decoder_layers_12_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[777] model_decoder_layers_12_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[778] model_decoder_layers_12_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[779] model_decoder_layers_12_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[780] model_decoder_layers_12_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[781] model_decoder_layers_12_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[782] model_decoder_layers_12_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[783] model_decoder_layers_12_self_attn_layer_norm_weight1: 
R.Tensor((1280,), dtype="float16") = packed_params[784] model_decoder_layers_12_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[785] model_decoder_layers_12_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[786] model_decoder_layers_12_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[787] model_decoder_layers_12_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[788] model_decoder_layers_12_encoder_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[789] model_decoder_layers_12_encoder_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[790] model_decoder_layers_12_encoder_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[791] model_decoder_layers_12_encoder_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[792] model_decoder_layers_12_encoder_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[793] model_decoder_layers_12_encoder_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[794] model_decoder_layers_12_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[795] model_decoder_layers_12_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[796] model_decoder_layers_12_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[797] model_decoder_layers_12_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[798] model_decoder_layers_12_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[799] model_decoder_layers_12_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[800] model_decoder_layers_13_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[801] model_decoder_layers_13_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[802] 
model_decoder_layers_13_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[803] model_decoder_layers_13_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[804] model_decoder_layers_13_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[805] model_decoder_layers_13_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[806] model_decoder_layers_13_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[807] model_decoder_layers_13_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[808] model_decoder_layers_13_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[809] model_decoder_layers_13_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[810] model_decoder_layers_13_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[811] model_decoder_layers_13_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[812] model_decoder_layers_13_encoder_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[813] model_decoder_layers_13_encoder_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[814] model_decoder_layers_13_encoder_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[815] model_decoder_layers_13_encoder_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[816] model_decoder_layers_13_encoder_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[817] model_decoder_layers_13_encoder_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[818] model_decoder_layers_13_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[819] model_decoder_layers_13_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[820] model_decoder_layers_13_fc2_weight1: 
R.Tensor((1280, 5120), dtype="float16") = packed_params[821] model_decoder_layers_13_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[822] model_decoder_layers_13_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[823] model_decoder_layers_13_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[824] model_decoder_layers_14_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[825] model_decoder_layers_14_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[826] model_decoder_layers_14_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[827] model_decoder_layers_14_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[828] model_decoder_layers_14_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[829] model_decoder_layers_14_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[830] model_decoder_layers_14_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[831] model_decoder_layers_14_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[832] model_decoder_layers_14_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[833] model_decoder_layers_14_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[834] model_decoder_layers_14_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[835] model_decoder_layers_14_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[836] model_decoder_layers_14_encoder_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[837] model_decoder_layers_14_encoder_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[838] model_decoder_layers_14_encoder_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = 
packed_params[839] model_decoder_layers_14_encoder_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[840] model_decoder_layers_14_encoder_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[841] model_decoder_layers_14_encoder_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[842] model_decoder_layers_14_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[843] model_decoder_layers_14_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[844] model_decoder_layers_14_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[845] model_decoder_layers_14_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[846] model_decoder_layers_14_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[847] model_decoder_layers_14_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[848] model_decoder_layers_15_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[849] model_decoder_layers_15_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[850] model_decoder_layers_15_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[851] model_decoder_layers_15_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[852] model_decoder_layers_15_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[853] model_decoder_layers_15_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[854] model_decoder_layers_15_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[855] model_decoder_layers_15_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[856] model_decoder_layers_15_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[857] model_decoder_layers_15_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), 
dtype="float16") = packed_params[858] model_decoder_layers_15_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[859] model_decoder_layers_15_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[860] model_decoder_layers_15_encoder_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[861] model_decoder_layers_15_encoder_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[862] model_decoder_layers_15_encoder_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[863] model_decoder_layers_15_encoder_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[864] model_decoder_layers_15_encoder_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[865] model_decoder_layers_15_encoder_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[866] model_decoder_layers_15_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[867] model_decoder_layers_15_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[868] model_decoder_layers_15_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[869] model_decoder_layers_15_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[870] model_decoder_layers_15_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[871] model_decoder_layers_15_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[872] model_decoder_layers_16_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[873] model_decoder_layers_16_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[874] model_decoder_layers_16_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[875] model_decoder_layers_16_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[876] 
model_decoder_layers_16_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[877] model_decoder_layers_16_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[878] model_decoder_layers_16_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[879] model_decoder_layers_16_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[880] model_decoder_layers_16_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[881] model_decoder_layers_16_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[882] model_decoder_layers_16_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[883] model_decoder_layers_16_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[884] model_decoder_layers_16_encoder_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[885] model_decoder_layers_16_encoder_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[886] model_decoder_layers_16_encoder_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[887] model_decoder_layers_16_encoder_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[888] model_decoder_layers_16_encoder_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[889] model_decoder_layers_16_encoder_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[890] model_decoder_layers_16_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[891] model_decoder_layers_16_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[892] model_decoder_layers_16_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[893] model_decoder_layers_16_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[894] model_decoder_layers_16_final_layer_norm_weight1: R.Tensor((1280,), 
dtype="float16") = packed_params[895] model_decoder_layers_16_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[896] model_decoder_layers_17_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[897] model_decoder_layers_17_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[898] model_decoder_layers_17_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[899] model_decoder_layers_17_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[900] model_decoder_layers_17_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[901] model_decoder_layers_17_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[902] model_decoder_layers_17_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[903] model_decoder_layers_17_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[904] model_decoder_layers_17_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[905] model_decoder_layers_17_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[906] model_decoder_layers_17_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[907] model_decoder_layers_17_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[908] model_decoder_layers_17_encoder_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[909] model_decoder_layers_17_encoder_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[910] model_decoder_layers_17_encoder_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[911] model_decoder_layers_17_encoder_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[912] model_decoder_layers_17_encoder_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = 
packed_params[913] model_decoder_layers_17_encoder_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[914] model_decoder_layers_17_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[915] model_decoder_layers_17_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[916] model_decoder_layers_17_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[917] model_decoder_layers_17_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[918] model_decoder_layers_17_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[919] model_decoder_layers_17_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[920] model_decoder_layers_18_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[921] model_decoder_layers_18_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[922] model_decoder_layers_18_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[923] model_decoder_layers_18_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[924] model_decoder_layers_18_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[925] model_decoder_layers_18_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[926] model_decoder_layers_18_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[927] model_decoder_layers_18_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[928] model_decoder_layers_18_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[929] model_decoder_layers_18_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[930] model_decoder_layers_18_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[931] model_decoder_layers_18_encoder_attn_v_proj_bias1: R.Tensor((1280,), 
dtype="float16") = packed_params[932] model_decoder_layers_18_encoder_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[933] model_decoder_layers_18_encoder_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[934] model_decoder_layers_18_encoder_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[935] model_decoder_layers_18_encoder_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[936] model_decoder_layers_18_encoder_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[937] model_decoder_layers_18_encoder_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[938] model_decoder_layers_18_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[939] model_decoder_layers_18_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[940] model_decoder_layers_18_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[941] model_decoder_layers_18_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[942] model_decoder_layers_18_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[943] model_decoder_layers_18_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[944] model_decoder_layers_19_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[945] model_decoder_layers_19_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[946] model_decoder_layers_19_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[947] model_decoder_layers_19_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[948] model_decoder_layers_19_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[949] model_decoder_layers_19_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[950] model_decoder_layers_19_self_attn_out_proj_bias1: 
R.Tensor((1280,), dtype="float16") = packed_params[951] model_decoder_layers_19_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[952] model_decoder_layers_19_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[953] model_decoder_layers_19_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[954] model_decoder_layers_19_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[955] model_decoder_layers_19_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[956] model_decoder_layers_19_encoder_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[957] model_decoder_layers_19_encoder_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[958] model_decoder_layers_19_encoder_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[959] model_decoder_layers_19_encoder_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[960] model_decoder_layers_19_encoder_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[961] model_decoder_layers_19_encoder_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[962] model_decoder_layers_19_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[963] model_decoder_layers_19_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[964] model_decoder_layers_19_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[965] model_decoder_layers_19_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[966] model_decoder_layers_19_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[967] model_decoder_layers_19_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[968] model_decoder_layers_20_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[969] 
model_decoder_layers_20_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[970] model_decoder_layers_20_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[971] model_decoder_layers_20_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[972] model_decoder_layers_20_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[973] model_decoder_layers_20_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[974] model_decoder_layers_20_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[975] model_decoder_layers_20_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[976] model_decoder_layers_20_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[977] model_decoder_layers_20_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[978] model_decoder_layers_20_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[979] model_decoder_layers_20_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[980] model_decoder_layers_20_encoder_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[981] model_decoder_layers_20_encoder_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[982] model_decoder_layers_20_encoder_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[983] model_decoder_layers_20_encoder_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[984] model_decoder_layers_20_encoder_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[985] model_decoder_layers_20_encoder_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[986] model_decoder_layers_20_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[987] 
model_decoder_layers_20_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[988] model_decoder_layers_20_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[989] model_decoder_layers_20_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[990] model_decoder_layers_20_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[991] model_decoder_layers_20_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[992] model_decoder_layers_21_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[993] model_decoder_layers_21_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[994] model_decoder_layers_21_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[995] model_decoder_layers_21_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[996] model_decoder_layers_21_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[997] model_decoder_layers_21_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[998] model_decoder_layers_21_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[999] model_decoder_layers_21_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[1000] model_decoder_layers_21_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1001] model_decoder_layers_21_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1002] model_decoder_layers_21_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1003] model_decoder_layers_21_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1004] model_decoder_layers_21_encoder_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1005] model_decoder_layers_21_encoder_attn_q_proj_bias1: R.Tensor((1280,), 
dtype="float16") = packed_params[1006] model_decoder_layers_21_encoder_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1007] model_decoder_layers_21_encoder_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1008] model_decoder_layers_21_encoder_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[1009] model_decoder_layers_21_encoder_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1010] model_decoder_layers_21_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[1011] model_decoder_layers_21_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[1012] model_decoder_layers_21_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[1013] model_decoder_layers_21_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1014] model_decoder_layers_21_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[1015] model_decoder_layers_21_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1016] model_decoder_layers_22_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1017] model_decoder_layers_22_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1018] model_decoder_layers_22_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1019] model_decoder_layers_22_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1020] model_decoder_layers_22_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1021] model_decoder_layers_22_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1022] model_decoder_layers_22_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1023] model_decoder_layers_22_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[1024] 
model_decoder_layers_22_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1025] model_decoder_layers_22_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1026] model_decoder_layers_22_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1027] model_decoder_layers_22_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1028] model_decoder_layers_22_encoder_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1029] model_decoder_layers_22_encoder_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1030] model_decoder_layers_22_encoder_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1031] model_decoder_layers_22_encoder_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1032] model_decoder_layers_22_encoder_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[1033] model_decoder_layers_22_encoder_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1034] model_decoder_layers_22_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[1035] model_decoder_layers_22_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[1036] model_decoder_layers_22_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[1037] model_decoder_layers_22_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1038] model_decoder_layers_22_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[1039] model_decoder_layers_22_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1040] model_decoder_layers_23_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1041] model_decoder_layers_23_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1042] model_decoder_layers_23_self_attn_v_proj_bias1: 
R.Tensor((1280,), dtype="float16") = packed_params[1043] model_decoder_layers_23_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1044] model_decoder_layers_23_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1045] model_decoder_layers_23_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1046] model_decoder_layers_23_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1047] model_decoder_layers_23_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[1048] model_decoder_layers_23_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1049] model_decoder_layers_23_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1050] model_decoder_layers_23_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1051] model_decoder_layers_23_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1052] model_decoder_layers_23_encoder_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1053] model_decoder_layers_23_encoder_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1054] model_decoder_layers_23_encoder_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1055] model_decoder_layers_23_encoder_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1056] model_decoder_layers_23_encoder_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[1057] model_decoder_layers_23_encoder_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1058] model_decoder_layers_23_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[1059] model_decoder_layers_23_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[1060] model_decoder_layers_23_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = 
packed_params[1061] model_decoder_layers_23_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1062] model_decoder_layers_23_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[1063] model_decoder_layers_23_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1064] model_decoder_layers_24_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1065] model_decoder_layers_24_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1066] model_decoder_layers_24_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1067] model_decoder_layers_24_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1068] model_decoder_layers_24_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1069] model_decoder_layers_24_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1070] model_decoder_layers_24_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1071] model_decoder_layers_24_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[1072] model_decoder_layers_24_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1073] model_decoder_layers_24_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1074] model_decoder_layers_24_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1075] model_decoder_layers_24_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1076] model_decoder_layers_24_encoder_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1077] model_decoder_layers_24_encoder_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1078] model_decoder_layers_24_encoder_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1079] 
model_decoder_layers_24_encoder_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1080] model_decoder_layers_24_encoder_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[1081] model_decoder_layers_24_encoder_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1082] model_decoder_layers_24_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[1083] model_decoder_layers_24_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[1084] model_decoder_layers_24_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[1085] model_decoder_layers_24_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1086] model_decoder_layers_24_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[1087] model_decoder_layers_24_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1088] model_decoder_layers_25_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1089] model_decoder_layers_25_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1090] model_decoder_layers_25_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1091] model_decoder_layers_25_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1092] model_decoder_layers_25_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1093] model_decoder_layers_25_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1094] model_decoder_layers_25_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1095] model_decoder_layers_25_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[1096] model_decoder_layers_25_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1097] model_decoder_layers_25_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), 
dtype="float16") = packed_params[1098] model_decoder_layers_25_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1099] model_decoder_layers_25_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1100] model_decoder_layers_25_encoder_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1101] model_decoder_layers_25_encoder_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1102] model_decoder_layers_25_encoder_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1103] model_decoder_layers_25_encoder_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1104] model_decoder_layers_25_encoder_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[1105] model_decoder_layers_25_encoder_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1106] model_decoder_layers_25_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[1107] model_decoder_layers_25_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[1108] model_decoder_layers_25_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[1109] model_decoder_layers_25_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1110] model_decoder_layers_25_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[1111] model_decoder_layers_25_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1112] model_decoder_layers_26_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1113] model_decoder_layers_26_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1114] model_decoder_layers_26_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1115] model_decoder_layers_26_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1116] 
model_decoder_layers_26_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1117] model_decoder_layers_26_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1118] model_decoder_layers_26_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1119] model_decoder_layers_26_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[1120] model_decoder_layers_26_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1121] model_decoder_layers_26_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1122] model_decoder_layers_26_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1123] model_decoder_layers_26_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1124] model_decoder_layers_26_encoder_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1125] model_decoder_layers_26_encoder_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1126] model_decoder_layers_26_encoder_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1127] model_decoder_layers_26_encoder_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1128] model_decoder_layers_26_encoder_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[1129] model_decoder_layers_26_encoder_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1130] model_decoder_layers_26_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[1131] model_decoder_layers_26_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[1132] model_decoder_layers_26_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[1133] model_decoder_layers_26_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1134] model_decoder_layers_26_final_layer_norm_weight1: 
R.Tensor((1280,), dtype="float16") = packed_params[1135] model_decoder_layers_26_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1136] model_decoder_layers_27_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1137] model_decoder_layers_27_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1138] model_decoder_layers_27_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1139] model_decoder_layers_27_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1140] model_decoder_layers_27_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1141] model_decoder_layers_27_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1142] model_decoder_layers_27_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1143] model_decoder_layers_27_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[1144] model_decoder_layers_27_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1145] model_decoder_layers_27_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1146] model_decoder_layers_27_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1147] model_decoder_layers_27_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1148] model_decoder_layers_27_encoder_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1149] model_decoder_layers_27_encoder_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1150] model_decoder_layers_27_encoder_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1151] model_decoder_layers_27_encoder_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1152] model_decoder_layers_27_encoder_attn_layer_norm_weight1: 
R.Tensor((1280,), dtype="float16") = packed_params[1153] model_decoder_layers_27_encoder_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1154] model_decoder_layers_27_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[1155] model_decoder_layers_27_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[1156] model_decoder_layers_27_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[1157] model_decoder_layers_27_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1158] model_decoder_layers_27_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[1159] model_decoder_layers_27_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1160] model_decoder_layers_28_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1161] model_decoder_layers_28_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1162] model_decoder_layers_28_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1163] model_decoder_layers_28_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1164] model_decoder_layers_28_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1165] model_decoder_layers_28_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1166] model_decoder_layers_28_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1167] model_decoder_layers_28_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[1168] model_decoder_layers_28_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1169] model_decoder_layers_28_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1170] model_decoder_layers_28_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1171] 
model_decoder_layers_28_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1172] model_decoder_layers_28_encoder_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1173] model_decoder_layers_28_encoder_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1174] model_decoder_layers_28_encoder_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1175] model_decoder_layers_28_encoder_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1176] model_decoder_layers_28_encoder_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[1177] model_decoder_layers_28_encoder_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1178] model_decoder_layers_28_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[1179] model_decoder_layers_28_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[1180] model_decoder_layers_28_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[1181] model_decoder_layers_28_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1182] model_decoder_layers_28_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[1183] model_decoder_layers_28_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1184] model_decoder_layers_29_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1185] model_decoder_layers_29_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1186] model_decoder_layers_29_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1187] model_decoder_layers_29_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1188] model_decoder_layers_29_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1189] model_decoder_layers_29_self_attn_out_proj_weight1: R.Tensor((1280, 1280), 
dtype="float16") = packed_params[1190] model_decoder_layers_29_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1191] model_decoder_layers_29_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[1192] model_decoder_layers_29_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1193] model_decoder_layers_29_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1194] model_decoder_layers_29_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1195] model_decoder_layers_29_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1196] model_decoder_layers_29_encoder_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1197] model_decoder_layers_29_encoder_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1198] model_decoder_layers_29_encoder_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1199] model_decoder_layers_29_encoder_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1200] model_decoder_layers_29_encoder_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[1201] model_decoder_layers_29_encoder_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1202] model_decoder_layers_29_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[1203] model_decoder_layers_29_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[1204] model_decoder_layers_29_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[1205] model_decoder_layers_29_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1206] model_decoder_layers_29_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[1207] model_decoder_layers_29_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1208] 
model_decoder_layers_30_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1209] model_decoder_layers_30_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1210] model_decoder_layers_30_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1211] model_decoder_layers_30_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1212] model_decoder_layers_30_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1213] model_decoder_layers_30_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1214] model_decoder_layers_30_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1215] model_decoder_layers_30_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[1216] model_decoder_layers_30_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1217] model_decoder_layers_30_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1218] model_decoder_layers_30_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1219] model_decoder_layers_30_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1220] model_decoder_layers_30_encoder_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1221] model_decoder_layers_30_encoder_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1222] model_decoder_layers_30_encoder_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1223] model_decoder_layers_30_encoder_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1224] model_decoder_layers_30_encoder_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[1225] model_decoder_layers_30_encoder_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = 
packed_params[1226] model_decoder_layers_30_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[1227] model_decoder_layers_30_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[1228] model_decoder_layers_30_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[1229] model_decoder_layers_30_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1230] model_decoder_layers_30_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[1231] model_decoder_layers_30_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1232] model_decoder_layers_31_self_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1233] model_decoder_layers_31_self_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1234] model_decoder_layers_31_self_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1235] model_decoder_layers_31_self_attn_q_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1236] model_decoder_layers_31_self_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1237] model_decoder_layers_31_self_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1238] model_decoder_layers_31_self_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1239] model_decoder_layers_31_self_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[1240] model_decoder_layers_31_self_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1241] model_decoder_layers_31_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1242] model_decoder_layers_31_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1243] model_decoder_layers_31_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1244] model_decoder_layers_31_encoder_attn_q_proj_weight1: 
R.Tensor((1280, 1280), dtype="float16") = packed_params[1245] model_decoder_layers_31_encoder_attn_q_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1246] model_decoder_layers_31_encoder_attn_out_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1247] model_decoder_layers_31_encoder_attn_out_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1248] model_decoder_layers_31_encoder_attn_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[1249] model_decoder_layers_31_encoder_attn_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1250] model_decoder_layers_31_fc1_weight1: R.Tensor((5120, 1280), dtype="float16") = packed_params[1251] model_decoder_layers_31_fc1_bias1: R.Tensor((5120,), dtype="float16") = packed_params[1252] model_decoder_layers_31_fc2_weight1: R.Tensor((1280, 5120), dtype="float16") = packed_params[1253] model_decoder_layers_31_fc2_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1254] model_decoder_layers_31_final_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[1255] model_decoder_layers_31_final_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1256] model_decoder_layer_norm_weight1: R.Tensor((1280,), dtype="float16") = packed_params[1257] model_decoder_layer_norm_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1258] permute_dims193: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_0_encoder_attn_k_proj_weight1, axes=None) matmul192: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims193, out_dtype="void") reshape256: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul192, R.shape([batch_size, 1500, 20, 64])) permute_dims194: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_0_encoder_attn_v_proj_weight1, axes=None) matmul193: R.Tensor((batch_size, 1500, 1280), dtype="float16") = 
R.matmul(encoder_hidden_states, permute_dims194, out_dtype="void") add225: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul193, model_decoder_layers_0_encoder_attn_v_proj_bias1) reshape257: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add225, R.shape([batch_size, 1500, 20, 64])) reshape258: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape256, R.shape([batch_size * 1500, 20, 64])) reshape259: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape257, R.shape([batch_size * 1500, 20, 64])) lv36: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", paged_kv_cache, R.prim_value(0), reshape258, reshape259, sinfo_args=(R.Object,)) permute_dims195: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_1_encoder_attn_k_proj_weight1, axes=None) matmul194: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims195, out_dtype="void") reshape260: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul194, R.shape([batch_size, 1500, 20, 64])) permute_dims196: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_1_encoder_attn_v_proj_weight1, axes=None) matmul195: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims196, out_dtype="void") add226: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul195, model_decoder_layers_1_encoder_attn_v_proj_bias1) reshape261: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add226, R.shape([batch_size, 1500, 20, 64])) reshape262: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape260, R.shape([batch_size * 1500, 20, 64])) reshape263: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape261, R.shape([batch_size * 1500, 20, 64])) lv37: R.Object = 
R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv36, R.prim_value(1), reshape262, reshape263, sinfo_args=(R.Object,)) permute_dims197: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_2_encoder_attn_k_proj_weight1, axes=None) matmul196: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims197, out_dtype="void") reshape264: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul196, R.shape([batch_size, 1500, 20, 64])) permute_dims198: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_2_encoder_attn_v_proj_weight1, axes=None) matmul197: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims198, out_dtype="void") add227: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul197, model_decoder_layers_2_encoder_attn_v_proj_bias1) reshape265: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add227, R.shape([batch_size, 1500, 20, 64])) reshape266: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape264, R.shape([batch_size * 1500, 20, 64])) reshape267: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape265, R.shape([batch_size * 1500, 20, 64])) lv38: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv37, R.prim_value(2), reshape266, reshape267, sinfo_args=(R.Object,)) permute_dims199: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_3_encoder_attn_k_proj_weight1, axes=None) matmul198: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims199, out_dtype="void") reshape268: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul198, R.shape([batch_size, 1500, 20, 64])) permute_dims200: R.Tensor((1280, 1280), dtype="float16") = 
R.permute_dims(model_decoder_layers_3_encoder_attn_v_proj_weight1, axes=None) matmul199: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims200, out_dtype="void") add228: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul199, model_decoder_layers_3_encoder_attn_v_proj_bias1) reshape269: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add228, R.shape([batch_size, 1500, 20, 64])) reshape270: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape268, R.shape([batch_size * 1500, 20, 64])) reshape271: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape269, R.shape([batch_size * 1500, 20, 64])) lv39: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv38, R.prim_value(3), reshape270, reshape271, sinfo_args=(R.Object,)) permute_dims201: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_4_encoder_attn_k_proj_weight1, axes=None) matmul200: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims201, out_dtype="void") reshape272: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul200, R.shape([batch_size, 1500, 20, 64])) permute_dims202: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_4_encoder_attn_v_proj_weight1, axes=None) matmul201: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims202, out_dtype="void") add229: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul201, model_decoder_layers_4_encoder_attn_v_proj_bias1) reshape273: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add229, R.shape([batch_size, 1500, 20, 64])) reshape274: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape272, R.shape([batch_size * 1500, 20, 64])) reshape275: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = 
R.reshape(reshape273, R.shape([batch_size * 1500, 20, 64])) lv40: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv39, R.prim_value(4), reshape274, reshape275, sinfo_args=(R.Object,)) permute_dims203: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_5_encoder_attn_k_proj_weight1, axes=None) matmul202: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims203, out_dtype="void") reshape276: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul202, R.shape([batch_size, 1500, 20, 64])) permute_dims204: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_5_encoder_attn_v_proj_weight1, axes=None) matmul203: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims204, out_dtype="void") add230: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul203, model_decoder_layers_5_encoder_attn_v_proj_bias1) reshape277: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add230, R.shape([batch_size, 1500, 20, 64])) reshape278: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape276, R.shape([batch_size * 1500, 20, 64])) reshape279: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape277, R.shape([batch_size * 1500, 20, 64])) lv41: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv40, R.prim_value(5), reshape278, reshape279, sinfo_args=(R.Object,)) permute_dims205: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_6_encoder_attn_k_proj_weight1, axes=None) matmul204: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims205, out_dtype="void") reshape280: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul204, R.shape([batch_size, 1500, 20, 64])) permute_dims206: R.Tensor((1280, 1280), 
dtype="float16") = R.permute_dims(model_decoder_layers_6_encoder_attn_v_proj_weight1, axes=None) matmul205: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims206, out_dtype="void") add231: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul205, model_decoder_layers_6_encoder_attn_v_proj_bias1) reshape281: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add231, R.shape([batch_size, 1500, 20, 64])) reshape282: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape280, R.shape([batch_size * 1500, 20, 64])) reshape283: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape281, R.shape([batch_size * 1500, 20, 64])) lv42: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv41, R.prim_value(6), reshape282, reshape283, sinfo_args=(R.Object,)) permute_dims207: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_7_encoder_attn_k_proj_weight1, axes=None) matmul206: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims207, out_dtype="void") reshape284: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul206, R.shape([batch_size, 1500, 20, 64])) permute_dims208: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_7_encoder_attn_v_proj_weight1, axes=None) matmul207: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims208, out_dtype="void") add232: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul207, model_decoder_layers_7_encoder_attn_v_proj_bias1) reshape285: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add232, R.shape([batch_size, 1500, 20, 64])) reshape286: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape284, R.shape([batch_size * 1500, 20, 64])) reshape287: R.Tensor((batch_size * 1500, 20, 64), 
dtype="float16") = R.reshape(reshape285, R.shape([batch_size * 1500, 20, 64])) lv43: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv42, R.prim_value(7), reshape286, reshape287, sinfo_args=(R.Object,)) permute_dims209: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_8_encoder_attn_k_proj_weight1, axes=None) matmul208: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims209, out_dtype="void") reshape288: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul208, R.shape([batch_size, 1500, 20, 64])) permute_dims210: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_8_encoder_attn_v_proj_weight1, axes=None) matmul209: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims210, out_dtype="void") add233: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul209, model_decoder_layers_8_encoder_attn_v_proj_bias1) reshape289: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add233, R.shape([batch_size, 1500, 20, 64])) reshape290: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape288, R.shape([batch_size * 1500, 20, 64])) reshape291: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape289, R.shape([batch_size * 1500, 20, 64])) lv44: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv43, R.prim_value(8), reshape290, reshape291, sinfo_args=(R.Object,)) permute_dims211: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_9_encoder_attn_k_proj_weight1, axes=None) matmul210: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims211, out_dtype="void") reshape292: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul210, R.shape([batch_size, 1500, 20, 64])) permute_dims212: 
R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_9_encoder_attn_v_proj_weight1, axes=None) matmul211: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims212, out_dtype="void") add234: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul211, model_decoder_layers_9_encoder_attn_v_proj_bias1) reshape293: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add234, R.shape([batch_size, 1500, 20, 64])) reshape294: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape292, R.shape([batch_size * 1500, 20, 64])) reshape295: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape293, R.shape([batch_size * 1500, 20, 64])) lv45: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv44, R.prim_value(9), reshape294, reshape295, sinfo_args=(R.Object,)) permute_dims213: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_10_encoder_attn_k_proj_weight1, axes=None) matmul212: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims213, out_dtype="void") reshape296: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul212, R.shape([batch_size, 1500, 20, 64])) permute_dims214: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_10_encoder_attn_v_proj_weight1, axes=None) matmul213: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims214, out_dtype="void") add235: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul213, model_decoder_layers_10_encoder_attn_v_proj_bias1) reshape297: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add235, R.shape([batch_size, 1500, 20, 64])) reshape298: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape296, R.shape([batch_size * 1500, 20, 64])) reshape299: 
R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape297, R.shape([batch_size * 1500, 20, 64])) lv46: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv45, R.prim_value(10), reshape298, reshape299, sinfo_args=(R.Object,)) permute_dims215: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_11_encoder_attn_k_proj_weight1, axes=None) matmul214: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims215, out_dtype="void") reshape300: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul214, R.shape([batch_size, 1500, 20, 64])) permute_dims216: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_11_encoder_attn_v_proj_weight1, axes=None) matmul215: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims216, out_dtype="void") add236: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul215, model_decoder_layers_11_encoder_attn_v_proj_bias1) reshape301: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add236, R.shape([batch_size, 1500, 20, 64])) reshape302: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape300, R.shape([batch_size * 1500, 20, 64])) reshape303: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape301, R.shape([batch_size * 1500, 20, 64])) lv47: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv46, R.prim_value(11), reshape302, reshape303, sinfo_args=(R.Object,)) permute_dims217: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_12_encoder_attn_k_proj_weight1, axes=None) matmul216: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims217, out_dtype="void") reshape304: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul216, 
R.shape([batch_size, 1500, 20, 64])) permute_dims218: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_12_encoder_attn_v_proj_weight1, axes=None) matmul217: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims218, out_dtype="void") add237: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul217, model_decoder_layers_12_encoder_attn_v_proj_bias1) reshape305: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add237, R.shape([batch_size, 1500, 20, 64])) reshape306: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape304, R.shape([batch_size * 1500, 20, 64])) reshape307: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape305, R.shape([batch_size * 1500, 20, 64])) lv48: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv47, R.prim_value(12), reshape306, reshape307, sinfo_args=(R.Object,)) permute_dims219: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_13_encoder_attn_k_proj_weight1, axes=None) matmul218: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims219, out_dtype="void") reshape308: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul218, R.shape([batch_size, 1500, 20, 64])) permute_dims220: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_13_encoder_attn_v_proj_weight1, axes=None) matmul219: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims220, out_dtype="void") add238: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul219, model_decoder_layers_13_encoder_attn_v_proj_bias1) reshape309: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add238, R.shape([batch_size, 1500, 20, 64])) reshape310: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape308, 
R.shape([batch_size * 1500, 20, 64])) reshape311: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape309, R.shape([batch_size * 1500, 20, 64])) lv49: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv48, R.prim_value(13), reshape310, reshape311, sinfo_args=(R.Object,)) permute_dims221: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_14_encoder_attn_k_proj_weight1, axes=None) matmul220: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims221, out_dtype="void") reshape312: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul220, R.shape([batch_size, 1500, 20, 64])) permute_dims222: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_14_encoder_attn_v_proj_weight1, axes=None) matmul221: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims222, out_dtype="void") add239: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul221, model_decoder_layers_14_encoder_attn_v_proj_bias1) reshape313: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add239, R.shape([batch_size, 1500, 20, 64])) reshape314: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape312, R.shape([batch_size * 1500, 20, 64])) reshape315: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape313, R.shape([batch_size * 1500, 20, 64])) lv50: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv49, R.prim_value(14), reshape314, reshape315, sinfo_args=(R.Object,)) permute_dims223: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_15_encoder_attn_k_proj_weight1, axes=None) matmul222: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims223, out_dtype="void") reshape316: R.Tensor((batch_size, 1500, 20, 64), 
dtype="float16") = R.reshape(matmul222, R.shape([batch_size, 1500, 20, 64])) permute_dims224: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_15_encoder_attn_v_proj_weight1, axes=None) matmul223: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims224, out_dtype="void") add240: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul223, model_decoder_layers_15_encoder_attn_v_proj_bias1) reshape317: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add240, R.shape([batch_size, 1500, 20, 64])) reshape318: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape316, R.shape([batch_size * 1500, 20, 64])) reshape319: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape317, R.shape([batch_size * 1500, 20, 64])) lv51: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv50, R.prim_value(15), reshape318, reshape319, sinfo_args=(R.Object,)) permute_dims225: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_16_encoder_attn_k_proj_weight1, axes=None) matmul224: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims225, out_dtype="void") reshape320: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul224, R.shape([batch_size, 1500, 20, 64])) permute_dims226: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_16_encoder_attn_v_proj_weight1, axes=None) matmul225: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims226, out_dtype="void") add241: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul225, model_decoder_layers_16_encoder_attn_v_proj_bias1) reshape321: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add241, R.shape([batch_size, 1500, 20, 64])) reshape322: R.Tensor((batch_size * 1500, 20, 64), 
dtype="float16") = R.reshape(reshape320, R.shape([batch_size * 1500, 20, 64])) reshape323: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape321, R.shape([batch_size * 1500, 20, 64])) lv52: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv51, R.prim_value(16), reshape322, reshape323, sinfo_args=(R.Object,)) permute_dims227: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_17_encoder_attn_k_proj_weight1, axes=None) matmul226: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims227, out_dtype="void") reshape324: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul226, R.shape([batch_size, 1500, 20, 64])) permute_dims228: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_17_encoder_attn_v_proj_weight1, axes=None) matmul227: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims228, out_dtype="void") add242: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul227, model_decoder_layers_17_encoder_attn_v_proj_bias1) reshape325: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add242, R.shape([batch_size, 1500, 20, 64])) reshape326: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape324, R.shape([batch_size * 1500, 20, 64])) reshape327: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape325, R.shape([batch_size * 1500, 20, 64])) lv53: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv52, R.prim_value(17), reshape326, reshape327, sinfo_args=(R.Object,)) permute_dims229: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_18_encoder_attn_k_proj_weight1, axes=None) matmul228: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims229, out_dtype="void") reshape328: 
R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul228, R.shape([batch_size, 1500, 20, 64])) permute_dims230: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_18_encoder_attn_v_proj_weight1, axes=None) matmul229: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims230, out_dtype="void") add243: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul229, model_decoder_layers_18_encoder_attn_v_proj_bias1) reshape329: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add243, R.shape([batch_size, 1500, 20, 64])) reshape330: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape328, R.shape([batch_size * 1500, 20, 64])) reshape331: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape329, R.shape([batch_size * 1500, 20, 64])) lv54: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv53, R.prim_value(18), reshape330, reshape331, sinfo_args=(R.Object,)) permute_dims231: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_19_encoder_attn_k_proj_weight1, axes=None) matmul230: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims231, out_dtype="void") reshape332: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul230, R.shape([batch_size, 1500, 20, 64])) permute_dims232: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_19_encoder_attn_v_proj_weight1, axes=None) matmul231: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims232, out_dtype="void") add244: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul231, model_decoder_layers_19_encoder_attn_v_proj_bias1) reshape333: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add244, R.shape([batch_size, 1500, 20, 64])) reshape334: 
R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape332, R.shape([batch_size * 1500, 20, 64])) reshape335: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape333, R.shape([batch_size * 1500, 20, 64])) lv55: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv54, R.prim_value(19), reshape334, reshape335, sinfo_args=(R.Object,)) permute_dims233: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_20_encoder_attn_k_proj_weight1, axes=None) matmul232: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims233, out_dtype="void") reshape336: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul232, R.shape([batch_size, 1500, 20, 64])) permute_dims234: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_20_encoder_attn_v_proj_weight1, axes=None) matmul233: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims234, out_dtype="void") add245: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul233, model_decoder_layers_20_encoder_attn_v_proj_bias1) reshape337: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add245, R.shape([batch_size, 1500, 20, 64])) reshape338: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape336, R.shape([batch_size * 1500, 20, 64])) reshape339: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape337, R.shape([batch_size * 1500, 20, 64])) lv56: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv55, R.prim_value(20), reshape338, reshape339, sinfo_args=(R.Object,)) permute_dims235: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_21_encoder_attn_k_proj_weight1, axes=None) matmul234: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, 
permute_dims235, out_dtype="void") reshape340: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul234, R.shape([batch_size, 1500, 20, 64])) permute_dims236: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_21_encoder_attn_v_proj_weight1, axes=None) matmul235: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims236, out_dtype="void") add246: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul235, model_decoder_layers_21_encoder_attn_v_proj_bias1) reshape341: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add246, R.shape([batch_size, 1500, 20, 64])) reshape342: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape340, R.shape([batch_size * 1500, 20, 64])) reshape343: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape341, R.shape([batch_size * 1500, 20, 64])) lv57: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv56, R.prim_value(21), reshape342, reshape343, sinfo_args=(R.Object,)) permute_dims237: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_22_encoder_attn_k_proj_weight1, axes=None) matmul236: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims237, out_dtype="void") reshape344: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul236, R.shape([batch_size, 1500, 20, 64])) permute_dims238: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_22_encoder_attn_v_proj_weight1, axes=None) matmul237: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims238, out_dtype="void") add247: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul237, model_decoder_layers_22_encoder_attn_v_proj_bias1) reshape345: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add247, 
R.shape([batch_size, 1500, 20, 64])) reshape346: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape344, R.shape([batch_size * 1500, 20, 64])) reshape347: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape345, R.shape([batch_size * 1500, 20, 64])) lv58: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv57, R.prim_value(22), reshape346, reshape347, sinfo_args=(R.Object,)) permute_dims239: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_23_encoder_attn_k_proj_weight1, axes=None) matmul238: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims239, out_dtype="void") reshape348: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul238, R.shape([batch_size, 1500, 20, 64])) permute_dims240: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_23_encoder_attn_v_proj_weight1, axes=None) matmul239: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims240, out_dtype="void") add248: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul239, model_decoder_layers_23_encoder_attn_v_proj_bias1) reshape349: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add248, R.shape([batch_size, 1500, 20, 64])) reshape350: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape348, R.shape([batch_size * 1500, 20, 64])) reshape351: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape349, R.shape([batch_size * 1500, 20, 64])) lv59: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv58, R.prim_value(23), reshape350, reshape351, sinfo_args=(R.Object,)) permute_dims241: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_24_encoder_attn_k_proj_weight1, axes=None) matmul240: R.Tensor((batch_size, 1500, 1280), 
dtype="float16") = R.matmul(encoder_hidden_states, permute_dims241, out_dtype="void") reshape352: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul240, R.shape([batch_size, 1500, 20, 64])) permute_dims242: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_24_encoder_attn_v_proj_weight1, axes=None) matmul241: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims242, out_dtype="void") add249: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul241, model_decoder_layers_24_encoder_attn_v_proj_bias1) reshape353: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add249, R.shape([batch_size, 1500, 20, 64])) reshape354: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape352, R.shape([batch_size * 1500, 20, 64])) reshape355: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape353, R.shape([batch_size * 1500, 20, 64])) lv60: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv59, R.prim_value(24), reshape354, reshape355, sinfo_args=(R.Object,)) permute_dims243: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_25_encoder_attn_k_proj_weight1, axes=None) matmul242: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims243, out_dtype="void") reshape356: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul242, R.shape([batch_size, 1500, 20, 64])) permute_dims244: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_25_encoder_attn_v_proj_weight1, axes=None) matmul243: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims244, out_dtype="void") add250: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul243, model_decoder_layers_25_encoder_attn_v_proj_bias1) reshape357: R.Tensor((batch_size, 1500, 20, 
64), dtype="float16") = R.reshape(add250, R.shape([batch_size, 1500, 20, 64])) reshape358: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape356, R.shape([batch_size * 1500, 20, 64])) reshape359: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape357, R.shape([batch_size * 1500, 20, 64])) lv61: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv60, R.prim_value(25), reshape358, reshape359, sinfo_args=(R.Object,)) permute_dims245: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_26_encoder_attn_k_proj_weight1, axes=None) matmul244: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims245, out_dtype="void") reshape360: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul244, R.shape([batch_size, 1500, 20, 64])) permute_dims246: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_26_encoder_attn_v_proj_weight1, axes=None) matmul245: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims246, out_dtype="void") add251: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul245, model_decoder_layers_26_encoder_attn_v_proj_bias1) reshape361: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add251, R.shape([batch_size, 1500, 20, 64])) reshape362: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape360, R.shape([batch_size * 1500, 20, 64])) reshape363: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape361, R.shape([batch_size * 1500, 20, 64])) lv62: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv61, R.prim_value(26), reshape362, reshape363, sinfo_args=(R.Object,)) permute_dims247: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_27_encoder_attn_k_proj_weight1, axes=None) matmul246: 
R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims247, out_dtype="void") reshape364: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul246, R.shape([batch_size, 1500, 20, 64])) permute_dims248: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_27_encoder_attn_v_proj_weight1, axes=None) matmul247: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims248, out_dtype="void") add252: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul247, model_decoder_layers_27_encoder_attn_v_proj_bias1) reshape365: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add252, R.shape([batch_size, 1500, 20, 64])) reshape366: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape364, R.shape([batch_size * 1500, 20, 64])) reshape367: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape365, R.shape([batch_size * 1500, 20, 64])) lv63: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv62, R.prim_value(27), reshape366, reshape367, sinfo_args=(R.Object,)) permute_dims249: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_28_encoder_attn_k_proj_weight1, axes=None) matmul248: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims249, out_dtype="void") reshape368: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul248, R.shape([batch_size, 1500, 20, 64])) permute_dims250: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_28_encoder_attn_v_proj_weight1, axes=None) matmul249: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims250, out_dtype="void") add253: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul249, model_decoder_layers_28_encoder_attn_v_proj_bias1) 
reshape369: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add253, R.shape([batch_size, 1500, 20, 64])) reshape370: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape368, R.shape([batch_size * 1500, 20, 64])) reshape371: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape369, R.shape([batch_size * 1500, 20, 64])) lv64: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv63, R.prim_value(28), reshape370, reshape371, sinfo_args=(R.Object,)) permute_dims251: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_29_encoder_attn_k_proj_weight1, axes=None) matmul250: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims251, out_dtype="void") reshape372: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul250, R.shape([batch_size, 1500, 20, 64])) permute_dims252: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_29_encoder_attn_v_proj_weight1, axes=None) matmul251: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims252, out_dtype="void") add254: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul251, model_decoder_layers_29_encoder_attn_v_proj_bias1) reshape373: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add254, R.shape([batch_size, 1500, 20, 64])) reshape374: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape372, R.shape([batch_size * 1500, 20, 64])) reshape375: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape373, R.shape([batch_size * 1500, 20, 64])) lv65: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv64, R.prim_value(29), reshape374, reshape375, sinfo_args=(R.Object,)) permute_dims253: R.Tensor((1280, 1280), dtype="float16") = 
R.permute_dims(model_decoder_layers_30_encoder_attn_k_proj_weight1, axes=None) matmul252: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims253, out_dtype="void") reshape376: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul252, R.shape([batch_size, 1500, 20, 64])) permute_dims254: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_30_encoder_attn_v_proj_weight1, axes=None) matmul253: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims254, out_dtype="void") add255: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul253, model_decoder_layers_30_encoder_attn_v_proj_bias1) reshape377: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add255, R.shape([batch_size, 1500, 20, 64])) reshape378: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape376, R.shape([batch_size * 1500, 20, 64])) reshape379: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape377, R.shape([batch_size * 1500, 20, 64])) lv66: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv65, R.prim_value(30), reshape378, reshape379, sinfo_args=(R.Object,)) permute_dims255: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_31_encoder_attn_k_proj_weight1, axes=None) matmul254: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims255, out_dtype="void") reshape380: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul254, R.shape([batch_size, 1500, 20, 64])) permute_dims256: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_31_encoder_attn_v_proj_weight1, axes=None) matmul255: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(encoder_hidden_states, permute_dims256, out_dtype="void") add256: R.Tensor((batch_size, 1500, 1280), 
dtype="float16") = R.add(matmul255, model_decoder_layers_31_encoder_attn_v_proj_bias1) reshape381: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add256, R.shape([batch_size, 1500, 20, 64])) reshape382: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape380, R.shape([batch_size * 1500, 20, 64])) reshape383: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape381, R.shape([batch_size * 1500, 20, 64])) lv67: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv66, R.prim_value(31), reshape382, reshape383, sinfo_args=(R.Object,)) gv1: R.Object = lv67 R.output(gv1) return gv1 @R.function def batch_decode(input_ids: R.Tensor(("batch_size", 1), dtype="int32"), paged_kv_cache: R.Object, packed_params: R.Tuple(R.Tensor((1280, 128, 3), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280, 3), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1500, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), 
dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), 
dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), 
dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), 
dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), 
dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((51866, 1280), dtype="float16"), R.Tensor((448, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), 
dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 
1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), 
dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"))) -> R.Tensor(("batch_size", 1, 51866), dtype="float32"): batch_size = T.int64() R.func_attr({"num_input": 2, "relax.memory_plan_dynamic_func_output": 1, "relax.rewrite_cuda_graph.capture_symbolic_vars": ["batch_size"], "tir_non_negative_var": ["vocab_size"], "tir_var_upper_bound": {"batch_size": 8, "seq_len": 15000, "total_seq_len": 1500}}) with R.dataflow(): model_encoder_conv1_weight3: R.Tensor((1280, 128, 3), dtype="float16") = packed_params[0] model_encoder_conv1_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1] model_encoder_conv2_weight3: R.Tensor((1280, 1280, 3), dtype="float16") = packed_params[2] model_encoder_conv2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[3] model_encoder_embed_positions_weight3: R.Tensor((1500, 1280), dtype="float16") = packed_params[4] model_encoder_layers_0_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[5] model_encoder_layers_0_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[6] model_encoder_layers_0_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[7] model_encoder_layers_0_self_attn_q_proj_weight3: 
R.Tensor((1280, 1280), dtype="float16") = packed_params[8] model_encoder_layers_0_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[9] model_encoder_layers_0_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[10] model_encoder_layers_0_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[11] model_encoder_layers_0_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[12] model_encoder_layers_0_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[13] model_encoder_layers_0_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[14] model_encoder_layers_0_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[15] model_encoder_layers_0_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[16] model_encoder_layers_0_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[17] model_encoder_layers_0_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[18] model_encoder_layers_0_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[19] model_encoder_layers_1_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[20] model_encoder_layers_1_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[21] model_encoder_layers_1_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[22] model_encoder_layers_1_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[23] model_encoder_layers_1_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[24] model_encoder_layers_1_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[25] model_encoder_layers_1_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[26] model_encoder_layers_1_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") 
= packed_params[27] model_encoder_layers_1_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[28] model_encoder_layers_1_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[29] model_encoder_layers_1_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[30] model_encoder_layers_1_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[31] model_encoder_layers_1_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[32] model_encoder_layers_1_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[33] model_encoder_layers_1_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[34] model_encoder_layers_2_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[35] model_encoder_layers_2_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[36] model_encoder_layers_2_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[37] model_encoder_layers_2_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[38] model_encoder_layers_2_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[39] model_encoder_layers_2_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[40] model_encoder_layers_2_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[41] model_encoder_layers_2_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[42] model_encoder_layers_2_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[43] model_encoder_layers_2_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[44] model_encoder_layers_2_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[45] model_encoder_layers_2_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[46] model_encoder_layers_2_fc2_bias3: R.Tensor((1280,), 
dtype="float16") = packed_params[47] model_encoder_layers_2_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[48] model_encoder_layers_2_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[49] model_encoder_layers_3_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[50] model_encoder_layers_3_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[51] model_encoder_layers_3_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[52] model_encoder_layers_3_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[53] model_encoder_layers_3_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[54] model_encoder_layers_3_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[55] model_encoder_layers_3_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[56] model_encoder_layers_3_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[57] model_encoder_layers_3_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[58] model_encoder_layers_3_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[59] model_encoder_layers_3_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[60] model_encoder_layers_3_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[61] model_encoder_layers_3_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[62] model_encoder_layers_3_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[63] model_encoder_layers_3_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[64] model_encoder_layers_4_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[65] model_encoder_layers_4_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[66] 
model_encoder_layers_4_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[67] model_encoder_layers_4_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[68] model_encoder_layers_4_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[69] model_encoder_layers_4_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[70] model_encoder_layers_4_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[71] model_encoder_layers_4_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[72] model_encoder_layers_4_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[73] model_encoder_layers_4_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[74] model_encoder_layers_4_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[75] model_encoder_layers_4_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[76] model_encoder_layers_4_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[77] model_encoder_layers_4_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[78] model_encoder_layers_4_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[79] model_encoder_layers_5_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[80] model_encoder_layers_5_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[81] model_encoder_layers_5_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[82] model_encoder_layers_5_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[83] model_encoder_layers_5_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[84] model_encoder_layers_5_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[85] 
model_encoder_layers_5_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[86] model_encoder_layers_5_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[87] model_encoder_layers_5_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[88] model_encoder_layers_5_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[89] model_encoder_layers_5_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[90] model_encoder_layers_5_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[91] model_encoder_layers_5_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[92] model_encoder_layers_5_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[93] model_encoder_layers_5_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[94] model_encoder_layers_6_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[95] model_encoder_layers_6_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[96] model_encoder_layers_6_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[97] model_encoder_layers_6_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[98] model_encoder_layers_6_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[99] model_encoder_layers_6_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[100] model_encoder_layers_6_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[101] model_encoder_layers_6_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[102] model_encoder_layers_6_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[103] model_encoder_layers_6_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[104] model_encoder_layers_6_fc1_bias3: 
R.Tensor((5120,), dtype="float16") = packed_params[105] model_encoder_layers_6_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[106] model_encoder_layers_6_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[107] model_encoder_layers_6_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[108] model_encoder_layers_6_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[109] model_encoder_layers_7_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[110] model_encoder_layers_7_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[111] model_encoder_layers_7_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[112] model_encoder_layers_7_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[113] model_encoder_layers_7_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[114] model_encoder_layers_7_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[115] model_encoder_layers_7_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[116] model_encoder_layers_7_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[117] model_encoder_layers_7_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[118] model_encoder_layers_7_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[119] model_encoder_layers_7_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[120] model_encoder_layers_7_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[121] model_encoder_layers_7_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[122] model_encoder_layers_7_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[123] model_encoder_layers_7_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = 
packed_params[124] model_encoder_layers_8_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[125] model_encoder_layers_8_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[126] model_encoder_layers_8_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[127] model_encoder_layers_8_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[128] model_encoder_layers_8_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[129] model_encoder_layers_8_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[130] model_encoder_layers_8_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[131] model_encoder_layers_8_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[132] model_encoder_layers_8_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[133] model_encoder_layers_8_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[134] model_encoder_layers_8_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[135] model_encoder_layers_8_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[136] model_encoder_layers_8_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[137] model_encoder_layers_8_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[138] model_encoder_layers_8_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[139] model_encoder_layers_9_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[140] model_encoder_layers_9_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[141] model_encoder_layers_9_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[142] model_encoder_layers_9_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = 
packed_params[143] model_encoder_layers_9_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[144] model_encoder_layers_9_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[145] model_encoder_layers_9_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[146] model_encoder_layers_9_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[147] model_encoder_layers_9_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[148] model_encoder_layers_9_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[149] model_encoder_layers_9_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[150] model_encoder_layers_9_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[151] model_encoder_layers_9_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[152] model_encoder_layers_9_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[153] model_encoder_layers_9_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[154] model_encoder_layers_10_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[155] model_encoder_layers_10_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[156] model_encoder_layers_10_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[157] model_encoder_layers_10_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[158] model_encoder_layers_10_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[159] model_encoder_layers_10_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[160] model_encoder_layers_10_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[161] model_encoder_layers_10_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = 
packed_params[162] model_encoder_layers_10_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[163] model_encoder_layers_10_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[164] model_encoder_layers_10_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[165] model_encoder_layers_10_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[166] model_encoder_layers_10_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[167] model_encoder_layers_10_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[168] model_encoder_layers_10_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[169] model_encoder_layers_11_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[170] model_encoder_layers_11_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[171] model_encoder_layers_11_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[172] model_encoder_layers_11_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[173] model_encoder_layers_11_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[174] model_encoder_layers_11_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[175] model_encoder_layers_11_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[176] model_encoder_layers_11_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[177] model_encoder_layers_11_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[178] model_encoder_layers_11_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[179] model_encoder_layers_11_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[180] model_encoder_layers_11_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[181] 
model_encoder_layers_11_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[182] model_encoder_layers_11_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[183] model_encoder_layers_11_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[184] model_encoder_layers_12_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[185] model_encoder_layers_12_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[186] model_encoder_layers_12_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[187] model_encoder_layers_12_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[188] model_encoder_layers_12_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[189] model_encoder_layers_12_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[190] model_encoder_layers_12_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[191] model_encoder_layers_12_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[192] model_encoder_layers_12_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[193] model_encoder_layers_12_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[194] model_encoder_layers_12_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[195] model_encoder_layers_12_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[196] model_encoder_layers_12_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[197] model_encoder_layers_12_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[198] model_encoder_layers_12_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[199] model_encoder_layers_13_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[200] 
model_encoder_layers_13_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[201] model_encoder_layers_13_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[202] model_encoder_layers_13_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[203] model_encoder_layers_13_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[204] model_encoder_layers_13_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[205] model_encoder_layers_13_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[206] model_encoder_layers_13_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[207] model_encoder_layers_13_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[208] model_encoder_layers_13_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[209] model_encoder_layers_13_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[210] model_encoder_layers_13_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[211] model_encoder_layers_13_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[212] model_encoder_layers_13_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[213] model_encoder_layers_13_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[214] model_encoder_layers_14_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[215] model_encoder_layers_14_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[216] model_encoder_layers_14_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[217] model_encoder_layers_14_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[218] model_encoder_layers_14_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[219] 
model_encoder_layers_14_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[220] model_encoder_layers_14_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[221] model_encoder_layers_14_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[222] model_encoder_layers_14_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[223] model_encoder_layers_14_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[224] model_encoder_layers_14_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[225] model_encoder_layers_14_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[226] model_encoder_layers_14_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[227] model_encoder_layers_14_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[228] model_encoder_layers_14_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[229] model_encoder_layers_15_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[230] model_encoder_layers_15_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[231] model_encoder_layers_15_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[232] model_encoder_layers_15_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[233] model_encoder_layers_15_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[234] model_encoder_layers_15_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[235] model_encoder_layers_15_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[236] model_encoder_layers_15_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[237] model_encoder_layers_15_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = 
packed_params[238] model_encoder_layers_15_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[239] model_encoder_layers_15_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[240] model_encoder_layers_15_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[241] model_encoder_layers_15_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[242] model_encoder_layers_15_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[243] model_encoder_layers_15_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[244] model_encoder_layers_16_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[245] model_encoder_layers_16_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[246] model_encoder_layers_16_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[247] model_encoder_layers_16_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[248] model_encoder_layers_16_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[249] model_encoder_layers_16_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[250] model_encoder_layers_16_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[251] model_encoder_layers_16_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[252] model_encoder_layers_16_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[253] model_encoder_layers_16_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[254] model_encoder_layers_16_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[255] model_encoder_layers_16_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[256] model_encoder_layers_16_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[257] 
model_encoder_layers_16_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[258] model_encoder_layers_16_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[259] model_encoder_layers_17_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[260] model_encoder_layers_17_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[261] model_encoder_layers_17_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[262] model_encoder_layers_17_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[263] model_encoder_layers_17_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[264] model_encoder_layers_17_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[265] model_encoder_layers_17_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[266] model_encoder_layers_17_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[267] model_encoder_layers_17_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[268] model_encoder_layers_17_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[269] model_encoder_layers_17_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[270] model_encoder_layers_17_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[271] model_encoder_layers_17_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[272] model_encoder_layers_17_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[273] model_encoder_layers_17_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[274] model_encoder_layers_18_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[275] model_encoder_layers_18_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[276] 
model_encoder_layers_18_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[277] model_encoder_layers_18_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[278] model_encoder_layers_18_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[279] model_encoder_layers_18_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[280] model_encoder_layers_18_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[281] model_encoder_layers_18_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[282] model_encoder_layers_18_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[283] model_encoder_layers_18_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[284] model_encoder_layers_18_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[285] model_encoder_layers_18_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[286] model_encoder_layers_18_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[287] model_encoder_layers_18_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[288] model_encoder_layers_18_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[289] model_encoder_layers_19_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[290] model_encoder_layers_19_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[291] model_encoder_layers_19_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[292] model_encoder_layers_19_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[293] model_encoder_layers_19_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[294] model_encoder_layers_19_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[295] 
model_encoder_layers_19_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[296] model_encoder_layers_19_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[297] model_encoder_layers_19_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[298] model_encoder_layers_19_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[299] model_encoder_layers_19_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[300] model_encoder_layers_19_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[301] model_encoder_layers_19_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[302] model_encoder_layers_19_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[303] model_encoder_layers_19_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[304] model_encoder_layers_20_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[305] model_encoder_layers_20_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[306] model_encoder_layers_20_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[307] model_encoder_layers_20_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[308] model_encoder_layers_20_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[309] model_encoder_layers_20_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[310] model_encoder_layers_20_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[311] model_encoder_layers_20_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[312] model_encoder_layers_20_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[313] model_encoder_layers_20_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[314] 
model_encoder_layers_20_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[315] model_encoder_layers_20_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[316] model_encoder_layers_20_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[317] model_encoder_layers_20_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[318] model_encoder_layers_20_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[319] model_encoder_layers_21_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[320] model_encoder_layers_21_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[321] model_encoder_layers_21_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[322] model_encoder_layers_21_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[323] model_encoder_layers_21_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[324] model_encoder_layers_21_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[325] model_encoder_layers_21_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[326] model_encoder_layers_21_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[327] model_encoder_layers_21_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[328] model_encoder_layers_21_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[329] model_encoder_layers_21_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[330] model_encoder_layers_21_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[331] model_encoder_layers_21_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[332] model_encoder_layers_21_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[333] model_encoder_layers_21_final_layer_norm_bias3: 
R.Tensor((1280,), dtype="float16") = packed_params[334] model_encoder_layers_22_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[335] model_encoder_layers_22_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[336] model_encoder_layers_22_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[337] model_encoder_layers_22_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[338] model_encoder_layers_22_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[339] model_encoder_layers_22_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[340] model_encoder_layers_22_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[341] model_encoder_layers_22_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[342] model_encoder_layers_22_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[343] model_encoder_layers_22_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[344] model_encoder_layers_22_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[345] model_encoder_layers_22_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[346] model_encoder_layers_22_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[347] model_encoder_layers_22_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[348] model_encoder_layers_22_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[349] model_encoder_layers_23_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[350] model_encoder_layers_23_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[351] model_encoder_layers_23_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[352] model_encoder_layers_23_self_attn_q_proj_weight3: 
R.Tensor((1280, 1280), dtype="float16") = packed_params[353] model_encoder_layers_23_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[354] model_encoder_layers_23_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[355] model_encoder_layers_23_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[356] model_encoder_layers_23_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[357] model_encoder_layers_23_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[358] model_encoder_layers_23_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[359] model_encoder_layers_23_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[360] model_encoder_layers_23_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[361] model_encoder_layers_23_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[362] model_encoder_layers_23_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[363] model_encoder_layers_23_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[364] model_encoder_layers_24_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[365] model_encoder_layers_24_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[366] model_encoder_layers_24_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[367] model_encoder_layers_24_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[368] model_encoder_layers_24_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[369] model_encoder_layers_24_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[370] model_encoder_layers_24_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[371] 
model_encoder_layers_24_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[372] model_encoder_layers_24_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[373] model_encoder_layers_24_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[374] model_encoder_layers_24_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[375] model_encoder_layers_24_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[376] model_encoder_layers_24_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[377] model_encoder_layers_24_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[378] model_encoder_layers_24_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[379] model_encoder_layers_25_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[380] model_encoder_layers_25_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[381] model_encoder_layers_25_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[382] model_encoder_layers_25_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[383] model_encoder_layers_25_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[384] model_encoder_layers_25_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[385] model_encoder_layers_25_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[386] model_encoder_layers_25_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[387] model_encoder_layers_25_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[388] model_encoder_layers_25_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[389] model_encoder_layers_25_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[390] 
model_encoder_layers_25_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[391] model_encoder_layers_25_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[392] model_encoder_layers_25_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[393] model_encoder_layers_25_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[394] model_encoder_layers_26_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[395] model_encoder_layers_26_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[396] model_encoder_layers_26_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[397] model_encoder_layers_26_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[398] model_encoder_layers_26_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[399] model_encoder_layers_26_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[400] model_encoder_layers_26_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[401] model_encoder_layers_26_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[402] model_encoder_layers_26_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[403] model_encoder_layers_26_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[404] model_encoder_layers_26_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[405] model_encoder_layers_26_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[406] model_encoder_layers_26_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[407] model_encoder_layers_26_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[408] model_encoder_layers_26_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[409] 
model_encoder_layers_27_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[410] model_encoder_layers_27_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[411] model_encoder_layers_27_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[412] model_encoder_layers_27_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[413] model_encoder_layers_27_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[414] model_encoder_layers_27_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[415] model_encoder_layers_27_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[416] model_encoder_layers_27_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[417] model_encoder_layers_27_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[418] model_encoder_layers_27_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[419] model_encoder_layers_27_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[420] model_encoder_layers_27_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[421] model_encoder_layers_27_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[422] model_encoder_layers_27_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[423] model_encoder_layers_27_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[424] model_encoder_layers_28_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[425] model_encoder_layers_28_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[426] model_encoder_layers_28_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[427] model_encoder_layers_28_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = 
packed_params[428] model_encoder_layers_28_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[429] model_encoder_layers_28_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[430] model_encoder_layers_28_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[431] model_encoder_layers_28_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[432] model_encoder_layers_28_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[433] model_encoder_layers_28_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[434] model_encoder_layers_28_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[435] model_encoder_layers_28_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[436] model_encoder_layers_28_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[437] model_encoder_layers_28_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[438] model_encoder_layers_28_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[439] model_encoder_layers_29_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[440] model_encoder_layers_29_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[441] model_encoder_layers_29_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[442] model_encoder_layers_29_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[443] model_encoder_layers_29_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[444] model_encoder_layers_29_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[445] model_encoder_layers_29_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[446] model_encoder_layers_29_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = 
packed_params[447] model_encoder_layers_29_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[448] model_encoder_layers_29_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[449] model_encoder_layers_29_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[450] model_encoder_layers_29_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[451] model_encoder_layers_29_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[452] model_encoder_layers_29_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[453] model_encoder_layers_29_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[454] model_encoder_layers_30_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[455] model_encoder_layers_30_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[456] model_encoder_layers_30_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[457] model_encoder_layers_30_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[458] model_encoder_layers_30_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[459] model_encoder_layers_30_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[460] model_encoder_layers_30_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[461] model_encoder_layers_30_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[462] model_encoder_layers_30_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[463] model_encoder_layers_30_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[464] model_encoder_layers_30_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[465] model_encoder_layers_30_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[466] 
model_encoder_layers_30_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[467] model_encoder_layers_30_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[468] model_encoder_layers_30_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[469] model_encoder_layers_31_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[470] model_encoder_layers_31_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[471] model_encoder_layers_31_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[472] model_encoder_layers_31_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[473] model_encoder_layers_31_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[474] model_encoder_layers_31_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[475] model_encoder_layers_31_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[476] model_encoder_layers_31_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[477] model_encoder_layers_31_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[478] model_encoder_layers_31_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[479] model_encoder_layers_31_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[480] model_encoder_layers_31_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[481] model_encoder_layers_31_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[482] model_encoder_layers_31_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[483] model_encoder_layers_31_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[484] model_encoder_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[485] model_encoder_layer_norm_bias3: 
R.Tensor((1280,), dtype="float16") = packed_params[486] model_decoder_embed_tokens_weight3: R.Tensor((51866, 1280), dtype="float16") = packed_params[487] model_decoder_embed_positions_weight3: R.Tensor((448, 1280), dtype="float16") = packed_params[488] model_decoder_layers_0_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[489] model_decoder_layers_0_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[490] model_decoder_layers_0_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[491] model_decoder_layers_0_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[492] model_decoder_layers_0_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[493] model_decoder_layers_0_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[494] model_decoder_layers_0_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[495] model_decoder_layers_0_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[496] model_decoder_layers_0_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[497] model_decoder_layers_0_encoder_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[498] model_decoder_layers_0_encoder_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[499] model_decoder_layers_0_encoder_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[500] model_decoder_layers_0_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[501] model_decoder_layers_0_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[502] model_decoder_layers_0_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[503] model_decoder_layers_0_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[504] 
model_decoder_layers_0_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[505] model_decoder_layers_0_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[506] model_decoder_layers_0_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[507] model_decoder_layers_0_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[508] model_decoder_layers_0_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[509] model_decoder_layers_0_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[510] model_decoder_layers_0_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[511] model_decoder_layers_0_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[512] model_decoder_layers_1_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[513] model_decoder_layers_1_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[514] model_decoder_layers_1_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[515] model_decoder_layers_1_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[516] model_decoder_layers_1_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[517] model_decoder_layers_1_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[518] model_decoder_layers_1_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[519] model_decoder_layers_1_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[520] model_decoder_layers_1_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[521] model_decoder_layers_1_encoder_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[522] model_decoder_layers_1_encoder_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[523] 
model_decoder_layers_1_encoder_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[524] model_decoder_layers_1_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[525] model_decoder_layers_1_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[526] model_decoder_layers_1_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[527] model_decoder_layers_1_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[528] model_decoder_layers_1_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[529] model_decoder_layers_1_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[530] model_decoder_layers_1_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[531] model_decoder_layers_1_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[532] model_decoder_layers_1_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[533] model_decoder_layers_1_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[534] model_decoder_layers_1_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[535] model_decoder_layers_1_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[536] model_decoder_layers_2_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[537] model_decoder_layers_2_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[538] model_decoder_layers_2_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[539] model_decoder_layers_2_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[540] model_decoder_layers_2_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[541] model_decoder_layers_2_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[542] 
model_decoder_layers_2_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[543] model_decoder_layers_2_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[544] model_decoder_layers_2_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[545] model_decoder_layers_2_encoder_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[546] model_decoder_layers_2_encoder_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[547] model_decoder_layers_2_encoder_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[548] model_decoder_layers_2_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[549] model_decoder_layers_2_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[550] model_decoder_layers_2_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[551] model_decoder_layers_2_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[552] model_decoder_layers_2_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[553] model_decoder_layers_2_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[554] model_decoder_layers_2_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[555] model_decoder_layers_2_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[556] model_decoder_layers_2_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[557] model_decoder_layers_2_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[558] model_decoder_layers_2_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[559] model_decoder_layers_2_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[560] model_decoder_layers_3_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = 
packed_params[561] model_decoder_layers_3_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[562] model_decoder_layers_3_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[563] model_decoder_layers_3_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[564] model_decoder_layers_3_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[565] model_decoder_layers_3_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[566] model_decoder_layers_3_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[567] model_decoder_layers_3_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[568] model_decoder_layers_3_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[569] model_decoder_layers_3_encoder_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[570] model_decoder_layers_3_encoder_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[571] model_decoder_layers_3_encoder_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[572] model_decoder_layers_3_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[573] model_decoder_layers_3_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[574] model_decoder_layers_3_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[575] model_decoder_layers_3_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[576] model_decoder_layers_3_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[577] model_decoder_layers_3_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[578] model_decoder_layers_3_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[579] 
model_decoder_layers_3_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[580] model_decoder_layers_3_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[581] model_decoder_layers_3_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[582] model_decoder_layers_3_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[583] model_decoder_layers_3_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[584] model_decoder_layers_4_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[585] model_decoder_layers_4_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[586] model_decoder_layers_4_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[587] model_decoder_layers_4_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[588] model_decoder_layers_4_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[589] model_decoder_layers_4_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[590] model_decoder_layers_4_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[591] model_decoder_layers_4_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[592] model_decoder_layers_4_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[593] model_decoder_layers_4_encoder_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[594] model_decoder_layers_4_encoder_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[595] model_decoder_layers_4_encoder_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[596] model_decoder_layers_4_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[597] model_decoder_layers_4_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = 
packed_params[598] model_decoder_layers_4_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[599] model_decoder_layers_4_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[600] model_decoder_layers_4_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[601] model_decoder_layers_4_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[602] model_decoder_layers_4_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[603] model_decoder_layers_4_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[604] model_decoder_layers_4_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[605] model_decoder_layers_4_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[606] model_decoder_layers_4_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[607] model_decoder_layers_4_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[608] model_decoder_layers_5_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[609] model_decoder_layers_5_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[610] model_decoder_layers_5_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[611] model_decoder_layers_5_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[612] model_decoder_layers_5_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[613] model_decoder_layers_5_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[614] model_decoder_layers_5_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[615] model_decoder_layers_5_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[616] model_decoder_layers_5_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = 
packed_params[617] model_decoder_layers_5_encoder_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[618] model_decoder_layers_5_encoder_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[619] model_decoder_layers_5_encoder_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[620] model_decoder_layers_5_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[621] model_decoder_layers_5_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[622] model_decoder_layers_5_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[623] model_decoder_layers_5_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[624] model_decoder_layers_5_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[625] model_decoder_layers_5_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[626] model_decoder_layers_5_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[627] model_decoder_layers_5_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[628] model_decoder_layers_5_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[629] model_decoder_layers_5_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[630] model_decoder_layers_5_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[631] model_decoder_layers_5_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[632] model_decoder_layers_6_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[633] model_decoder_layers_6_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[634] model_decoder_layers_6_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[635] model_decoder_layers_6_self_attn_q_proj_weight3: R.Tensor((1280, 1280), 
dtype="float16") = packed_params[636] model_decoder_layers_6_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[637] model_decoder_layers_6_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[638] model_decoder_layers_6_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[639] model_decoder_layers_6_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[640] model_decoder_layers_6_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[641] model_decoder_layers_6_encoder_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[642] model_decoder_layers_6_encoder_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[643] model_decoder_layers_6_encoder_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[644] model_decoder_layers_6_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[645] model_decoder_layers_6_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[646] model_decoder_layers_6_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[647] model_decoder_layers_6_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[648] model_decoder_layers_6_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[649] model_decoder_layers_6_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[650] model_decoder_layers_6_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[651] model_decoder_layers_6_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[652] model_decoder_layers_6_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[653] model_decoder_layers_6_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[654] model_decoder_layers_6_final_layer_norm_weight3: 
R.Tensor((1280,), dtype="float16") = packed_params[655] model_decoder_layers_6_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[656] model_decoder_layers_7_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[657] model_decoder_layers_7_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[658] model_decoder_layers_7_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[659] model_decoder_layers_7_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[660] model_decoder_layers_7_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[661] model_decoder_layers_7_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[662] model_decoder_layers_7_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[663] model_decoder_layers_7_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[664] model_decoder_layers_7_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[665] model_decoder_layers_7_encoder_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[666] model_decoder_layers_7_encoder_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[667] model_decoder_layers_7_encoder_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[668] model_decoder_layers_7_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[669] model_decoder_layers_7_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[670] model_decoder_layers_7_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[671] model_decoder_layers_7_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[672] model_decoder_layers_7_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = 
packed_params[673] model_decoder_layers_7_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[674] model_decoder_layers_7_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[675] model_decoder_layers_7_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[676] model_decoder_layers_7_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[677] model_decoder_layers_7_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[678] model_decoder_layers_7_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[679] model_decoder_layers_7_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[680] model_decoder_layers_8_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[681] model_decoder_layers_8_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[682] model_decoder_layers_8_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[683] model_decoder_layers_8_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[684] model_decoder_layers_8_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[685] model_decoder_layers_8_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[686] model_decoder_layers_8_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[687] model_decoder_layers_8_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[688] model_decoder_layers_8_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[689] model_decoder_layers_8_encoder_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[690] model_decoder_layers_8_encoder_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[691] model_decoder_layers_8_encoder_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = 
packed_params[692] model_decoder_layers_8_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[693] model_decoder_layers_8_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[694] model_decoder_layers_8_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[695] model_decoder_layers_8_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[696] model_decoder_layers_8_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[697] model_decoder_layers_8_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[698] model_decoder_layers_8_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[699] model_decoder_layers_8_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[700] model_decoder_layers_8_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[701] model_decoder_layers_8_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[702] model_decoder_layers_8_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[703] model_decoder_layers_8_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[704] model_decoder_layers_9_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[705] model_decoder_layers_9_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[706] model_decoder_layers_9_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[707] model_decoder_layers_9_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[708] model_decoder_layers_9_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[709] model_decoder_layers_9_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[710] model_decoder_layers_9_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = 
packed_params[711] model_decoder_layers_9_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[712] model_decoder_layers_9_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[713] model_decoder_layers_9_encoder_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[714] model_decoder_layers_9_encoder_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[715] model_decoder_layers_9_encoder_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[716] model_decoder_layers_9_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[717] model_decoder_layers_9_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[718] model_decoder_layers_9_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[719] model_decoder_layers_9_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[720] model_decoder_layers_9_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[721] model_decoder_layers_9_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[722] model_decoder_layers_9_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[723] model_decoder_layers_9_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[724] model_decoder_layers_9_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[725] model_decoder_layers_9_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[726] model_decoder_layers_9_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[727] model_decoder_layers_9_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[728] model_decoder_layers_10_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[729] model_decoder_layers_10_self_attn_v_proj_weight3: R.Tensor((1280, 1280), 
dtype="float16") = packed_params[730] model_decoder_layers_10_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[731] model_decoder_layers_10_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[732] model_decoder_layers_10_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[733] model_decoder_layers_10_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[734] model_decoder_layers_10_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[735] model_decoder_layers_10_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[736] model_decoder_layers_10_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[737] model_decoder_layers_10_encoder_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[738] model_decoder_layers_10_encoder_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[739] model_decoder_layers_10_encoder_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[740] model_decoder_layers_10_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[741] model_decoder_layers_10_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[742] model_decoder_layers_10_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[743] model_decoder_layers_10_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[744] model_decoder_layers_10_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[745] model_decoder_layers_10_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[746] model_decoder_layers_10_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[747] model_decoder_layers_10_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[748] 
model_decoder_layers_10_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[749] model_decoder_layers_10_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[750] model_decoder_layers_10_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[751] model_decoder_layers_10_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[752] model_decoder_layers_11_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[753] model_decoder_layers_11_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[754] model_decoder_layers_11_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[755] model_decoder_layers_11_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[756] model_decoder_layers_11_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[757] model_decoder_layers_11_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[758] model_decoder_layers_11_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[759] model_decoder_layers_11_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[760] model_decoder_layers_11_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[761] model_decoder_layers_11_encoder_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[762] model_decoder_layers_11_encoder_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[763] model_decoder_layers_11_encoder_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[764] model_decoder_layers_11_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[765] model_decoder_layers_11_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[766] model_decoder_layers_11_encoder_attn_out_proj_weight3: 
R.Tensor((1280, 1280), dtype="float16") = packed_params[767] model_decoder_layers_11_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[768] model_decoder_layers_11_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[769] model_decoder_layers_11_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[770] model_decoder_layers_11_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[771] model_decoder_layers_11_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[772] model_decoder_layers_11_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[773] model_decoder_layers_11_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[774] model_decoder_layers_11_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[775] model_decoder_layers_11_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[776] model_decoder_layers_12_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[777] model_decoder_layers_12_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[778] model_decoder_layers_12_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[779] model_decoder_layers_12_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[780] model_decoder_layers_12_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[781] model_decoder_layers_12_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[782] model_decoder_layers_12_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[783] model_decoder_layers_12_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[784] model_decoder_layers_12_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[785] 
model_decoder_layers_12_encoder_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[786] model_decoder_layers_12_encoder_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[787] model_decoder_layers_12_encoder_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[788] model_decoder_layers_12_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[789] model_decoder_layers_12_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[790] model_decoder_layers_12_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[791] model_decoder_layers_12_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[792] model_decoder_layers_12_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[793] model_decoder_layers_12_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[794] model_decoder_layers_12_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[795] model_decoder_layers_12_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[796] model_decoder_layers_12_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[797] model_decoder_layers_12_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[798] model_decoder_layers_12_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[799] model_decoder_layers_12_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[800] model_decoder_layers_13_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[801] model_decoder_layers_13_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[802] model_decoder_layers_13_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[803] model_decoder_layers_13_self_attn_q_proj_weight3: R.Tensor((1280, 1280), 
dtype="float16") = packed_params[804] model_decoder_layers_13_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[805] model_decoder_layers_13_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[806] model_decoder_layers_13_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[807] model_decoder_layers_13_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[808] model_decoder_layers_13_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[809] model_decoder_layers_13_encoder_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[810] model_decoder_layers_13_encoder_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[811] model_decoder_layers_13_encoder_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[812] model_decoder_layers_13_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[813] model_decoder_layers_13_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[814] model_decoder_layers_13_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[815] model_decoder_layers_13_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[816] model_decoder_layers_13_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[817] model_decoder_layers_13_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[818] model_decoder_layers_13_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[819] model_decoder_layers_13_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[820] model_decoder_layers_13_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[821] model_decoder_layers_13_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[822] 
model_decoder_layers_13_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[823] model_decoder_layers_13_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[824] model_decoder_layers_14_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[825] model_decoder_layers_14_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[826] model_decoder_layers_14_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[827] model_decoder_layers_14_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[828] model_decoder_layers_14_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[829] model_decoder_layers_14_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[830] model_decoder_layers_14_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[831] model_decoder_layers_14_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[832] model_decoder_layers_14_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[833] model_decoder_layers_14_encoder_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[834] model_decoder_layers_14_encoder_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[835] model_decoder_layers_14_encoder_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[836] model_decoder_layers_14_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[837] model_decoder_layers_14_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[838] model_decoder_layers_14_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[839] model_decoder_layers_14_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[840] 
model_decoder_layers_14_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[841] model_decoder_layers_14_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[842] model_decoder_layers_14_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[843] model_decoder_layers_14_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[844] model_decoder_layers_14_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[845] model_decoder_layers_14_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[846] model_decoder_layers_14_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[847] model_decoder_layers_14_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[848] model_decoder_layers_15_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[849] model_decoder_layers_15_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[850] model_decoder_layers_15_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[851] model_decoder_layers_15_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[852] model_decoder_layers_15_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[853] model_decoder_layers_15_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[854] model_decoder_layers_15_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[855] model_decoder_layers_15_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[856] model_decoder_layers_15_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[857] model_decoder_layers_15_encoder_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[858] model_decoder_layers_15_encoder_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = 
packed_params[859] model_decoder_layers_15_encoder_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[860] model_decoder_layers_15_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[861] model_decoder_layers_15_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[862] model_decoder_layers_15_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[863] model_decoder_layers_15_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[864] model_decoder_layers_15_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[865] model_decoder_layers_15_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[866] model_decoder_layers_15_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[867] model_decoder_layers_15_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[868] model_decoder_layers_15_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[869] model_decoder_layers_15_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[870] model_decoder_layers_15_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[871] model_decoder_layers_15_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[872] model_decoder_layers_16_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[873] model_decoder_layers_16_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[874] model_decoder_layers_16_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[875] model_decoder_layers_16_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[876] model_decoder_layers_16_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[877] model_decoder_layers_16_self_attn_out_proj_weight3: R.Tensor((1280, 1280), 
dtype="float16") = packed_params[878] model_decoder_layers_16_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[879] model_decoder_layers_16_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[880] model_decoder_layers_16_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[881] model_decoder_layers_16_encoder_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[882] model_decoder_layers_16_encoder_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[883] model_decoder_layers_16_encoder_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[884] model_decoder_layers_16_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[885] model_decoder_layers_16_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[886] model_decoder_layers_16_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[887] model_decoder_layers_16_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[888] model_decoder_layers_16_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[889] model_decoder_layers_16_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[890] model_decoder_layers_16_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[891] model_decoder_layers_16_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[892] model_decoder_layers_16_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[893] model_decoder_layers_16_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[894] model_decoder_layers_16_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[895] model_decoder_layers_16_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[896] 
model_decoder_layers_17_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[897] model_decoder_layers_17_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[898] model_decoder_layers_17_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[899] model_decoder_layers_17_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[900] model_decoder_layers_17_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[901] model_decoder_layers_17_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[902] model_decoder_layers_17_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[903] model_decoder_layers_17_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[904] model_decoder_layers_17_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[905] model_decoder_layers_17_encoder_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[906] model_decoder_layers_17_encoder_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[907] model_decoder_layers_17_encoder_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[908] model_decoder_layers_17_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[909] model_decoder_layers_17_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[910] model_decoder_layers_17_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[911] model_decoder_layers_17_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[912] model_decoder_layers_17_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[913] model_decoder_layers_17_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[914] 
model_decoder_layers_17_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[915] model_decoder_layers_17_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[916] model_decoder_layers_17_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[917] model_decoder_layers_17_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[918] model_decoder_layers_17_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[919] model_decoder_layers_17_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[920] model_decoder_layers_18_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[921] model_decoder_layers_18_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[922] model_decoder_layers_18_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[923] model_decoder_layers_18_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[924] model_decoder_layers_18_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[925] model_decoder_layers_18_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[926] model_decoder_layers_18_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[927] model_decoder_layers_18_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[928] model_decoder_layers_18_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[929] model_decoder_layers_18_encoder_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[930] model_decoder_layers_18_encoder_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[931] model_decoder_layers_18_encoder_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[932] model_decoder_layers_18_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = 
packed_params[933] model_decoder_layers_18_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[934] model_decoder_layers_18_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[935] model_decoder_layers_18_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[936] model_decoder_layers_18_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[937] model_decoder_layers_18_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[938] model_decoder_layers_18_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[939] model_decoder_layers_18_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[940] model_decoder_layers_18_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[941] model_decoder_layers_18_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[942] model_decoder_layers_18_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[943] model_decoder_layers_18_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[944] model_decoder_layers_19_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[945] model_decoder_layers_19_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[946] model_decoder_layers_19_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[947] model_decoder_layers_19_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[948] model_decoder_layers_19_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[949] model_decoder_layers_19_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[950] model_decoder_layers_19_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[951] model_decoder_layers_19_self_attn_layer_norm_weight3: R.Tensor((1280,), 
dtype="float16") = packed_params[952] model_decoder_layers_19_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[953] model_decoder_layers_19_encoder_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[954] model_decoder_layers_19_encoder_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[955] model_decoder_layers_19_encoder_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[956] model_decoder_layers_19_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[957] model_decoder_layers_19_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[958] model_decoder_layers_19_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[959] model_decoder_layers_19_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[960] model_decoder_layers_19_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[961] model_decoder_layers_19_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[962] model_decoder_layers_19_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[963] model_decoder_layers_19_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[964] model_decoder_layers_19_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[965] model_decoder_layers_19_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[966] model_decoder_layers_19_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[967] model_decoder_layers_19_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[968] model_decoder_layers_20_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[969] model_decoder_layers_20_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[970] 
model_decoder_layers_20_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[971] model_decoder_layers_20_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[972] model_decoder_layers_20_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[973] model_decoder_layers_20_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[974] model_decoder_layers_20_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[975] model_decoder_layers_20_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[976] model_decoder_layers_20_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[977] model_decoder_layers_20_encoder_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[978] model_decoder_layers_20_encoder_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[979] model_decoder_layers_20_encoder_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[980] model_decoder_layers_20_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[981] model_decoder_layers_20_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[982] model_decoder_layers_20_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[983] model_decoder_layers_20_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[984] model_decoder_layers_20_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[985] model_decoder_layers_20_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[986] model_decoder_layers_20_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[987] model_decoder_layers_20_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[988] model_decoder_layers_20_fc2_weight3: 
R.Tensor((1280, 5120), dtype="float16") = packed_params[989] model_decoder_layers_20_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[990] model_decoder_layers_20_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[991] model_decoder_layers_20_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[992] model_decoder_layers_21_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[993] model_decoder_layers_21_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[994] model_decoder_layers_21_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[995] model_decoder_layers_21_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[996] model_decoder_layers_21_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[997] model_decoder_layers_21_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[998] model_decoder_layers_21_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[999] model_decoder_layers_21_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1000] model_decoder_layers_21_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1001] model_decoder_layers_21_encoder_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1002] model_decoder_layers_21_encoder_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1003] model_decoder_layers_21_encoder_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1004] model_decoder_layers_21_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1005] model_decoder_layers_21_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1006] model_decoder_layers_21_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = 
packed_params[1007] model_decoder_layers_21_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1008] model_decoder_layers_21_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1009] model_decoder_layers_21_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1010] model_decoder_layers_21_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[1011] model_decoder_layers_21_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[1012] model_decoder_layers_21_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[1013] model_decoder_layers_21_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1014] model_decoder_layers_21_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1015] model_decoder_layers_21_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1016] model_decoder_layers_22_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1017] model_decoder_layers_22_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1018] model_decoder_layers_22_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1019] model_decoder_layers_22_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1020] model_decoder_layers_22_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1021] model_decoder_layers_22_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1022] model_decoder_layers_22_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1023] model_decoder_layers_22_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1024] model_decoder_layers_22_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1025] model_decoder_layers_22_encoder_attn_k_proj_weight3: 
R.Tensor((1280, 1280), dtype="float16") = packed_params[1026] model_decoder_layers_22_encoder_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1027] model_decoder_layers_22_encoder_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1028] model_decoder_layers_22_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1029] model_decoder_layers_22_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1030] model_decoder_layers_22_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1031] model_decoder_layers_22_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1032] model_decoder_layers_22_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1033] model_decoder_layers_22_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1034] model_decoder_layers_22_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[1035] model_decoder_layers_22_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[1036] model_decoder_layers_22_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[1037] model_decoder_layers_22_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1038] model_decoder_layers_22_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1039] model_decoder_layers_22_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1040] model_decoder_layers_23_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1041] model_decoder_layers_23_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1042] model_decoder_layers_23_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1043] model_decoder_layers_23_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1044] 
model_decoder_layers_23_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1045] model_decoder_layers_23_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1046] model_decoder_layers_23_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1047] model_decoder_layers_23_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1048] model_decoder_layers_23_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1049] model_decoder_layers_23_encoder_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1050] model_decoder_layers_23_encoder_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1051] model_decoder_layers_23_encoder_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1052] model_decoder_layers_23_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1053] model_decoder_layers_23_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1054] model_decoder_layers_23_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1055] model_decoder_layers_23_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1056] model_decoder_layers_23_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1057] model_decoder_layers_23_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1058] model_decoder_layers_23_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[1059] model_decoder_layers_23_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[1060] model_decoder_layers_23_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[1061] model_decoder_layers_23_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1062] model_decoder_layers_23_final_layer_norm_weight3: 
R.Tensor((1280,), dtype="float16") = packed_params[1063] model_decoder_layers_23_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1064] model_decoder_layers_24_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1065] model_decoder_layers_24_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1066] model_decoder_layers_24_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1067] model_decoder_layers_24_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1068] model_decoder_layers_24_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1069] model_decoder_layers_24_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1070] model_decoder_layers_24_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1071] model_decoder_layers_24_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1072] model_decoder_layers_24_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1073] model_decoder_layers_24_encoder_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1074] model_decoder_layers_24_encoder_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1075] model_decoder_layers_24_encoder_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1076] model_decoder_layers_24_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1077] model_decoder_layers_24_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1078] model_decoder_layers_24_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1079] model_decoder_layers_24_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1080] model_decoder_layers_24_encoder_attn_layer_norm_weight3: 
R.Tensor((1280,), dtype="float16") = packed_params[1081] model_decoder_layers_24_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1082] model_decoder_layers_24_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[1083] model_decoder_layers_24_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[1084] model_decoder_layers_24_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[1085] model_decoder_layers_24_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1086] model_decoder_layers_24_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1087] model_decoder_layers_24_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1088] model_decoder_layers_25_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1089] model_decoder_layers_25_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1090] model_decoder_layers_25_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1091] model_decoder_layers_25_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1092] model_decoder_layers_25_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1093] model_decoder_layers_25_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1094] model_decoder_layers_25_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1095] model_decoder_layers_25_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1096] model_decoder_layers_25_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1097] model_decoder_layers_25_encoder_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1098] model_decoder_layers_25_encoder_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1099] 
model_decoder_layers_25_encoder_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1100] model_decoder_layers_25_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1101] model_decoder_layers_25_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1102] model_decoder_layers_25_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1103] model_decoder_layers_25_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1104] model_decoder_layers_25_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1105] model_decoder_layers_25_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1106] model_decoder_layers_25_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[1107] model_decoder_layers_25_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[1108] model_decoder_layers_25_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[1109] model_decoder_layers_25_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1110] model_decoder_layers_25_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1111] model_decoder_layers_25_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1112] model_decoder_layers_26_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1113] model_decoder_layers_26_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1114] model_decoder_layers_26_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1115] model_decoder_layers_26_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1116] model_decoder_layers_26_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1117] model_decoder_layers_26_self_attn_out_proj_weight3: R.Tensor((1280, 1280), 
dtype="float16") = packed_params[1118] model_decoder_layers_26_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1119] model_decoder_layers_26_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1120] model_decoder_layers_26_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1121] model_decoder_layers_26_encoder_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1122] model_decoder_layers_26_encoder_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1123] model_decoder_layers_26_encoder_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1124] model_decoder_layers_26_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1125] model_decoder_layers_26_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1126] model_decoder_layers_26_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1127] model_decoder_layers_26_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1128] model_decoder_layers_26_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1129] model_decoder_layers_26_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1130] model_decoder_layers_26_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[1131] model_decoder_layers_26_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[1132] model_decoder_layers_26_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[1133] model_decoder_layers_26_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1134] model_decoder_layers_26_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1135] model_decoder_layers_26_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1136] 
model_decoder_layers_27_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1137] model_decoder_layers_27_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1138] model_decoder_layers_27_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1139] model_decoder_layers_27_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1140] model_decoder_layers_27_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1141] model_decoder_layers_27_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1142] model_decoder_layers_27_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1143] model_decoder_layers_27_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1144] model_decoder_layers_27_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1145] model_decoder_layers_27_encoder_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1146] model_decoder_layers_27_encoder_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1147] model_decoder_layers_27_encoder_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1148] model_decoder_layers_27_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1149] model_decoder_layers_27_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1150] model_decoder_layers_27_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1151] model_decoder_layers_27_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1152] model_decoder_layers_27_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1153] model_decoder_layers_27_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = 
packed_params[1154] model_decoder_layers_27_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[1155] model_decoder_layers_27_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[1156] model_decoder_layers_27_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[1157] model_decoder_layers_27_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1158] model_decoder_layers_27_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1159] model_decoder_layers_27_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1160] model_decoder_layers_28_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1161] model_decoder_layers_28_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1162] model_decoder_layers_28_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1163] model_decoder_layers_28_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1164] model_decoder_layers_28_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1165] model_decoder_layers_28_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1166] model_decoder_layers_28_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1167] model_decoder_layers_28_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1168] model_decoder_layers_28_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1169] model_decoder_layers_28_encoder_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1170] model_decoder_layers_28_encoder_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1171] model_decoder_layers_28_encoder_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1172] model_decoder_layers_28_encoder_attn_q_proj_weight3: 
R.Tensor((1280, 1280), dtype="float16") = packed_params[1173] model_decoder_layers_28_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1174] model_decoder_layers_28_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1175] model_decoder_layers_28_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1176] model_decoder_layers_28_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1177] model_decoder_layers_28_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1178] model_decoder_layers_28_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[1179] model_decoder_layers_28_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[1180] model_decoder_layers_28_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[1181] model_decoder_layers_28_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1182] model_decoder_layers_28_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1183] model_decoder_layers_28_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1184] model_decoder_layers_29_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1185] model_decoder_layers_29_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1186] model_decoder_layers_29_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1187] model_decoder_layers_29_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1188] model_decoder_layers_29_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1189] model_decoder_layers_29_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1190] model_decoder_layers_29_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1191] 
model_decoder_layers_29_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1192] model_decoder_layers_29_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1193] model_decoder_layers_29_encoder_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1194] model_decoder_layers_29_encoder_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1195] model_decoder_layers_29_encoder_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1196] model_decoder_layers_29_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1197] model_decoder_layers_29_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1198] model_decoder_layers_29_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1199] model_decoder_layers_29_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1200] model_decoder_layers_29_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1201] model_decoder_layers_29_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1202] model_decoder_layers_29_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[1203] model_decoder_layers_29_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[1204] model_decoder_layers_29_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[1205] model_decoder_layers_29_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1206] model_decoder_layers_29_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1207] model_decoder_layers_29_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1208] model_decoder_layers_30_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1209] model_decoder_layers_30_self_attn_v_proj_weight3: 
R.Tensor((1280, 1280), dtype="float16") = packed_params[1210] model_decoder_layers_30_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1211] model_decoder_layers_30_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1212] model_decoder_layers_30_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1213] model_decoder_layers_30_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1214] model_decoder_layers_30_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1215] model_decoder_layers_30_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1216] model_decoder_layers_30_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1217] model_decoder_layers_30_encoder_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1218] model_decoder_layers_30_encoder_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1219] model_decoder_layers_30_encoder_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1220] model_decoder_layers_30_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1221] model_decoder_layers_30_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1222] model_decoder_layers_30_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1223] model_decoder_layers_30_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1224] model_decoder_layers_30_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1225] model_decoder_layers_30_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1226] model_decoder_layers_30_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[1227] model_decoder_layers_30_fc1_bias3: R.Tensor((5120,), 
dtype="float16") = packed_params[1228] model_decoder_layers_30_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[1229] model_decoder_layers_30_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1230] model_decoder_layers_30_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1231] model_decoder_layers_30_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1232] model_decoder_layers_31_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1233] model_decoder_layers_31_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1234] model_decoder_layers_31_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1235] model_decoder_layers_31_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1236] model_decoder_layers_31_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1237] model_decoder_layers_31_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1238] model_decoder_layers_31_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1239] model_decoder_layers_31_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1240] model_decoder_layers_31_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1241] model_decoder_layers_31_encoder_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1242] model_decoder_layers_31_encoder_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1243] model_decoder_layers_31_encoder_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1244] model_decoder_layers_31_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1245] model_decoder_layers_31_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1246] 
model_decoder_layers_31_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1247] model_decoder_layers_31_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1248] model_decoder_layers_31_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1249] model_decoder_layers_31_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1250] model_decoder_layers_31_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[1251] model_decoder_layers_31_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[1252] model_decoder_layers_31_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[1253] model_decoder_layers_31_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1254] model_decoder_layers_31_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1255] model_decoder_layers_31_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1256] model_decoder_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1257] model_decoder_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1258] reshape707: R.Tensor((batch_size,), dtype="int32") = R.reshape(input_ids, R.shape([batch_size])) take3: R.Tensor((batch_size, 1280), dtype="float16") = R.take(model_decoder_embed_tokens_weight3, reshape707, axis=0) reshape708: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(take3, R.shape([batch_size, 1, 1280])) lv133: R.Tensor((batch_size,), dtype="int32") = R.call_pure_packed("vm.builtin.attention_kv_cache_get_query_positions", paged_kv_cache, sinfo_args=(R.Tensor((batch_size,), dtype="int32"),)) take4: R.Tensor((batch_size, 1280), dtype="float16") = R.take(model_decoder_embed_positions_weight3, lv133, axis=0) reshape709: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(take4, R.shape([batch_size, 1, 1280])) add578: 
R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(reshape708, reshape709) layer_norm162: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add578, model_decoder_layers_0_self_attn_layer_norm_weight3, model_decoder_layers_0_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims514: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_0_self_attn_q_proj_weight3, axes=None) matmul513: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm162, permute_dims514, out_dtype="void") add579: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul513, model_decoder_layers_0_self_attn_q_proj_bias3) reshape710: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add579, R.shape([batch_size, 1, 20, 64])) permute_dims515: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_0_self_attn_k_proj_weight3, axes=None) matmul514: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm162, permute_dims515, out_dtype="void") reshape711: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(matmul514, R.shape([batch_size, 1, 20, 64])) permute_dims516: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_0_self_attn_v_proj_weight3, axes=None) matmul515: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm162, permute_dims516, out_dtype="void") add580: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul515, model_decoder_layers_0_self_attn_v_proj_bias3) reshape712: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add580, R.shape([batch_size, 1, 20, 64])) concat32: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape710, reshape711, reshape712), axis=2) reshape713: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat32, R.shape([batch_size, 60, 64])) lv134 = 
R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(0), R.prim_value(T.float32(1)), reshape713), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape714: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv134, R.shape([batch_size, 1, 20, 64])) reshape715: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape714, R.shape([batch_size, 1, 1280])) permute_dims517: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_0_self_attn_out_proj_weight3, axes=None) matmul516: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape715, permute_dims517, out_dtype="void") add581: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul516, model_decoder_layers_0_self_attn_out_proj_bias3) add582: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add578, add581) layer_norm163: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add582, model_decoder_layers_0_encoder_attn_layer_norm_weight3, model_decoder_layers_0_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims518: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_0_encoder_attn_q_proj_weight3, axes=None) matmul517: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm163, permute_dims518, out_dtype="void") add583: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul517, model_decoder_layers_0_encoder_attn_q_proj_bias3) reshape716: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add583, R.shape([batch_size, 1, 20, 64])) reshape717: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape716, R.shape([batch_size, 20, 64])) lv135 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(0), R.prim_value(T.float32(1)), reshape717), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) 
reshape718: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv135, R.shape([batch_size, 1, 20, 64])) reshape719: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape718, R.shape([batch_size, 1, 1280])) permute_dims519: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_0_encoder_attn_out_proj_weight3, axes=None) matmul518: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape719, permute_dims519, out_dtype="void") add584: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul518, model_decoder_layers_0_encoder_attn_out_proj_bias3) add585: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add582, add584) layer_norm164: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add585, model_decoder_layers_0_final_layer_norm_weight3, model_decoder_layers_0_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims520: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_0_fc1_weight3, axes=None) matmul519: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.matmul(layer_norm164, permute_dims520, out_dtype="void") add586: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.add(matmul519, model_decoder_layers_0_fc1_bias3) gelu66: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.nn.gelu(add586) permute_dims521: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_0_fc2_weight3, axes=None) matmul520: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(gelu66, permute_dims521, out_dtype="void") add587: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul520, model_decoder_layers_0_fc2_bias3) add588: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add585, add587) layer_norm165: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add588, model_decoder_layers_1_self_attn_layer_norm_weight3, 
model_decoder_layers_1_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims522: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_1_self_attn_q_proj_weight3, axes=None) matmul521: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm165, permute_dims522, out_dtype="void") add589: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul521, model_decoder_layers_1_self_attn_q_proj_bias3) reshape720: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add589, R.shape([batch_size, 1, 20, 64])) permute_dims523: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_1_self_attn_k_proj_weight3, axes=None) matmul522: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm165, permute_dims523, out_dtype="void") reshape721: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(matmul522, R.shape([batch_size, 1, 20, 64])) permute_dims524: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_1_self_attn_v_proj_weight3, axes=None) matmul523: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm165, permute_dims524, out_dtype="void") add590: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul523, model_decoder_layers_1_self_attn_v_proj_bias3) reshape722: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add590, R.shape([batch_size, 1, 20, 64])) concat33: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape720, reshape721, reshape722), axis=2) reshape723: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat33, R.shape([batch_size, 60, 64])) lv136 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(1), R.prim_value(T.float32(1)), reshape723), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape724: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = 
R.reshape(lv136, R.shape([batch_size, 1, 20, 64])) reshape725: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape724, R.shape([batch_size, 1, 1280])) permute_dims525: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_1_self_attn_out_proj_weight3, axes=None) matmul524: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape725, permute_dims525, out_dtype="void") add591: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul524, model_decoder_layers_1_self_attn_out_proj_bias3) add592: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add588, add591) layer_norm166: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add592, model_decoder_layers_1_encoder_attn_layer_norm_weight3, model_decoder_layers_1_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims526: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_1_encoder_attn_q_proj_weight3, axes=None) matmul525: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm166, permute_dims526, out_dtype="void") add593: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul525, model_decoder_layers_1_encoder_attn_q_proj_bias3) reshape726: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add593, R.shape([batch_size, 1, 20, 64])) reshape727: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape726, R.shape([batch_size, 20, 64])) lv137 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(1), R.prim_value(T.float32(1)), reshape727), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape728: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv137, R.shape([batch_size, 1, 20, 64])) reshape729: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape728, R.shape([batch_size, 1, 1280])) permute_dims527: R.Tensor((1280, 1280), 
dtype="float16") = R.permute_dims(model_decoder_layers_1_encoder_attn_out_proj_weight3, axes=None) matmul526: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape729, permute_dims527, out_dtype="void") add594: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul526, model_decoder_layers_1_encoder_attn_out_proj_bias3) add595: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add592, add594) layer_norm167: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add595, model_decoder_layers_1_final_layer_norm_weight3, model_decoder_layers_1_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims528: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_1_fc1_weight3, axes=None) matmul527: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.matmul(layer_norm167, permute_dims528, out_dtype="void") add596: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.add(matmul527, model_decoder_layers_1_fc1_bias3) gelu67: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.nn.gelu(add596) permute_dims529: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_1_fc2_weight3, axes=None) matmul528: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(gelu67, permute_dims529, out_dtype="void") add597: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul528, model_decoder_layers_1_fc2_bias3) add598: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add595, add597) layer_norm168: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add598, model_decoder_layers_2_self_attn_layer_norm_weight3, model_decoder_layers_2_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims530: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_2_self_attn_q_proj_weight3, axes=None) matmul529: R.Tensor((batch_size, 1, 1280), dtype="float16") = 
R.matmul(layer_norm168, permute_dims530, out_dtype="void") add599: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul529, model_decoder_layers_2_self_attn_q_proj_bias3) reshape730: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add599, R.shape([batch_size, 1, 20, 64])) permute_dims531: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_2_self_attn_k_proj_weight3, axes=None) matmul530: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm168, permute_dims531, out_dtype="void") reshape731: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(matmul530, R.shape([batch_size, 1, 20, 64])) permute_dims532: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_2_self_attn_v_proj_weight3, axes=None) matmul531: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm168, permute_dims532, out_dtype="void") add600: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul531, model_decoder_layers_2_self_attn_v_proj_bias3) reshape732: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add600, R.shape([batch_size, 1, 20, 64])) concat34: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape730, reshape731, reshape732), axis=2) reshape733: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat34, R.shape([batch_size, 60, 64])) lv138 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(2), R.prim_value(T.float32(1)), reshape733), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape734: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv138, R.shape([batch_size, 1, 20, 64])) reshape735: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape734, R.shape([batch_size, 1, 1280])) permute_dims533: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_2_self_attn_out_proj_weight3, axes=None) matmul532: 
R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape735, permute_dims533, out_dtype="void") add601: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul532, model_decoder_layers_2_self_attn_out_proj_bias3) add602: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add598, add601) layer_norm169: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add602, model_decoder_layers_2_encoder_attn_layer_norm_weight3, model_decoder_layers_2_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims534: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_2_encoder_attn_q_proj_weight3, axes=None) matmul533: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm169, permute_dims534, out_dtype="void") add603: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul533, model_decoder_layers_2_encoder_attn_q_proj_bias3) reshape736: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add603, R.shape([batch_size, 1, 20, 64])) reshape737: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape736, R.shape([batch_size, 20, 64])) lv139 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(2), R.prim_value(T.float32(1)), reshape737), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape738: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv139, R.shape([batch_size, 1, 20, 64])) reshape739: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape738, R.shape([batch_size, 1, 1280])) permute_dims535: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_2_encoder_attn_out_proj_weight3, axes=None) matmul534: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape739, permute_dims535, out_dtype="void") add604: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul534, 
model_decoder_layers_2_encoder_attn_out_proj_bias3) add605: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add602, add604) layer_norm170: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add605, model_decoder_layers_2_final_layer_norm_weight3, model_decoder_layers_2_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims536: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_2_fc1_weight3, axes=None) matmul535: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.matmul(layer_norm170, permute_dims536, out_dtype="void") add606: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.add(matmul535, model_decoder_layers_2_fc1_bias3) gelu68: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.nn.gelu(add606) permute_dims537: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_2_fc2_weight3, axes=None) matmul536: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(gelu68, permute_dims537, out_dtype="void") add607: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul536, model_decoder_layers_2_fc2_bias3) add608: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add605, add607) layer_norm171: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add608, model_decoder_layers_3_self_attn_layer_norm_weight3, model_decoder_layers_3_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims538: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_3_self_attn_q_proj_weight3, axes=None) matmul537: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm171, permute_dims538, out_dtype="void") add609: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul537, model_decoder_layers_3_self_attn_q_proj_bias3) reshape740: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add609, R.shape([batch_size, 1, 20, 64])) 
permute_dims539: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_3_self_attn_k_proj_weight3, axes=None) matmul538: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm171, permute_dims539, out_dtype="void") reshape741: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(matmul538, R.shape([batch_size, 1, 20, 64])) permute_dims540: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_3_self_attn_v_proj_weight3, axes=None) matmul539: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm171, permute_dims540, out_dtype="void") add610: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul539, model_decoder_layers_3_self_attn_v_proj_bias3) reshape742: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add610, R.shape([batch_size, 1, 20, 64])) concat35: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape740, reshape741, reshape742), axis=2) reshape743: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat35, R.shape([batch_size, 60, 64])) lv140 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(3), R.prim_value(T.float32(1)), reshape743), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape744: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv140, R.shape([batch_size, 1, 20, 64])) reshape745: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape744, R.shape([batch_size, 1, 1280])) permute_dims541: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_3_self_attn_out_proj_weight3, axes=None) matmul540: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape745, permute_dims541, out_dtype="void") add611: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul540, model_decoder_layers_3_self_attn_out_proj_bias3) add612: R.Tensor((batch_size, 1, 1280), dtype="float16") = 
R.add(add608, add611) layer_norm172: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add612, model_decoder_layers_3_encoder_attn_layer_norm_weight3, model_decoder_layers_3_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims542: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_3_encoder_attn_q_proj_weight3, axes=None) matmul541: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm172, permute_dims542, out_dtype="void") add613: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul541, model_decoder_layers_3_encoder_attn_q_proj_bias3) reshape746: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add613, R.shape([batch_size, 1, 20, 64])) reshape747: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape746, R.shape([batch_size, 20, 64])) lv141 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(3), R.prim_value(T.float32(1)), reshape747), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape748: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv141, R.shape([batch_size, 1, 20, 64])) reshape749: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape748, R.shape([batch_size, 1, 1280])) permute_dims543: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_3_encoder_attn_out_proj_weight3, axes=None) matmul542: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape749, permute_dims543, out_dtype="void") add614: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul542, model_decoder_layers_3_encoder_attn_out_proj_bias3) add615: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add612, add614) layer_norm173: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add615, model_decoder_layers_3_final_layer_norm_weight3, model_decoder_layers_3_final_layer_norm_bias3, 
axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims544: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_3_fc1_weight3, axes=None) matmul543: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.matmul(layer_norm173, permute_dims544, out_dtype="void") add616: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.add(matmul543, model_decoder_layers_3_fc1_bias3) gelu69: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.nn.gelu(add616) permute_dims545: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_3_fc2_weight3, axes=None) matmul544: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(gelu69, permute_dims545, out_dtype="void") add617: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul544, model_decoder_layers_3_fc2_bias3) add618: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add615, add617) layer_norm174: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add618, model_decoder_layers_4_self_attn_layer_norm_weight3, model_decoder_layers_4_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims546: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_4_self_attn_q_proj_weight3, axes=None) matmul545: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm174, permute_dims546, out_dtype="void") add619: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul545, model_decoder_layers_4_self_attn_q_proj_bias3) reshape750: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add619, R.shape([batch_size, 1, 20, 64])) permute_dims547: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_4_self_attn_k_proj_weight3, axes=None) matmul546: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm174, permute_dims547, out_dtype="void") reshape751: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = 
R.reshape(matmul546, R.shape([batch_size, 1, 20, 64])) permute_dims548: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_4_self_attn_v_proj_weight3, axes=None) matmul547: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm174, permute_dims548, out_dtype="void") add620: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul547, model_decoder_layers_4_self_attn_v_proj_bias3) reshape752: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add620, R.shape([batch_size, 1, 20, 64])) concat36: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape750, reshape751, reshape752), axis=2) reshape753: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat36, R.shape([batch_size, 60, 64])) lv142 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(4), R.prim_value(T.float32(1)), reshape753), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape754: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv142, R.shape([batch_size, 1, 20, 64])) reshape755: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape754, R.shape([batch_size, 1, 1280])) permute_dims549: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_4_self_attn_out_proj_weight3, axes=None) matmul548: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape755, permute_dims549, out_dtype="void") add621: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul548, model_decoder_layers_4_self_attn_out_proj_bias3) add622: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add618, add621) layer_norm175: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add622, model_decoder_layers_4_encoder_attn_layer_norm_weight3, model_decoder_layers_4_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims550: R.Tensor((1280, 1280), 
dtype="float16") = R.permute_dims(model_decoder_layers_4_encoder_attn_q_proj_weight3, axes=None) matmul549: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm175, permute_dims550, out_dtype="void") add623: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul549, model_decoder_layers_4_encoder_attn_q_proj_bias3) reshape756: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add623, R.shape([batch_size, 1, 20, 64])) reshape757: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape756, R.shape([batch_size, 20, 64])) lv143 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(4), R.prim_value(T.float32(1)), reshape757), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape758: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv143, R.shape([batch_size, 1, 20, 64])) reshape759: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape758, R.shape([batch_size, 1, 1280])) permute_dims551: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_4_encoder_attn_out_proj_weight3, axes=None) matmul550: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape759, permute_dims551, out_dtype="void") add624: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul550, model_decoder_layers_4_encoder_attn_out_proj_bias3) add625: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add622, add624) layer_norm176: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add625, model_decoder_layers_4_final_layer_norm_weight3, model_decoder_layers_4_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims552: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_4_fc1_weight3, axes=None) matmul551: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.matmul(layer_norm176, permute_dims552, out_dtype="void") add626: 
R.Tensor((batch_size, 1, 5120), dtype="float16") = R.add(matmul551, model_decoder_layers_4_fc1_bias3) gelu70: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.nn.gelu(add626) permute_dims553: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_4_fc2_weight3, axes=None) matmul552: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(gelu70, permute_dims553, out_dtype="void") add627: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul552, model_decoder_layers_4_fc2_bias3) add628: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add625, add627) layer_norm177: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add628, model_decoder_layers_5_self_attn_layer_norm_weight3, model_decoder_layers_5_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims554: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_5_self_attn_q_proj_weight3, axes=None) matmul553: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm177, permute_dims554, out_dtype="void") add629: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul553, model_decoder_layers_5_self_attn_q_proj_bias3) reshape760: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add629, R.shape([batch_size, 1, 20, 64])) permute_dims555: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_5_self_attn_k_proj_weight3, axes=None) matmul554: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm177, permute_dims555, out_dtype="void") reshape761: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(matmul554, R.shape([batch_size, 1, 20, 64])) permute_dims556: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_5_self_attn_v_proj_weight3, axes=None) matmul555: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm177, permute_dims556, out_dtype="void") add630: 
R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul555, model_decoder_layers_5_self_attn_v_proj_bias3) reshape762: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add630, R.shape([batch_size, 1, 20, 64])) concat37: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape760, reshape761, reshape762), axis=2) reshape763: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat37, R.shape([batch_size, 60, 64])) lv144 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(5), R.prim_value(T.float32(1)), reshape763), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape764: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv144, R.shape([batch_size, 1, 20, 64])) reshape765: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape764, R.shape([batch_size, 1, 1280])) permute_dims557: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_5_self_attn_out_proj_weight3, axes=None) matmul556: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape765, permute_dims557, out_dtype="void") add631: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul556, model_decoder_layers_5_self_attn_out_proj_bias3) add632: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add628, add631) layer_norm178: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add632, model_decoder_layers_5_encoder_attn_layer_norm_weight3, model_decoder_layers_5_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims558: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_5_encoder_attn_q_proj_weight3, axes=None) matmul557: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm178, permute_dims558, out_dtype="void") add633: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul557, 
model_decoder_layers_5_encoder_attn_q_proj_bias3) reshape766: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add633, R.shape([batch_size, 1, 20, 64])) reshape767: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape766, R.shape([batch_size, 20, 64])) lv145 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(5), R.prim_value(T.float32(1)), reshape767), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape768: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv145, R.shape([batch_size, 1, 20, 64])) reshape769: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape768, R.shape([batch_size, 1, 1280])) permute_dims559: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_5_encoder_attn_out_proj_weight3, axes=None) matmul558: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape769, permute_dims559, out_dtype="void") add634: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul558, model_decoder_layers_5_encoder_attn_out_proj_bias3) add635: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add632, add634) layer_norm179: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add635, model_decoder_layers_5_final_layer_norm_weight3, model_decoder_layers_5_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims560: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_5_fc1_weight3, axes=None) matmul559: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.matmul(layer_norm179, permute_dims560, out_dtype="void") add636: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.add(matmul559, model_decoder_layers_5_fc1_bias3) gelu71: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.nn.gelu(add636) permute_dims561: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_5_fc2_weight3, axes=None) 
matmul560: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(gelu71, permute_dims561, out_dtype="void") add637: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul560, model_decoder_layers_5_fc2_bias3) add638: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add635, add637) layer_norm180: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add638, model_decoder_layers_6_self_attn_layer_norm_weight3, model_decoder_layers_6_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims562: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_6_self_attn_q_proj_weight3, axes=None) matmul561: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm180, permute_dims562, out_dtype="void") add639: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul561, model_decoder_layers_6_self_attn_q_proj_bias3) reshape770: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add639, R.shape([batch_size, 1, 20, 64])) permute_dims563: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_6_self_attn_k_proj_weight3, axes=None) matmul562: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm180, permute_dims563, out_dtype="void") reshape771: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(matmul562, R.shape([batch_size, 1, 20, 64])) permute_dims564: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_6_self_attn_v_proj_weight3, axes=None) matmul563: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm180, permute_dims564, out_dtype="void") add640: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul563, model_decoder_layers_6_self_attn_v_proj_bias3) reshape772: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add640, R.shape([batch_size, 1, 20, 64])) concat38: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = 
R.concat((reshape770, reshape771, reshape772), axis=2) reshape773: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat38, R.shape([batch_size, 60, 64])) lv146 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(6), R.prim_value(T.float32(1)), reshape773), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape774: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv146, R.shape([batch_size, 1, 20, 64])) reshape775: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape774, R.shape([batch_size, 1, 1280])) permute_dims565: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_6_self_attn_out_proj_weight3, axes=None) matmul564: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape775, permute_dims565, out_dtype="void") add641: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul564, model_decoder_layers_6_self_attn_out_proj_bias3) add642: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add638, add641) layer_norm181: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add642, model_decoder_layers_6_encoder_attn_layer_norm_weight3, model_decoder_layers_6_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims566: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_6_encoder_attn_q_proj_weight3, axes=None) matmul565: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm181, permute_dims566, out_dtype="void") add643: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul565, model_decoder_layers_6_encoder_attn_q_proj_bias3) reshape776: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add643, R.shape([batch_size, 1, 20, 64])) reshape777: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape776, R.shape([batch_size, 20, 64])) lv147 = 
R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(6), R.prim_value(T.float32(1)), reshape777), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape778: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv147, R.shape([batch_size, 1, 20, 64])) reshape779: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape778, R.shape([batch_size, 1, 1280])) permute_dims567: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_6_encoder_attn_out_proj_weight3, axes=None) matmul566: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape779, permute_dims567, out_dtype="void") add644: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul566, model_decoder_layers_6_encoder_attn_out_proj_bias3) add645: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add642, add644) layer_norm182: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add645, model_decoder_layers_6_final_layer_norm_weight3, model_decoder_layers_6_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims568: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_6_fc1_weight3, axes=None) matmul567: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.matmul(layer_norm182, permute_dims568, out_dtype="void") add646: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.add(matmul567, model_decoder_layers_6_fc1_bias3) gelu72: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.nn.gelu(add646) permute_dims569: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_6_fc2_weight3, axes=None) matmul568: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(gelu72, permute_dims569, out_dtype="void") add647: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul568, model_decoder_layers_6_fc2_bias3) add648: R.Tensor((batch_size, 1, 1280), dtype="float16") = 
R.add(add645, add647) layer_norm183: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add648, model_decoder_layers_7_self_attn_layer_norm_weight3, model_decoder_layers_7_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims570: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_7_self_attn_q_proj_weight3, axes=None) matmul569: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm183, permute_dims570, out_dtype="void") add649: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul569, model_decoder_layers_7_self_attn_q_proj_bias3) reshape780: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add649, R.shape([batch_size, 1, 20, 64])) permute_dims571: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_7_self_attn_k_proj_weight3, axes=None) matmul570: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm183, permute_dims571, out_dtype="void") reshape781: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(matmul570, R.shape([batch_size, 1, 20, 64])) permute_dims572: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_7_self_attn_v_proj_weight3, axes=None) matmul571: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm183, permute_dims572, out_dtype="void") add650: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul571, model_decoder_layers_7_self_attn_v_proj_bias3) reshape782: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add650, R.shape([batch_size, 1, 20, 64])) concat39: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape780, reshape781, reshape782), axis=2) reshape783: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat39, R.shape([batch_size, 60, 64])) lv148 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(7), 
R.prim_value(T.float32(1)), reshape783), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape784: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv148, R.shape([batch_size, 1, 20, 64])) reshape785: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape784, R.shape([batch_size, 1, 1280])) permute_dims573: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_7_self_attn_out_proj_weight3, axes=None) matmul572: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape785, permute_dims573, out_dtype="void") add651: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul572, model_decoder_layers_7_self_attn_out_proj_bias3) add652: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add648, add651) layer_norm184: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add652, model_decoder_layers_7_encoder_attn_layer_norm_weight3, model_decoder_layers_7_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims574: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_7_encoder_attn_q_proj_weight3, axes=None) matmul573: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm184, permute_dims574, out_dtype="void") add653: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul573, model_decoder_layers_7_encoder_attn_q_proj_bias3) reshape786: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add653, R.shape([batch_size, 1, 20, 64])) reshape787: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape786, R.shape([batch_size, 20, 64])) lv149 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(7), R.prim_value(T.float32(1)), reshape787), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape788: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv149, R.shape([batch_size, 1, 20, 
64])) reshape789: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape788, R.shape([batch_size, 1, 1280])) permute_dims575: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_7_encoder_attn_out_proj_weight3, axes=None) matmul574: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape789, permute_dims575, out_dtype="void") add654: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul574, model_decoder_layers_7_encoder_attn_out_proj_bias3) add655: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add652, add654) layer_norm185: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add655, model_decoder_layers_7_final_layer_norm_weight3, model_decoder_layers_7_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims576: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_7_fc1_weight3, axes=None) matmul575: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.matmul(layer_norm185, permute_dims576, out_dtype="void") add656: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.add(matmul575, model_decoder_layers_7_fc1_bias3) gelu73: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.nn.gelu(add656) permute_dims577: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_7_fc2_weight3, axes=None) matmul576: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(gelu73, permute_dims577, out_dtype="void") add657: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul576, model_decoder_layers_7_fc2_bias3) add658: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add655, add657) layer_norm186: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add658, model_decoder_layers_8_self_attn_layer_norm_weight3, model_decoder_layers_8_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims578: R.Tensor((1280, 1280), 
dtype="float16") = R.permute_dims(model_decoder_layers_8_self_attn_q_proj_weight3, axes=None) matmul577: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm186, permute_dims578, out_dtype="void") add659: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul577, model_decoder_layers_8_self_attn_q_proj_bias3) reshape790: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add659, R.shape([batch_size, 1, 20, 64])) permute_dims579: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_8_self_attn_k_proj_weight3, axes=None) matmul578: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm186, permute_dims579, out_dtype="void") reshape791: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(matmul578, R.shape([batch_size, 1, 20, 64])) permute_dims580: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_8_self_attn_v_proj_weight3, axes=None) matmul579: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm186, permute_dims580, out_dtype="void") add660: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul579, model_decoder_layers_8_self_attn_v_proj_bias3) reshape792: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add660, R.shape([batch_size, 1, 20, 64])) concat40: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape790, reshape791, reshape792), axis=2) reshape793: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat40, R.shape([batch_size, 60, 64])) lv150 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(8), R.prim_value(T.float32(1)), reshape793), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape794: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv150, R.shape([batch_size, 1, 20, 64])) reshape795: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape794, R.shape([batch_size, 1, 
1280])) permute_dims581: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_8_self_attn_out_proj_weight3, axes=None) matmul580: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape795, permute_dims581, out_dtype="void") add661: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul580, model_decoder_layers_8_self_attn_out_proj_bias3) add662: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add658, add661) layer_norm187: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add662, model_decoder_layers_8_encoder_attn_layer_norm_weight3, model_decoder_layers_8_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims582: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_8_encoder_attn_q_proj_weight3, axes=None) matmul581: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm187, permute_dims582, out_dtype="void") add663: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul581, model_decoder_layers_8_encoder_attn_q_proj_bias3) reshape796: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add663, R.shape([batch_size, 1, 20, 64])) reshape797: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape796, R.shape([batch_size, 20, 64])) lv151 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(8), R.prim_value(T.float32(1)), reshape797), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape798: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv151, R.shape([batch_size, 1, 20, 64])) reshape799: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape798, R.shape([batch_size, 1, 1280])) permute_dims583: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_8_encoder_attn_out_proj_weight3, axes=None) matmul582: R.Tensor((batch_size, 1, 1280), dtype="float16") = 
R.matmul(reshape799, permute_dims583, out_dtype="void") add664: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul582, model_decoder_layers_8_encoder_attn_out_proj_bias3) add665: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add662, add664) layer_norm188: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add665, model_decoder_layers_8_final_layer_norm_weight3, model_decoder_layers_8_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims584: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_8_fc1_weight3, axes=None) matmul583: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.matmul(layer_norm188, permute_dims584, out_dtype="void") add666: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.add(matmul583, model_decoder_layers_8_fc1_bias3) gelu74: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.nn.gelu(add666) permute_dims585: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_8_fc2_weight3, axes=None) matmul584: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(gelu74, permute_dims585, out_dtype="void") add667: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul584, model_decoder_layers_8_fc2_bias3) add668: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add665, add667) layer_norm189: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add668, model_decoder_layers_9_self_attn_layer_norm_weight3, model_decoder_layers_9_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims586: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_9_self_attn_q_proj_weight3, axes=None) matmul585: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm189, permute_dims586, out_dtype="void") add669: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul585, 
model_decoder_layers_9_self_attn_q_proj_bias3) reshape800: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add669, R.shape([batch_size, 1, 20, 64])) permute_dims587: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_9_self_attn_k_proj_weight3, axes=None) matmul586: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm189, permute_dims587, out_dtype="void") reshape801: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(matmul586, R.shape([batch_size, 1, 20, 64])) permute_dims588: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_9_self_attn_v_proj_weight3, axes=None) matmul587: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm189, permute_dims588, out_dtype="void") add670: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul587, model_decoder_layers_9_self_attn_v_proj_bias3) reshape802: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add670, R.shape([batch_size, 1, 20, 64])) concat41: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape800, reshape801, reshape802), axis=2) reshape803: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat41, R.shape([batch_size, 60, 64])) lv152 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(9), R.prim_value(T.float32(1)), reshape803), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape804: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv152, R.shape([batch_size, 1, 20, 64])) reshape805: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape804, R.shape([batch_size, 1, 1280])) permute_dims589: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_9_self_attn_out_proj_weight3, axes=None) matmul588: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape805, permute_dims589, out_dtype="void") add671: R.Tensor((batch_size, 
1, 1280), dtype="float16") = R.add(matmul588, model_decoder_layers_9_self_attn_out_proj_bias3) add672: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add668, add671) layer_norm190: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add672, model_decoder_layers_9_encoder_attn_layer_norm_weight3, model_decoder_layers_9_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims590: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_9_encoder_attn_q_proj_weight3, axes=None) matmul589: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm190, permute_dims590, out_dtype="void") add673: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul589, model_decoder_layers_9_encoder_attn_q_proj_bias3) reshape806: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add673, R.shape([batch_size, 1, 20, 64])) reshape807: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape806, R.shape([batch_size, 20, 64])) lv153 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(9), R.prim_value(T.float32(1)), reshape807), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape808: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv153, R.shape([batch_size, 1, 20, 64])) reshape809: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape808, R.shape([batch_size, 1, 1280])) permute_dims591: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_9_encoder_attn_out_proj_weight3, axes=None) matmul590: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape809, permute_dims591, out_dtype="void") add674: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul590, model_decoder_layers_9_encoder_attn_out_proj_bias3) add675: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add672, add674) layer_norm191: 
R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add675, model_decoder_layers_9_final_layer_norm_weight3, model_decoder_layers_9_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims592: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_9_fc1_weight3, axes=None) matmul591: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.matmul(layer_norm191, permute_dims592, out_dtype="void") add676: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.add(matmul591, model_decoder_layers_9_fc1_bias3) gelu75: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.nn.gelu(add676) permute_dims593: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_9_fc2_weight3, axes=None) matmul592: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(gelu75, permute_dims593, out_dtype="void") add677: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul592, model_decoder_layers_9_fc2_bias3) add678: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add675, add677) layer_norm192: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add678, model_decoder_layers_10_self_attn_layer_norm_weight3, model_decoder_layers_10_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims594: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_10_self_attn_q_proj_weight3, axes=None) matmul593: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm192, permute_dims594, out_dtype="void") add679: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul593, model_decoder_layers_10_self_attn_q_proj_bias3) reshape810: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add679, R.shape([batch_size, 1, 20, 64])) permute_dims595: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_10_self_attn_k_proj_weight3, axes=None) 
matmul594: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm192, permute_dims595, out_dtype="void") reshape811: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(matmul594, R.shape([batch_size, 1, 20, 64])) permute_dims596: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_10_self_attn_v_proj_weight3, axes=None) matmul595: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm192, permute_dims596, out_dtype="void") add680: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul595, model_decoder_layers_10_self_attn_v_proj_bias3) reshape812: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add680, R.shape([batch_size, 1, 20, 64])) concat42: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape810, reshape811, reshape812), axis=2) reshape813: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat42, R.shape([batch_size, 60, 64])) lv154 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(10), R.prim_value(T.float32(1)), reshape813), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape814: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv154, R.shape([batch_size, 1, 20, 64])) reshape815: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape814, R.shape([batch_size, 1, 1280])) permute_dims597: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_10_self_attn_out_proj_weight3, axes=None) matmul596: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape815, permute_dims597, out_dtype="void") add681: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul596, model_decoder_layers_10_self_attn_out_proj_bias3) add682: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add678, add681) layer_norm193: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add682, 
model_decoder_layers_10_encoder_attn_layer_norm_weight3, model_decoder_layers_10_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims598: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_10_encoder_attn_q_proj_weight3, axes=None) matmul597: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm193, permute_dims598, out_dtype="void") add683: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul597, model_decoder_layers_10_encoder_attn_q_proj_bias3) reshape816: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add683, R.shape([batch_size, 1, 20, 64])) reshape817: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape816, R.shape([batch_size, 20, 64])) lv155 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(10), R.prim_value(T.float32(1)), reshape817), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape818: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv155, R.shape([batch_size, 1, 20, 64])) reshape819: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape818, R.shape([batch_size, 1, 1280])) permute_dims599: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_10_encoder_attn_out_proj_weight3, axes=None) matmul598: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape819, permute_dims599, out_dtype="void") add684: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul598, model_decoder_layers_10_encoder_attn_out_proj_bias3) add685: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add682, add684) layer_norm194: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add685, model_decoder_layers_10_final_layer_norm_weight3, model_decoder_layers_10_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims600: R.Tensor((1280, 
5120), dtype="float16") = R.permute_dims(model_decoder_layers_10_fc1_weight3, axes=None) matmul599: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.matmul(layer_norm194, permute_dims600, out_dtype="void") add686: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.add(matmul599, model_decoder_layers_10_fc1_bias3) gelu76: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.nn.gelu(add686) permute_dims601: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_10_fc2_weight3, axes=None) matmul600: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(gelu76, permute_dims601, out_dtype="void") add687: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul600, model_decoder_layers_10_fc2_bias3) add688: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add685, add687) layer_norm195: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add688, model_decoder_layers_11_self_attn_layer_norm_weight3, model_decoder_layers_11_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims602: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_11_self_attn_q_proj_weight3, axes=None) matmul601: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm195, permute_dims602, out_dtype="void") add689: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul601, model_decoder_layers_11_self_attn_q_proj_bias3) reshape820: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add689, R.shape([batch_size, 1, 20, 64])) permute_dims603: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_11_self_attn_k_proj_weight3, axes=None) matmul602: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm195, permute_dims603, out_dtype="void") reshape821: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(matmul602, R.shape([batch_size, 1, 20, 64])) permute_dims604: R.Tensor((1280, 
1280), dtype="float16") = R.permute_dims(model_decoder_layers_11_self_attn_v_proj_weight3, axes=None) matmul603: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm195, permute_dims604, out_dtype="void") add690: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul603, model_decoder_layers_11_self_attn_v_proj_bias3) reshape822: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add690, R.shape([batch_size, 1, 20, 64])) concat43: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape820, reshape821, reshape822), axis=2) reshape823: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat43, R.shape([batch_size, 60, 64])) lv156 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(11), R.prim_value(T.float32(1)), reshape823), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape824: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv156, R.shape([batch_size, 1, 20, 64])) reshape825: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape824, R.shape([batch_size, 1, 1280])) permute_dims605: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_11_self_attn_out_proj_weight3, axes=None) matmul604: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape825, permute_dims605, out_dtype="void") add691: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul604, model_decoder_layers_11_self_attn_out_proj_bias3) add692: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add688, add691) layer_norm196: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add692, model_decoder_layers_11_encoder_attn_layer_norm_weight3, model_decoder_layers_11_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims606: R.Tensor((1280, 1280), dtype="float16") = 
R.permute_dims(model_decoder_layers_11_encoder_attn_q_proj_weight3, axes=None) matmul605: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm196, permute_dims606, out_dtype="void") add693: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul605, model_decoder_layers_11_encoder_attn_q_proj_bias3) reshape826: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add693, R.shape([batch_size, 1, 20, 64])) reshape827: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape826, R.shape([batch_size, 20, 64])) lv157 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(11), R.prim_value(T.float32(1)), reshape827), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape828: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv157, R.shape([batch_size, 1, 20, 64])) reshape829: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape828, R.shape([batch_size, 1, 1280])) permute_dims607: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_11_encoder_attn_out_proj_weight3, axes=None) matmul606: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape829, permute_dims607, out_dtype="void") add694: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul606, model_decoder_layers_11_encoder_attn_out_proj_bias3) add695: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add692, add694) layer_norm197: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add695, model_decoder_layers_11_final_layer_norm_weight3, model_decoder_layers_11_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims608: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_11_fc1_weight3, axes=None) matmul607: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.matmul(layer_norm197, permute_dims608, out_dtype="void") add696: R.Tensor((batch_size, 
1, 5120), dtype="float16") = R.add(matmul607, model_decoder_layers_11_fc1_bias3) gelu77: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.nn.gelu(add696) permute_dims609: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_11_fc2_weight3, axes=None) matmul608: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(gelu77, permute_dims609, out_dtype="void") add697: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul608, model_decoder_layers_11_fc2_bias3) add698: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add695, add697) layer_norm198: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add698, model_decoder_layers_12_self_attn_layer_norm_weight3, model_decoder_layers_12_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims610: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_12_self_attn_q_proj_weight3, axes=None) matmul609: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm198, permute_dims610, out_dtype="void") add699: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul609, model_decoder_layers_12_self_attn_q_proj_bias3) reshape830: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add699, R.shape([batch_size, 1, 20, 64])) permute_dims611: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_12_self_attn_k_proj_weight3, axes=None) matmul610: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm198, permute_dims611, out_dtype="void") reshape831: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(matmul610, R.shape([batch_size, 1, 20, 64])) permute_dims612: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_12_self_attn_v_proj_weight3, axes=None) matmul611: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm198, permute_dims612, out_dtype="void") add700: 
R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul611, model_decoder_layers_12_self_attn_v_proj_bias3) reshape832: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add700, R.shape([batch_size, 1, 20, 64])) concat44: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape830, reshape831, reshape832), axis=2) reshape833: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat44, R.shape([batch_size, 60, 64])) lv158 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(12), R.prim_value(T.float32(1)), reshape833), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape834: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv158, R.shape([batch_size, 1, 20, 64])) reshape835: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape834, R.shape([batch_size, 1, 1280])) permute_dims613: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_12_self_attn_out_proj_weight3, axes=None) matmul612: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape835, permute_dims613, out_dtype="void") add701: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul612, model_decoder_layers_12_self_attn_out_proj_bias3) add702: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add698, add701) layer_norm199: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add702, model_decoder_layers_12_encoder_attn_layer_norm_weight3, model_decoder_layers_12_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims614: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_12_encoder_attn_q_proj_weight3, axes=None) matmul613: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm199, permute_dims614, out_dtype="void") add703: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul613, 
model_decoder_layers_12_encoder_attn_q_proj_bias3) reshape836: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add703, R.shape([batch_size, 1, 20, 64])) reshape837: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape836, R.shape([batch_size, 20, 64])) lv159 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(12), R.prim_value(T.float32(1)), reshape837), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape838: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv159, R.shape([batch_size, 1, 20, 64])) reshape839: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape838, R.shape([batch_size, 1, 1280])) permute_dims615: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_12_encoder_attn_out_proj_weight3, axes=None) matmul614: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape839, permute_dims615, out_dtype="void") add704: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul614, model_decoder_layers_12_encoder_attn_out_proj_bias3) add705: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add702, add704) layer_norm200: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add705, model_decoder_layers_12_final_layer_norm_weight3, model_decoder_layers_12_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims616: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_12_fc1_weight3, axes=None) matmul615: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.matmul(layer_norm200, permute_dims616, out_dtype="void") add706: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.add(matmul615, model_decoder_layers_12_fc1_bias3) gelu78: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.nn.gelu(add706) permute_dims617: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_12_fc2_weight3, 
axes=None) matmul616: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(gelu78, permute_dims617, out_dtype="void") add707: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul616, model_decoder_layers_12_fc2_bias3) add708: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add705, add707) layer_norm201: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add708, model_decoder_layers_13_self_attn_layer_norm_weight3, model_decoder_layers_13_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims618: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_13_self_attn_q_proj_weight3, axes=None) matmul617: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm201, permute_dims618, out_dtype="void") add709: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul617, model_decoder_layers_13_self_attn_q_proj_bias3) reshape840: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add709, R.shape([batch_size, 1, 20, 64])) permute_dims619: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_13_self_attn_k_proj_weight3, axes=None) matmul618: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm201, permute_dims619, out_dtype="void") reshape841: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(matmul618, R.shape([batch_size, 1, 20, 64])) permute_dims620: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_13_self_attn_v_proj_weight3, axes=None) matmul619: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm201, permute_dims620, out_dtype="void") add710: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul619, model_decoder_layers_13_self_attn_v_proj_bias3) reshape842: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add710, R.shape([batch_size, 1, 20, 64])) concat45: R.Tensor((batch_size, 1, 60, 64), 
dtype="float16") = R.concat((reshape840, reshape841, reshape842), axis=2) reshape843: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat45, R.shape([batch_size, 60, 64])) lv160 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(13), R.prim_value(T.float32(1)), reshape843), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape844: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv160, R.shape([batch_size, 1, 20, 64])) reshape845: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape844, R.shape([batch_size, 1, 1280])) permute_dims621: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_13_self_attn_out_proj_weight3, axes=None) matmul620: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape845, permute_dims621, out_dtype="void") add711: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul620, model_decoder_layers_13_self_attn_out_proj_bias3) add712: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add708, add711) layer_norm202: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add712, model_decoder_layers_13_encoder_attn_layer_norm_weight3, model_decoder_layers_13_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims622: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_13_encoder_attn_q_proj_weight3, axes=None) matmul621: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm202, permute_dims622, out_dtype="void") add713: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul621, model_decoder_layers_13_encoder_attn_q_proj_bias3) reshape846: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add713, R.shape([batch_size, 1, 20, 64])) reshape847: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape846, R.shape([batch_size, 20, 64])) lv161 = 
R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(13), R.prim_value(T.float32(1)), reshape847), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape848: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv161, R.shape([batch_size, 1, 20, 64])) reshape849: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape848, R.shape([batch_size, 1, 1280])) permute_dims623: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_13_encoder_attn_out_proj_weight3, axes=None) matmul622: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape849, permute_dims623, out_dtype="void") add714: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul622, model_decoder_layers_13_encoder_attn_out_proj_bias3) add715: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add712, add714) layer_norm203: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add715, model_decoder_layers_13_final_layer_norm_weight3, model_decoder_layers_13_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims624: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_13_fc1_weight3, axes=None) matmul623: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.matmul(layer_norm203, permute_dims624, out_dtype="void") add716: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.add(matmul623, model_decoder_layers_13_fc1_bias3) gelu79: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.nn.gelu(add716) permute_dims625: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_13_fc2_weight3, axes=None) matmul624: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(gelu79, permute_dims625, out_dtype="void") add717: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul624, model_decoder_layers_13_fc2_bias3) add718: R.Tensor((batch_size, 1, 1280), dtype="float16") = 
R.add(add715, add717) layer_norm204: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add718, model_decoder_layers_14_self_attn_layer_norm_weight3, model_decoder_layers_14_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims626: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_14_self_attn_q_proj_weight3, axes=None) matmul625: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm204, permute_dims626, out_dtype="void") add719: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul625, model_decoder_layers_14_self_attn_q_proj_bias3) reshape850: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add719, R.shape([batch_size, 1, 20, 64])) permute_dims627: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_14_self_attn_k_proj_weight3, axes=None) matmul626: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm204, permute_dims627, out_dtype="void") reshape851: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(matmul626, R.shape([batch_size, 1, 20, 64])) permute_dims628: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_14_self_attn_v_proj_weight3, axes=None) matmul627: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm204, permute_dims628, out_dtype="void") add720: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul627, model_decoder_layers_14_self_attn_v_proj_bias3) reshape852: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add720, R.shape([batch_size, 1, 20, 64])) concat46: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape850, reshape851, reshape852), axis=2) reshape853: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat46, R.shape([batch_size, 60, 64])) lv162 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, 
R.prim_value(14), R.prim_value(T.float32(1)), reshape853), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape854: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv162, R.shape([batch_size, 1, 20, 64])) reshape855: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape854, R.shape([batch_size, 1, 1280])) permute_dims629: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_14_self_attn_out_proj_weight3, axes=None) matmul628: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape855, permute_dims629, out_dtype="void") add721: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul628, model_decoder_layers_14_self_attn_out_proj_bias3) add722: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add718, add721) layer_norm205: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add722, model_decoder_layers_14_encoder_attn_layer_norm_weight3, model_decoder_layers_14_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims630: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_14_encoder_attn_q_proj_weight3, axes=None) matmul629: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm205, permute_dims630, out_dtype="void") add723: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul629, model_decoder_layers_14_encoder_attn_q_proj_bias3) reshape856: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add723, R.shape([batch_size, 1, 20, 64])) reshape857: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape856, R.shape([batch_size, 20, 64])) lv163 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(14), R.prim_value(T.float32(1)), reshape857), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape858: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv163, 
R.shape([batch_size, 1, 20, 64])) reshape859: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape858, R.shape([batch_size, 1, 1280])) permute_dims631: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_14_encoder_attn_out_proj_weight3, axes=None) matmul630: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape859, permute_dims631, out_dtype="void") add724: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul630, model_decoder_layers_14_encoder_attn_out_proj_bias3) add725: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add722, add724) layer_norm206: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add725, model_decoder_layers_14_final_layer_norm_weight3, model_decoder_layers_14_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims632: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_14_fc1_weight3, axes=None) matmul631: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.matmul(layer_norm206, permute_dims632, out_dtype="void") add726: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.add(matmul631, model_decoder_layers_14_fc1_bias3) gelu80: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.nn.gelu(add726) permute_dims633: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_14_fc2_weight3, axes=None) matmul632: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(gelu80, permute_dims633, out_dtype="void") add727: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul632, model_decoder_layers_14_fc2_bias3) add728: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add725, add727) layer_norm207: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add728, model_decoder_layers_15_self_attn_layer_norm_weight3, model_decoder_layers_15_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) 
permute_dims634: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_15_self_attn_q_proj_weight3, axes=None) matmul633: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm207, permute_dims634, out_dtype="void") add729: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul633, model_decoder_layers_15_self_attn_q_proj_bias3) reshape860: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add729, R.shape([batch_size, 1, 20, 64])) permute_dims635: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_15_self_attn_k_proj_weight3, axes=None) matmul634: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm207, permute_dims635, out_dtype="void") reshape861: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(matmul634, R.shape([batch_size, 1, 20, 64])) permute_dims636: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_15_self_attn_v_proj_weight3, axes=None) matmul635: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm207, permute_dims636, out_dtype="void") add730: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul635, model_decoder_layers_15_self_attn_v_proj_bias3) reshape862: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add730, R.shape([batch_size, 1, 20, 64])) concat47: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape860, reshape861, reshape862), axis=2) reshape863: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat47, R.shape([batch_size, 60, 64])) lv164 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(15), R.prim_value(T.float32(1)), reshape863), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape864: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv164, R.shape([batch_size, 1, 20, 64])) reshape865: R.Tensor((batch_size, 1, 1280), dtype="float16") = 
R.reshape(reshape864, R.shape([batch_size, 1, 1280])) permute_dims637: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_15_self_attn_out_proj_weight3, axes=None) matmul636: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape865, permute_dims637, out_dtype="void") add731: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul636, model_decoder_layers_15_self_attn_out_proj_bias3) add732: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add728, add731) layer_norm208: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add732, model_decoder_layers_15_encoder_attn_layer_norm_weight3, model_decoder_layers_15_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims638: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_15_encoder_attn_q_proj_weight3, axes=None) matmul637: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm208, permute_dims638, out_dtype="void") add733: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul637, model_decoder_layers_15_encoder_attn_q_proj_bias3) reshape866: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add733, R.shape([batch_size, 1, 20, 64])) reshape867: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape866, R.shape([batch_size, 20, 64])) lv165 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(15), R.prim_value(T.float32(1)), reshape867), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape868: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv165, R.shape([batch_size, 1, 20, 64])) reshape869: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape868, R.shape([batch_size, 1, 1280])) permute_dims639: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_15_encoder_attn_out_proj_weight3, axes=None) matmul638: 
R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape869, permute_dims639, out_dtype="void") add734: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul638, model_decoder_layers_15_encoder_attn_out_proj_bias3) add735: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add732, add734) layer_norm209: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add735, model_decoder_layers_15_final_layer_norm_weight3, model_decoder_layers_15_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims640: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_15_fc1_weight3, axes=None) matmul639: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.matmul(layer_norm209, permute_dims640, out_dtype="void") add736: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.add(matmul639, model_decoder_layers_15_fc1_bias3) gelu81: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.nn.gelu(add736) permute_dims641: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_15_fc2_weight3, axes=None) matmul640: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(gelu81, permute_dims641, out_dtype="void") add737: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul640, model_decoder_layers_15_fc2_bias3) add738: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add735, add737) layer_norm210: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add738, model_decoder_layers_16_self_attn_layer_norm_weight3, model_decoder_layers_16_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims642: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_16_self_attn_q_proj_weight3, axes=None) matmul641: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm210, permute_dims642, out_dtype="void") add739: R.Tensor((batch_size, 1, 1280), 
dtype="float16") = R.add(matmul641, model_decoder_layers_16_self_attn_q_proj_bias3) reshape870: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add739, R.shape([batch_size, 1, 20, 64])) permute_dims643: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_16_self_attn_k_proj_weight3, axes=None) matmul642: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm210, permute_dims643, out_dtype="void") reshape871: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(matmul642, R.shape([batch_size, 1, 20, 64])) permute_dims644: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_16_self_attn_v_proj_weight3, axes=None) matmul643: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm210, permute_dims644, out_dtype="void") add740: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul643, model_decoder_layers_16_self_attn_v_proj_bias3) reshape872: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add740, R.shape([batch_size, 1, 20, 64])) concat48: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape870, reshape871, reshape872), axis=2) reshape873: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat48, R.shape([batch_size, 60, 64])) lv166 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(16), R.prim_value(T.float32(1)), reshape873), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape874: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv166, R.shape([batch_size, 1, 20, 64])) reshape875: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape874, R.shape([batch_size, 1, 1280])) permute_dims645: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_16_self_attn_out_proj_weight3, axes=None) matmul644: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape875, permute_dims645, 
out_dtype="void") add741: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul644, model_decoder_layers_16_self_attn_out_proj_bias3) add742: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add738, add741) layer_norm211: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add742, model_decoder_layers_16_encoder_attn_layer_norm_weight3, model_decoder_layers_16_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims646: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_16_encoder_attn_q_proj_weight3, axes=None) matmul645: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm211, permute_dims646, out_dtype="void") add743: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul645, model_decoder_layers_16_encoder_attn_q_proj_bias3) reshape876: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add743, R.shape([batch_size, 1, 20, 64])) reshape877: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape876, R.shape([batch_size, 20, 64])) lv167 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(16), R.prim_value(T.float32(1)), reshape877), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape878: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv167, R.shape([batch_size, 1, 20, 64])) reshape879: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape878, R.shape([batch_size, 1, 1280])) permute_dims647: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_16_encoder_attn_out_proj_weight3, axes=None) matmul646: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape879, permute_dims647, out_dtype="void") add744: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul646, model_decoder_layers_16_encoder_attn_out_proj_bias3) add745: R.Tensor((batch_size, 1, 1280), dtype="float16") 
= R.add(add742, add744) layer_norm212: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add745, model_decoder_layers_16_final_layer_norm_weight3, model_decoder_layers_16_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims648: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_16_fc1_weight3, axes=None) matmul647: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.matmul(layer_norm212, permute_dims648, out_dtype="void") add746: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.add(matmul647, model_decoder_layers_16_fc1_bias3) gelu82: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.nn.gelu(add746) permute_dims649: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_16_fc2_weight3, axes=None) matmul648: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(gelu82, permute_dims649, out_dtype="void") add747: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul648, model_decoder_layers_16_fc2_bias3) add748: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add745, add747) layer_norm213: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add748, model_decoder_layers_17_self_attn_layer_norm_weight3, model_decoder_layers_17_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims650: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_17_self_attn_q_proj_weight3, axes=None) matmul649: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm213, permute_dims650, out_dtype="void") add749: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul649, model_decoder_layers_17_self_attn_q_proj_bias3) reshape880: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add749, R.shape([batch_size, 1, 20, 64])) permute_dims651: R.Tensor((1280, 1280), dtype="float16") = 
R.permute_dims(model_decoder_layers_17_self_attn_k_proj_weight3, axes=None) matmul650: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm213, permute_dims651, out_dtype="void") reshape881: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(matmul650, R.shape([batch_size, 1, 20, 64])) permute_dims652: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_17_self_attn_v_proj_weight3, axes=None) matmul651: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm213, permute_dims652, out_dtype="void") add750: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul651, model_decoder_layers_17_self_attn_v_proj_bias3) reshape882: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add750, R.shape([batch_size, 1, 20, 64])) concat49: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape880, reshape881, reshape882), axis=2) reshape883: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat49, R.shape([batch_size, 60, 64])) lv168 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(17), R.prim_value(T.float32(1)), reshape883), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape884: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv168, R.shape([batch_size, 1, 20, 64])) reshape885: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape884, R.shape([batch_size, 1, 1280])) permute_dims653: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_17_self_attn_out_proj_weight3, axes=None) matmul652: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape885, permute_dims653, out_dtype="void") add751: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul652, model_decoder_layers_17_self_attn_out_proj_bias3) add752: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add748, add751) layer_norm214: R.Tensor((batch_size, 1, 
1280), dtype="float16") = R.nn.layer_norm(add752, model_decoder_layers_17_encoder_attn_layer_norm_weight3, model_decoder_layers_17_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims654: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_17_encoder_attn_q_proj_weight3, axes=None) matmul653: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm214, permute_dims654, out_dtype="void") add753: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul653, model_decoder_layers_17_encoder_attn_q_proj_bias3) reshape886: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add753, R.shape([batch_size, 1, 20, 64])) reshape887: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape886, R.shape([batch_size, 20, 64])) lv169 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(17), R.prim_value(T.float32(1)), reshape887), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape888: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv169, R.shape([batch_size, 1, 20, 64])) reshape889: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape888, R.shape([batch_size, 1, 1280])) permute_dims655: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_17_encoder_attn_out_proj_weight3, axes=None) matmul654: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape889, permute_dims655, out_dtype="void") add754: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul654, model_decoder_layers_17_encoder_attn_out_proj_bias3) add755: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add752, add754) layer_norm215: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add755, model_decoder_layers_17_final_layer_norm_weight3, model_decoder_layers_17_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, 
center=True, scale=True) permute_dims656: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_17_fc1_weight3, axes=None) matmul655: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.matmul(layer_norm215, permute_dims656, out_dtype="void") add756: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.add(matmul655, model_decoder_layers_17_fc1_bias3) gelu83: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.nn.gelu(add756) permute_dims657: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_17_fc2_weight3, axes=None) matmul656: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(gelu83, permute_dims657, out_dtype="void") add757: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul656, model_decoder_layers_17_fc2_bias3) add758: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add755, add757) layer_norm216: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add758, model_decoder_layers_18_self_attn_layer_norm_weight3, model_decoder_layers_18_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims658: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_18_self_attn_q_proj_weight3, axes=None) matmul657: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm216, permute_dims658, out_dtype="void") add759: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul657, model_decoder_layers_18_self_attn_q_proj_bias3) reshape890: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add759, R.shape([batch_size, 1, 20, 64])) permute_dims659: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_18_self_attn_k_proj_weight3, axes=None) matmul658: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm216, permute_dims659, out_dtype="void") reshape891: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(matmul658, 
R.shape([batch_size, 1, 20, 64])) permute_dims660: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_18_self_attn_v_proj_weight3, axes=None) matmul659: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm216, permute_dims660, out_dtype="void") add760: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul659, model_decoder_layers_18_self_attn_v_proj_bias3) reshape892: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add760, R.shape([batch_size, 1, 20, 64])) concat50: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape890, reshape891, reshape892), axis=2) reshape893: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat50, R.shape([batch_size, 60, 64])) lv170 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(18), R.prim_value(T.float32(1)), reshape893), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape894: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv170, R.shape([batch_size, 1, 20, 64])) reshape895: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape894, R.shape([batch_size, 1, 1280])) permute_dims661: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_18_self_attn_out_proj_weight3, axes=None) matmul660: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape895, permute_dims661, out_dtype="void") add761: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul660, model_decoder_layers_18_self_attn_out_proj_bias3) add762: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add758, add761) layer_norm217: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add762, model_decoder_layers_18_encoder_attn_layer_norm_weight3, model_decoder_layers_18_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims662: R.Tensor((1280, 1280), 
dtype="float16") = R.permute_dims(model_decoder_layers_18_encoder_attn_q_proj_weight3, axes=None) matmul661: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm217, permute_dims662, out_dtype="void") add763: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul661, model_decoder_layers_18_encoder_attn_q_proj_bias3) reshape896: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add763, R.shape([batch_size, 1, 20, 64])) reshape897: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape896, R.shape([batch_size, 20, 64])) lv171 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(18), R.prim_value(T.float32(1)), reshape897), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape898: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv171, R.shape([batch_size, 1, 20, 64])) reshape899: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape898, R.shape([batch_size, 1, 1280])) permute_dims663: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_18_encoder_attn_out_proj_weight3, axes=None) matmul662: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape899, permute_dims663, out_dtype="void") add764: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul662, model_decoder_layers_18_encoder_attn_out_proj_bias3) add765: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add762, add764) layer_norm218: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add765, model_decoder_layers_18_final_layer_norm_weight3, model_decoder_layers_18_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims664: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_18_fc1_weight3, axes=None) matmul663: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.matmul(layer_norm218, permute_dims664, out_dtype="void") add766: 
R.Tensor((batch_size, 1, 5120), dtype="float16") = R.add(matmul663, model_decoder_layers_18_fc1_bias3) gelu84: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.nn.gelu(add766) permute_dims665: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_18_fc2_weight3, axes=None) matmul664: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(gelu84, permute_dims665, out_dtype="void") add767: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul664, model_decoder_layers_18_fc2_bias3) add768: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add765, add767) layer_norm219: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add768, model_decoder_layers_19_self_attn_layer_norm_weight3, model_decoder_layers_19_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims666: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_19_self_attn_q_proj_weight3, axes=None) matmul665: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm219, permute_dims666, out_dtype="void") add769: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul665, model_decoder_layers_19_self_attn_q_proj_bias3) reshape900: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add769, R.shape([batch_size, 1, 20, 64])) permute_dims667: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_19_self_attn_k_proj_weight3, axes=None) matmul666: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm219, permute_dims667, out_dtype="void") reshape901: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(matmul666, R.shape([batch_size, 1, 20, 64])) permute_dims668: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_19_self_attn_v_proj_weight3, axes=None) matmul667: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm219, permute_dims668, 
out_dtype="void") add770: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul667, model_decoder_layers_19_self_attn_v_proj_bias3) reshape902: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add770, R.shape([batch_size, 1, 20, 64])) concat51: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape900, reshape901, reshape902), axis=2) reshape903: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat51, R.shape([batch_size, 60, 64])) lv172 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(19), R.prim_value(T.float32(1)), reshape903), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape904: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv172, R.shape([batch_size, 1, 20, 64])) reshape905: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape904, R.shape([batch_size, 1, 1280])) permute_dims669: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_19_self_attn_out_proj_weight3, axes=None) matmul668: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape905, permute_dims669, out_dtype="void") add771: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul668, model_decoder_layers_19_self_attn_out_proj_bias3) add772: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add768, add771) layer_norm220: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add772, model_decoder_layers_19_encoder_attn_layer_norm_weight3, model_decoder_layers_19_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims670: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_19_encoder_attn_q_proj_weight3, axes=None) matmul669: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm220, permute_dims670, out_dtype="void") add773: R.Tensor((batch_size, 1, 1280), dtype="float16") = 
R.add(matmul669, model_decoder_layers_19_encoder_attn_q_proj_bias3) reshape906: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add773, R.shape([batch_size, 1, 20, 64])) reshape907: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape906, R.shape([batch_size, 20, 64])) lv173 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(19), R.prim_value(T.float32(1)), reshape907), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape908: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv173, R.shape([batch_size, 1, 20, 64])) reshape909: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape908, R.shape([batch_size, 1, 1280])) permute_dims671: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_19_encoder_attn_out_proj_weight3, axes=None) matmul670: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape909, permute_dims671, out_dtype="void") add774: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul670, model_decoder_layers_19_encoder_attn_out_proj_bias3) add775: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add772, add774) layer_norm221: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add775, model_decoder_layers_19_final_layer_norm_weight3, model_decoder_layers_19_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims672: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_19_fc1_weight3, axes=None) matmul671: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.matmul(layer_norm221, permute_dims672, out_dtype="void") add776: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.add(matmul671, model_decoder_layers_19_fc1_bias3) gelu85: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.nn.gelu(add776) permute_dims673: R.Tensor((5120, 1280), dtype="float16") = 
R.permute_dims(model_decoder_layers_19_fc2_weight3, axes=None) matmul672: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(gelu85, permute_dims673, out_dtype="void") add777: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul672, model_decoder_layers_19_fc2_bias3) add778: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add775, add777) layer_norm222: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add778, model_decoder_layers_20_self_attn_layer_norm_weight3, model_decoder_layers_20_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims674: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_20_self_attn_q_proj_weight3, axes=None) matmul673: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm222, permute_dims674, out_dtype="void") add779: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul673, model_decoder_layers_20_self_attn_q_proj_bias3) reshape910: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add779, R.shape([batch_size, 1, 20, 64])) permute_dims675: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_20_self_attn_k_proj_weight3, axes=None) matmul674: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm222, permute_dims675, out_dtype="void") reshape911: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(matmul674, R.shape([batch_size, 1, 20, 64])) permute_dims676: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_20_self_attn_v_proj_weight3, axes=None) matmul675: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm222, permute_dims676, out_dtype="void") add780: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul675, model_decoder_layers_20_self_attn_v_proj_bias3) reshape912: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add780, R.shape([batch_size, 1, 20, 
64])) concat52: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape910, reshape911, reshape912), axis=2) reshape913: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat52, R.shape([batch_size, 60, 64])) lv174 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(20), R.prim_value(T.float32(1)), reshape913), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape914: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv174, R.shape([batch_size, 1, 20, 64])) reshape915: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape914, R.shape([batch_size, 1, 1280])) permute_dims677: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_20_self_attn_out_proj_weight3, axes=None) matmul676: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape915, permute_dims677, out_dtype="void") add781: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul676, model_decoder_layers_20_self_attn_out_proj_bias3) add782: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add778, add781) layer_norm223: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add782, model_decoder_layers_20_encoder_attn_layer_norm_weight3, model_decoder_layers_20_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims678: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_20_encoder_attn_q_proj_weight3, axes=None) matmul677: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm223, permute_dims678, out_dtype="void") add783: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul677, model_decoder_layers_20_encoder_attn_q_proj_bias3) reshape916: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add783, R.shape([batch_size, 1, 20, 64])) reshape917: R.Tensor((batch_size, 20, 64), dtype="float16") = 
R.reshape(reshape916, R.shape([batch_size, 20, 64])) lv175 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(20), R.prim_value(T.float32(1)), reshape917), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape918: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv175, R.shape([batch_size, 1, 20, 64])) reshape919: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape918, R.shape([batch_size, 1, 1280])) permute_dims679: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_20_encoder_attn_out_proj_weight3, axes=None) matmul678: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape919, permute_dims679, out_dtype="void") add784: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul678, model_decoder_layers_20_encoder_attn_out_proj_bias3) add785: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add782, add784) layer_norm224: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add785, model_decoder_layers_20_final_layer_norm_weight3, model_decoder_layers_20_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims680: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_20_fc1_weight3, axes=None) matmul679: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.matmul(layer_norm224, permute_dims680, out_dtype="void") add786: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.add(matmul679, model_decoder_layers_20_fc1_bias3) gelu86: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.nn.gelu(add786) permute_dims681: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_20_fc2_weight3, axes=None) matmul680: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(gelu86, permute_dims681, out_dtype="void") add787: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul680, model_decoder_layers_20_fc2_bias3) 
add788: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add785, add787) layer_norm225: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add788, model_decoder_layers_21_self_attn_layer_norm_weight3, model_decoder_layers_21_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims682: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_21_self_attn_q_proj_weight3, axes=None) matmul681: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm225, permute_dims682, out_dtype="void") add789: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul681, model_decoder_layers_21_self_attn_q_proj_bias3) reshape920: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add789, R.shape([batch_size, 1, 20, 64])) permute_dims683: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_21_self_attn_k_proj_weight3, axes=None) matmul682: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm225, permute_dims683, out_dtype="void") reshape921: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(matmul682, R.shape([batch_size, 1, 20, 64])) permute_dims684: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_21_self_attn_v_proj_weight3, axes=None) matmul683: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm225, permute_dims684, out_dtype="void") add790: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul683, model_decoder_layers_21_self_attn_v_proj_bias3) reshape922: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add790, R.shape([batch_size, 1, 20, 64])) concat53: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape920, reshape921, reshape922), axis=2) reshape923: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat53, R.shape([batch_size, 60, 64])) lv176 = 
R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(21), R.prim_value(T.float32(1)), reshape923), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape924: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv176, R.shape([batch_size, 1, 20, 64])) reshape925: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape924, R.shape([batch_size, 1, 1280])) permute_dims685: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_21_self_attn_out_proj_weight3, axes=None) matmul684: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape925, permute_dims685, out_dtype="void") add791: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul684, model_decoder_layers_21_self_attn_out_proj_bias3) add792: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add788, add791) layer_norm226: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add792, model_decoder_layers_21_encoder_attn_layer_norm_weight3, model_decoder_layers_21_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims686: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_21_encoder_attn_q_proj_weight3, axes=None) matmul685: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm226, permute_dims686, out_dtype="void") add793: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul685, model_decoder_layers_21_encoder_attn_q_proj_bias3) reshape926: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add793, R.shape([batch_size, 1, 20, 64])) reshape927: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape926, R.shape([batch_size, 20, 64])) lv177 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(21), R.prim_value(T.float32(1)), reshape927), out_sinfo=R.Tensor((batch_size, 20, 64), 
dtype="float16")) reshape928: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv177, R.shape([batch_size, 1, 20, 64])) reshape929: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape928, R.shape([batch_size, 1, 1280])) permute_dims687: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_21_encoder_attn_out_proj_weight3, axes=None) matmul686: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape929, permute_dims687, out_dtype="void") add794: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul686, model_decoder_layers_21_encoder_attn_out_proj_bias3) add795: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add792, add794) layer_norm227: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add795, model_decoder_layers_21_final_layer_norm_weight3, model_decoder_layers_21_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims688: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_21_fc1_weight3, axes=None) matmul687: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.matmul(layer_norm227, permute_dims688, out_dtype="void") add796: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.add(matmul687, model_decoder_layers_21_fc1_bias3) gelu87: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.nn.gelu(add796) permute_dims689: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_21_fc2_weight3, axes=None) matmul688: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(gelu87, permute_dims689, out_dtype="void") add797: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul688, model_decoder_layers_21_fc2_bias3) add798: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add795, add797) layer_norm228: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add798, model_decoder_layers_22_self_attn_layer_norm_weight3, 
model_decoder_layers_22_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims690: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_22_self_attn_q_proj_weight3, axes=None) matmul689: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm228, permute_dims690, out_dtype="void") add799: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul689, model_decoder_layers_22_self_attn_q_proj_bias3) reshape930: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add799, R.shape([batch_size, 1, 20, 64])) permute_dims691: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_22_self_attn_k_proj_weight3, axes=None) matmul690: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm228, permute_dims691, out_dtype="void") reshape931: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(matmul690, R.shape([batch_size, 1, 20, 64])) permute_dims692: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_22_self_attn_v_proj_weight3, axes=None) matmul691: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm228, permute_dims692, out_dtype="void") add800: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul691, model_decoder_layers_22_self_attn_v_proj_bias3) reshape932: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add800, R.shape([batch_size, 1, 20, 64])) concat54: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape930, reshape931, reshape932), axis=2) reshape933: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat54, R.shape([batch_size, 60, 64])) lv178 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(22), R.prim_value(T.float32(1)), reshape933), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape934: R.Tensor((batch_size, 1, 20, 64), 
dtype="float16") = R.reshape(lv178, R.shape([batch_size, 1, 20, 64])) reshape935: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape934, R.shape([batch_size, 1, 1280])) permute_dims693: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_22_self_attn_out_proj_weight3, axes=None) matmul692: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape935, permute_dims693, out_dtype="void") add801: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul692, model_decoder_layers_22_self_attn_out_proj_bias3) add802: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add798, add801) layer_norm229: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add802, model_decoder_layers_22_encoder_attn_layer_norm_weight3, model_decoder_layers_22_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims694: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_22_encoder_attn_q_proj_weight3, axes=None) matmul693: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm229, permute_dims694, out_dtype="void") add803: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul693, model_decoder_layers_22_encoder_attn_q_proj_bias3) reshape936: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add803, R.shape([batch_size, 1, 20, 64])) reshape937: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape936, R.shape([batch_size, 20, 64])) lv179 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(22), R.prim_value(T.float32(1)), reshape937), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape938: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv179, R.shape([batch_size, 1, 20, 64])) reshape939: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape938, R.shape([batch_size, 1, 1280])) permute_dims695: 
R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_22_encoder_attn_out_proj_weight3, axes=None) matmul694: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape939, permute_dims695, out_dtype="void") add804: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul694, model_decoder_layers_22_encoder_attn_out_proj_bias3) add805: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add802, add804) layer_norm230: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add805, model_decoder_layers_22_final_layer_norm_weight3, model_decoder_layers_22_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims696: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_22_fc1_weight3, axes=None) matmul695: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.matmul(layer_norm230, permute_dims696, out_dtype="void") add806: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.add(matmul695, model_decoder_layers_22_fc1_bias3) gelu88: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.nn.gelu(add806) permute_dims697: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_22_fc2_weight3, axes=None) matmul696: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(gelu88, permute_dims697, out_dtype="void") add807: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul696, model_decoder_layers_22_fc2_bias3) add808: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add805, add807) layer_norm231: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add808, model_decoder_layers_23_self_attn_layer_norm_weight3, model_decoder_layers_23_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims698: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_23_self_attn_q_proj_weight3, axes=None) matmul697: R.Tensor((batch_size, 
1, 1280), dtype="float16") = R.matmul(layer_norm231, permute_dims698, out_dtype="void") add809: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul697, model_decoder_layers_23_self_attn_q_proj_bias3) reshape940: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add809, R.shape([batch_size, 1, 20, 64])) permute_dims699: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_23_self_attn_k_proj_weight3, axes=None) matmul698: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm231, permute_dims699, out_dtype="void") reshape941: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(matmul698, R.shape([batch_size, 1, 20, 64])) permute_dims700: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_23_self_attn_v_proj_weight3, axes=None) matmul699: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm231, permute_dims700, out_dtype="void") add810: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul699, model_decoder_layers_23_self_attn_v_proj_bias3) reshape942: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add810, R.shape([batch_size, 1, 20, 64])) concat55: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape940, reshape941, reshape942), axis=2) reshape943: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat55, R.shape([batch_size, 60, 64])) lv180 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(23), R.prim_value(T.float32(1)), reshape943), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape944: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv180, R.shape([batch_size, 1, 20, 64])) reshape945: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape944, R.shape([batch_size, 1, 1280])) permute_dims701: R.Tensor((1280, 1280), dtype="float16") = 
R.permute_dims(model_decoder_layers_23_self_attn_out_proj_weight3, axes=None) matmul700: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape945, permute_dims701, out_dtype="void") add811: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul700, model_decoder_layers_23_self_attn_out_proj_bias3) add812: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add808, add811) layer_norm232: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add812, model_decoder_layers_23_encoder_attn_layer_norm_weight3, model_decoder_layers_23_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims702: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_23_encoder_attn_q_proj_weight3, axes=None) matmul701: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm232, permute_dims702, out_dtype="void") add813: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul701, model_decoder_layers_23_encoder_attn_q_proj_bias3) reshape946: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add813, R.shape([batch_size, 1, 20, 64])) reshape947: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape946, R.shape([batch_size, 20, 64])) lv181 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(23), R.prim_value(T.float32(1)), reshape947), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape948: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv181, R.shape([batch_size, 1, 20, 64])) reshape949: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape948, R.shape([batch_size, 1, 1280])) permute_dims703: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_23_encoder_attn_out_proj_weight3, axes=None) matmul702: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape949, permute_dims703, out_dtype="void") add814: 
R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul702, model_decoder_layers_23_encoder_attn_out_proj_bias3) add815: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add812, add814) layer_norm233: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add815, model_decoder_layers_23_final_layer_norm_weight3, model_decoder_layers_23_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims704: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_23_fc1_weight3, axes=None) matmul703: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.matmul(layer_norm233, permute_dims704, out_dtype="void") add816: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.add(matmul703, model_decoder_layers_23_fc1_bias3) gelu89: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.nn.gelu(add816) permute_dims705: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_23_fc2_weight3, axes=None) matmul704: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(gelu89, permute_dims705, out_dtype="void") add817: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul704, model_decoder_layers_23_fc2_bias3) add818: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add815, add817) layer_norm234: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add818, model_decoder_layers_24_self_attn_layer_norm_weight3, model_decoder_layers_24_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims706: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_24_self_attn_q_proj_weight3, axes=None) matmul705: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm234, permute_dims706, out_dtype="void") add819: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul705, model_decoder_layers_24_self_attn_q_proj_bias3) reshape950: R.Tensor((batch_size, 1, 
20, 64), dtype="float16") = R.reshape(add819, R.shape([batch_size, 1, 20, 64])) permute_dims707: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_24_self_attn_k_proj_weight3, axes=None) matmul706: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm234, permute_dims707, out_dtype="void") reshape951: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(matmul706, R.shape([batch_size, 1, 20, 64])) permute_dims708: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_24_self_attn_v_proj_weight3, axes=None) matmul707: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm234, permute_dims708, out_dtype="void") add820: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul707, model_decoder_layers_24_self_attn_v_proj_bias3) reshape952: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add820, R.shape([batch_size, 1, 20, 64])) concat56: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape950, reshape951, reshape952), axis=2) reshape953: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat56, R.shape([batch_size, 60, 64])) lv182 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(24), R.prim_value(T.float32(1)), reshape953), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape954: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv182, R.shape([batch_size, 1, 20, 64])) reshape955: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape954, R.shape([batch_size, 1, 1280])) permute_dims709: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_24_self_attn_out_proj_weight3, axes=None) matmul708: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape955, permute_dims709, out_dtype="void") add821: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul708, 
model_decoder_layers_24_self_attn_out_proj_bias3) add822: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add818, add821) layer_norm235: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add822, model_decoder_layers_24_encoder_attn_layer_norm_weight3, model_decoder_layers_24_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims710: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_24_encoder_attn_q_proj_weight3, axes=None) matmul709: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm235, permute_dims710, out_dtype="void") add823: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul709, model_decoder_layers_24_encoder_attn_q_proj_bias3) reshape956: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add823, R.shape([batch_size, 1, 20, 64])) reshape957: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape956, R.shape([batch_size, 20, 64])) lv183 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(24), R.prim_value(T.float32(1)), reshape957), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape958: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv183, R.shape([batch_size, 1, 20, 64])) reshape959: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape958, R.shape([batch_size, 1, 1280])) permute_dims711: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_24_encoder_attn_out_proj_weight3, axes=None) matmul710: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape959, permute_dims711, out_dtype="void") add824: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul710, model_decoder_layers_24_encoder_attn_out_proj_bias3) add825: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add822, add824) layer_norm236: R.Tensor((batch_size, 1, 1280), dtype="float16") = 
R.nn.layer_norm(add825, model_decoder_layers_24_final_layer_norm_weight3, model_decoder_layers_24_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims712: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_24_fc1_weight3, axes=None) matmul711: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.matmul(layer_norm236, permute_dims712, out_dtype="void") add826: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.add(matmul711, model_decoder_layers_24_fc1_bias3) gelu90: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.nn.gelu(add826) permute_dims713: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_24_fc2_weight3, axes=None) matmul712: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(gelu90, permute_dims713, out_dtype="void") add827: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul712, model_decoder_layers_24_fc2_bias3) add828: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add825, add827) layer_norm237: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add828, model_decoder_layers_25_self_attn_layer_norm_weight3, model_decoder_layers_25_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims714: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_25_self_attn_q_proj_weight3, axes=None) matmul713: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm237, permute_dims714, out_dtype="void") add829: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul713, model_decoder_layers_25_self_attn_q_proj_bias3) reshape960: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add829, R.shape([batch_size, 1, 20, 64])) permute_dims715: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_25_self_attn_k_proj_weight3, axes=None) matmul714: R.Tensor((batch_size, 1, 1280), 
dtype="float16") = R.matmul(layer_norm237, permute_dims715, out_dtype="void") reshape961: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(matmul714, R.shape([batch_size, 1, 20, 64])) permute_dims716: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_25_self_attn_v_proj_weight3, axes=None) matmul715: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm237, permute_dims716, out_dtype="void") add830: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul715, model_decoder_layers_25_self_attn_v_proj_bias3) reshape962: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add830, R.shape([batch_size, 1, 20, 64])) concat57: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape960, reshape961, reshape962), axis=2) reshape963: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat57, R.shape([batch_size, 60, 64])) lv184 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(25), R.prim_value(T.float32(1)), reshape963), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape964: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv184, R.shape([batch_size, 1, 20, 64])) reshape965: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape964, R.shape([batch_size, 1, 1280])) permute_dims717: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_25_self_attn_out_proj_weight3, axes=None) matmul716: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape965, permute_dims717, out_dtype="void") add831: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul716, model_decoder_layers_25_self_attn_out_proj_bias3) add832: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add828, add831) layer_norm238: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add832, model_decoder_layers_25_encoder_attn_layer_norm_weight3, 
model_decoder_layers_25_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims718: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_25_encoder_attn_q_proj_weight3, axes=None) matmul717: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm238, permute_dims718, out_dtype="void") add833: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul717, model_decoder_layers_25_encoder_attn_q_proj_bias3) reshape966: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add833, R.shape([batch_size, 1, 20, 64])) reshape967: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape966, R.shape([batch_size, 20, 64])) lv185 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(25), R.prim_value(T.float32(1)), reshape967), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape968: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv185, R.shape([batch_size, 1, 20, 64])) reshape969: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape968, R.shape([batch_size, 1, 1280])) permute_dims719: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_25_encoder_attn_out_proj_weight3, axes=None) matmul718: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape969, permute_dims719, out_dtype="void") add834: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul718, model_decoder_layers_25_encoder_attn_out_proj_bias3) add835: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add832, add834) layer_norm239: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add835, model_decoder_layers_25_final_layer_norm_weight3, model_decoder_layers_25_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims720: R.Tensor((1280, 5120), dtype="float16") = 
R.permute_dims(model_decoder_layers_25_fc1_weight3, axes=None) matmul719: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.matmul(layer_norm239, permute_dims720, out_dtype="void") add836: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.add(matmul719, model_decoder_layers_25_fc1_bias3) gelu91: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.nn.gelu(add836) permute_dims721: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_25_fc2_weight3, axes=None) matmul720: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(gelu91, permute_dims721, out_dtype="void") add837: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul720, model_decoder_layers_25_fc2_bias3) add838: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add835, add837) layer_norm240: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add838, model_decoder_layers_26_self_attn_layer_norm_weight3, model_decoder_layers_26_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims722: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_26_self_attn_q_proj_weight3, axes=None) matmul721: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm240, permute_dims722, out_dtype="void") add839: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul721, model_decoder_layers_26_self_attn_q_proj_bias3) reshape970: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add839, R.shape([batch_size, 1, 20, 64])) permute_dims723: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_26_self_attn_k_proj_weight3, axes=None) matmul722: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm240, permute_dims723, out_dtype="void") reshape971: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(matmul722, R.shape([batch_size, 1, 20, 64])) permute_dims724: R.Tensor((1280, 1280), dtype="float16") = 
R.permute_dims(model_decoder_layers_26_self_attn_v_proj_weight3, axes=None) matmul723: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm240, permute_dims724, out_dtype="void") add840: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul723, model_decoder_layers_26_self_attn_v_proj_bias3) reshape972: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add840, R.shape([batch_size, 1, 20, 64])) concat58: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape970, reshape971, reshape972), axis=2) reshape973: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat58, R.shape([batch_size, 60, 64])) lv186 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(26), R.prim_value(T.float32(1)), reshape973), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape974: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv186, R.shape([batch_size, 1, 20, 64])) reshape975: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape974, R.shape([batch_size, 1, 1280])) permute_dims725: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_26_self_attn_out_proj_weight3, axes=None) matmul724: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape975, permute_dims725, out_dtype="void") add841: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul724, model_decoder_layers_26_self_attn_out_proj_bias3) add842: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add838, add841) layer_norm241: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add842, model_decoder_layers_26_encoder_attn_layer_norm_weight3, model_decoder_layers_26_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims726: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_26_encoder_attn_q_proj_weight3, axes=None) 
matmul725: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm241, permute_dims726, out_dtype="void") add843: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul725, model_decoder_layers_26_encoder_attn_q_proj_bias3) reshape976: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add843, R.shape([batch_size, 1, 20, 64])) reshape977: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape976, R.shape([batch_size, 20, 64])) lv187 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(26), R.prim_value(T.float32(1)), reshape977), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape978: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv187, R.shape([batch_size, 1, 20, 64])) reshape979: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape978, R.shape([batch_size, 1, 1280])) permute_dims727: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_26_encoder_attn_out_proj_weight3, axes=None) matmul726: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape979, permute_dims727, out_dtype="void") add844: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul726, model_decoder_layers_26_encoder_attn_out_proj_bias3) add845: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add842, add844) layer_norm242: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add845, model_decoder_layers_26_final_layer_norm_weight3, model_decoder_layers_26_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims728: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_26_fc1_weight3, axes=None) matmul727: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.matmul(layer_norm242, permute_dims728, out_dtype="void") add846: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.add(matmul727, 
model_decoder_layers_26_fc1_bias3) gelu92: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.nn.gelu(add846) permute_dims729: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_26_fc2_weight3, axes=None) matmul728: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(gelu92, permute_dims729, out_dtype="void") add847: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul728, model_decoder_layers_26_fc2_bias3) add848: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add845, add847) layer_norm243: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add848, model_decoder_layers_27_self_attn_layer_norm_weight3, model_decoder_layers_27_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims730: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_27_self_attn_q_proj_weight3, axes=None) matmul729: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm243, permute_dims730, out_dtype="void") add849: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul729, model_decoder_layers_27_self_attn_q_proj_bias3) reshape980: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add849, R.shape([batch_size, 1, 20, 64])) permute_dims731: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_27_self_attn_k_proj_weight3, axes=None) matmul730: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm243, permute_dims731, out_dtype="void") reshape981: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(matmul730, R.shape([batch_size, 1, 20, 64])) permute_dims732: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_27_self_attn_v_proj_weight3, axes=None) matmul731: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm243, permute_dims732, out_dtype="void") add850: R.Tensor((batch_size, 1, 1280), dtype="float16") = 
R.add(matmul731, model_decoder_layers_27_self_attn_v_proj_bias3) reshape982: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add850, R.shape([batch_size, 1, 20, 64])) concat59: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape980, reshape981, reshape982), axis=2) reshape983: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat59, R.shape([batch_size, 60, 64])) lv188 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(27), R.prim_value(T.float32(1)), reshape983), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape984: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv188, R.shape([batch_size, 1, 20, 64])) reshape985: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape984, R.shape([batch_size, 1, 1280])) permute_dims733: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_27_self_attn_out_proj_weight3, axes=None) matmul732: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape985, permute_dims733, out_dtype="void") add851: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul732, model_decoder_layers_27_self_attn_out_proj_bias3) add852: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add848, add851) layer_norm244: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add852, model_decoder_layers_27_encoder_attn_layer_norm_weight3, model_decoder_layers_27_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims734: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_27_encoder_attn_q_proj_weight3, axes=None) matmul733: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm244, permute_dims734, out_dtype="void") add853: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul733, model_decoder_layers_27_encoder_attn_q_proj_bias3) reshape986: 
R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add853, R.shape([batch_size, 1, 20, 64])) reshape987: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape986, R.shape([batch_size, 20, 64])) lv189 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(27), R.prim_value(T.float32(1)), reshape987), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape988: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv189, R.shape([batch_size, 1, 20, 64])) reshape989: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape988, R.shape([batch_size, 1, 1280])) permute_dims735: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_27_encoder_attn_out_proj_weight3, axes=None) matmul734: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape989, permute_dims735, out_dtype="void") add854: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul734, model_decoder_layers_27_encoder_attn_out_proj_bias3) add855: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add852, add854) layer_norm245: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add855, model_decoder_layers_27_final_layer_norm_weight3, model_decoder_layers_27_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims736: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_27_fc1_weight3, axes=None) matmul735: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.matmul(layer_norm245, permute_dims736, out_dtype="void") add856: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.add(matmul735, model_decoder_layers_27_fc1_bias3) gelu93: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.nn.gelu(add856) permute_dims737: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_27_fc2_weight3, axes=None) matmul736: R.Tensor((batch_size, 1, 1280), dtype="float16") 
= R.matmul(gelu93, permute_dims737, out_dtype="void") add857: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul736, model_decoder_layers_27_fc2_bias3) add858: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add855, add857) layer_norm246: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add858, model_decoder_layers_28_self_attn_layer_norm_weight3, model_decoder_layers_28_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims738: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_28_self_attn_q_proj_weight3, axes=None) matmul737: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm246, permute_dims738, out_dtype="void") add859: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul737, model_decoder_layers_28_self_attn_q_proj_bias3) reshape990: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add859, R.shape([batch_size, 1, 20, 64])) permute_dims739: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_28_self_attn_k_proj_weight3, axes=None) matmul738: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm246, permute_dims739, out_dtype="void") reshape991: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(matmul738, R.shape([batch_size, 1, 20, 64])) permute_dims740: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_28_self_attn_v_proj_weight3, axes=None) matmul739: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm246, permute_dims740, out_dtype="void") add860: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul739, model_decoder_layers_28_self_attn_v_proj_bias3) reshape992: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add860, R.shape([batch_size, 1, 20, 64])) concat60: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape990, reshape991, reshape992), axis=2) 
reshape993: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat60, R.shape([batch_size, 60, 64])) lv190 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(28), R.prim_value(T.float32(1)), reshape993), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape994: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv190, R.shape([batch_size, 1, 20, 64])) reshape995: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape994, R.shape([batch_size, 1, 1280])) permute_dims741: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_28_self_attn_out_proj_weight3, axes=None) matmul740: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape995, permute_dims741, out_dtype="void") add861: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul740, model_decoder_layers_28_self_attn_out_proj_bias3) add862: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add858, add861) layer_norm247: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add862, model_decoder_layers_28_encoder_attn_layer_norm_weight3, model_decoder_layers_28_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims742: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_28_encoder_attn_q_proj_weight3, axes=None) matmul741: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm247, permute_dims742, out_dtype="void") add863: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul741, model_decoder_layers_28_encoder_attn_q_proj_bias3) reshape996: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add863, R.shape([batch_size, 1, 20, 64])) reshape997: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape996, R.shape([batch_size, 20, 64])) lv191 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", 
(paged_kv_cache, R.prim_value(28), R.prim_value(T.float32(1)), reshape997), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape998: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv191, R.shape([batch_size, 1, 20, 64])) reshape999: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape998, R.shape([batch_size, 1, 1280])) permute_dims743: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_28_encoder_attn_out_proj_weight3, axes=None) matmul742: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape999, permute_dims743, out_dtype="void") add864: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul742, model_decoder_layers_28_encoder_attn_out_proj_bias3) add865: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add862, add864) layer_norm248: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add865, model_decoder_layers_28_final_layer_norm_weight3, model_decoder_layers_28_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims744: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_28_fc1_weight3, axes=None) matmul743: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.matmul(layer_norm248, permute_dims744, out_dtype="void") add866: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.add(matmul743, model_decoder_layers_28_fc1_bias3) gelu94: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.nn.gelu(add866) permute_dims745: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_28_fc2_weight3, axes=None) matmul744: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(gelu94, permute_dims745, out_dtype="void") add867: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul744, model_decoder_layers_28_fc2_bias3) add868: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add865, add867) layer_norm249: R.Tensor((batch_size, 1, 1280), 
dtype="float16") = R.nn.layer_norm(add868, model_decoder_layers_29_self_attn_layer_norm_weight3, model_decoder_layers_29_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims746: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_29_self_attn_q_proj_weight3, axes=None) matmul745: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm249, permute_dims746, out_dtype="void") add869: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul745, model_decoder_layers_29_self_attn_q_proj_bias3) reshape1000: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add869, R.shape([batch_size, 1, 20, 64])) permute_dims747: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_29_self_attn_k_proj_weight3, axes=None) matmul746: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm249, permute_dims747, out_dtype="void") reshape1001: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(matmul746, R.shape([batch_size, 1, 20, 64])) permute_dims748: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_29_self_attn_v_proj_weight3, axes=None) matmul747: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm249, permute_dims748, out_dtype="void") add870: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul747, model_decoder_layers_29_self_attn_v_proj_bias3) reshape1002: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add870, R.shape([batch_size, 1, 20, 64])) concat61: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape1000, reshape1001, reshape1002), axis=2) reshape1003: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat61, R.shape([batch_size, 60, 64])) lv192 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(29), R.prim_value(T.float32(1)), reshape1003), 
out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape1004: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv192, R.shape([batch_size, 1, 20, 64])) reshape1005: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape1004, R.shape([batch_size, 1, 1280])) permute_dims749: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_29_self_attn_out_proj_weight3, axes=None) matmul748: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape1005, permute_dims749, out_dtype="void") add871: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul748, model_decoder_layers_29_self_attn_out_proj_bias3) add872: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add868, add871) layer_norm250: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add872, model_decoder_layers_29_encoder_attn_layer_norm_weight3, model_decoder_layers_29_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims750: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_29_encoder_attn_q_proj_weight3, axes=None) matmul749: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm250, permute_dims750, out_dtype="void") add873: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul749, model_decoder_layers_29_encoder_attn_q_proj_bias3) reshape1006: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add873, R.shape([batch_size, 1, 20, 64])) reshape1007: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape1006, R.shape([batch_size, 20, 64])) lv193 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(29), R.prim_value(T.float32(1)), reshape1007), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape1008: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv193, R.shape([batch_size, 1, 20, 64])) reshape1009: 
R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape1008, R.shape([batch_size, 1, 1280])) permute_dims751: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_29_encoder_attn_out_proj_weight3, axes=None) matmul750: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape1009, permute_dims751, out_dtype="void") add874: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul750, model_decoder_layers_29_encoder_attn_out_proj_bias3) add875: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add872, add874) layer_norm251: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add875, model_decoder_layers_29_final_layer_norm_weight3, model_decoder_layers_29_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims752: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_29_fc1_weight3, axes=None) matmul751: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.matmul(layer_norm251, permute_dims752, out_dtype="void") add876: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.add(matmul751, model_decoder_layers_29_fc1_bias3) gelu95: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.nn.gelu(add876) permute_dims753: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_29_fc2_weight3, axes=None) matmul752: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(gelu95, permute_dims753, out_dtype="void") add877: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul752, model_decoder_layers_29_fc2_bias3) add878: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add875, add877) layer_norm252: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add878, model_decoder_layers_30_self_attn_layer_norm_weight3, model_decoder_layers_30_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims754: R.Tensor((1280, 1280), 
dtype="float16") = R.permute_dims(model_decoder_layers_30_self_attn_q_proj_weight3, axes=None) matmul753: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm252, permute_dims754, out_dtype="void") add879: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul753, model_decoder_layers_30_self_attn_q_proj_bias3) reshape1010: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add879, R.shape([batch_size, 1, 20, 64])) permute_dims755: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_30_self_attn_k_proj_weight3, axes=None) matmul754: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm252, permute_dims755, out_dtype="void") reshape1011: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(matmul754, R.shape([batch_size, 1, 20, 64])) permute_dims756: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_30_self_attn_v_proj_weight3, axes=None) matmul755: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm252, permute_dims756, out_dtype="void") add880: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul755, model_decoder_layers_30_self_attn_v_proj_bias3) reshape1012: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add880, R.shape([batch_size, 1, 20, 64])) concat62: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape1010, reshape1011, reshape1012), axis=2) reshape1013: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat62, R.shape([batch_size, 60, 64])) lv194 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(30), R.prim_value(T.float32(1)), reshape1013), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape1014: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv194, R.shape([batch_size, 1, 20, 64])) reshape1015: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape1014, 
R.shape([batch_size, 1, 1280])) permute_dims757: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_30_self_attn_out_proj_weight3, axes=None) matmul756: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape1015, permute_dims757, out_dtype="void") add881: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul756, model_decoder_layers_30_self_attn_out_proj_bias3) add882: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add878, add881) layer_norm253: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add882, model_decoder_layers_30_encoder_attn_layer_norm_weight3, model_decoder_layers_30_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims758: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_30_encoder_attn_q_proj_weight3, axes=None) matmul757: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm253, permute_dims758, out_dtype="void") add883: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul757, model_decoder_layers_30_encoder_attn_q_proj_bias3) reshape1016: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add883, R.shape([batch_size, 1, 20, 64])) reshape1017: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape1016, R.shape([batch_size, 20, 64])) lv195 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(30), R.prim_value(T.float32(1)), reshape1017), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape1018: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv195, R.shape([batch_size, 1, 20, 64])) reshape1019: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape1018, R.shape([batch_size, 1, 1280])) permute_dims759: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_30_encoder_attn_out_proj_weight3, axes=None) matmul758: 
R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape1019, permute_dims759, out_dtype="void") add884: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul758, model_decoder_layers_30_encoder_attn_out_proj_bias3) add885: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add882, add884) layer_norm254: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add885, model_decoder_layers_30_final_layer_norm_weight3, model_decoder_layers_30_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims760: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_30_fc1_weight3, axes=None) matmul759: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.matmul(layer_norm254, permute_dims760, out_dtype="void") add886: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.add(matmul759, model_decoder_layers_30_fc1_bias3) gelu96: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.nn.gelu(add886) permute_dims761: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_30_fc2_weight3, axes=None) matmul760: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(gelu96, permute_dims761, out_dtype="void") add887: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul760, model_decoder_layers_30_fc2_bias3) add888: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add885, add887) layer_norm255: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add888, model_decoder_layers_31_self_attn_layer_norm_weight3, model_decoder_layers_31_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims762: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_31_self_attn_q_proj_weight3, axes=None) matmul761: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm255, permute_dims762, out_dtype="void") add889: R.Tensor((batch_size, 1, 1280), 
dtype="float16") = R.add(matmul761, model_decoder_layers_31_self_attn_q_proj_bias3) reshape1020: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add889, R.shape([batch_size, 1, 20, 64])) permute_dims763: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_31_self_attn_k_proj_weight3, axes=None) matmul762: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm255, permute_dims763, out_dtype="void") reshape1021: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(matmul762, R.shape([batch_size, 1, 20, 64])) permute_dims764: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_31_self_attn_v_proj_weight3, axes=None) matmul763: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm255, permute_dims764, out_dtype="void") add890: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul763, model_decoder_layers_31_self_attn_v_proj_bias3) reshape1022: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add890, R.shape([batch_size, 1, 20, 64])) concat63: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape1020, reshape1021, reshape1022), axis=2) reshape1023: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat63, R.shape([batch_size, 60, 64])) lv196 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(31), R.prim_value(T.float32(1)), reshape1023), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape1024: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv196, R.shape([batch_size, 1, 20, 64])) reshape1025: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape1024, R.shape([batch_size, 1, 1280])) permute_dims765: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_31_self_attn_out_proj_weight3, axes=None) matmul764: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape1025, 
permute_dims765, out_dtype="void") add891: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul764, model_decoder_layers_31_self_attn_out_proj_bias3) add892: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add888, add891) layer_norm256: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add892, model_decoder_layers_31_encoder_attn_layer_norm_weight3, model_decoder_layers_31_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims766: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_31_encoder_attn_q_proj_weight3, axes=None) matmul765: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(layer_norm256, permute_dims766, out_dtype="void") add893: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul765, model_decoder_layers_31_encoder_attn_q_proj_bias3) reshape1026: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(add893, R.shape([batch_size, 1, 20, 64])) reshape1027: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape1026, R.shape([batch_size, 20, 64])) lv197 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(31), R.prim_value(T.float32(1)), reshape1027), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) reshape1028: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv197, R.shape([batch_size, 1, 20, 64])) reshape1029: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape1028, R.shape([batch_size, 1, 1280])) permute_dims767: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_31_encoder_attn_out_proj_weight3, axes=None) matmul766: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(reshape1029, permute_dims767, out_dtype="void") add894: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul766, model_decoder_layers_31_encoder_attn_out_proj_bias3) add895: R.Tensor((batch_size, 
1, 1280), dtype="float16") = R.add(add892, add894) layer_norm257: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add895, model_decoder_layers_31_final_layer_norm_weight3, model_decoder_layers_31_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims768: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_31_fc1_weight3, axes=None) matmul767: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.matmul(layer_norm257, permute_dims768, out_dtype="void") add896: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.add(matmul767, model_decoder_layers_31_fc1_bias3) gelu97: R.Tensor((batch_size, 1, 5120), dtype="float16") = R.nn.gelu(add896) permute_dims769: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_31_fc2_weight3, axes=None) matmul768: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.matmul(gelu97, permute_dims769, out_dtype="void") add897: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(matmul768, model_decoder_layers_31_fc2_bias3) add898: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add895, add897) layer_norm258: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add898, model_decoder_layer_norm_weight3, model_decoder_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims770: R.Tensor((1280, 51866), dtype="float16") = R.permute_dims(model_decoder_embed_tokens_weight3, axes=None) matmul769: R.Tensor((batch_size, 1, 51866), dtype="float32") = R.matmul(layer_norm258, permute_dims770, out_dtype="float32") gv3: R.Tensor((batch_size, 1, 51866), dtype="float32") = matmul769 R.output(gv3) return gv3 @R.function def batch_encode(input_features: R.Tensor(("batch_size", 128, 3000), dtype="float16"), paged_kv_cache: R.Object, packed_params: R.Tuple(R.Tensor((1280, 128, 3), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280, 3), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1500, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 
1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((51866, 1280), dtype="float16"), R.Tensor((448, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), 
dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), 
R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"))) -> R.Tensor(("batch_size", 1500, 1280), dtype="float16"): batch_size = T.int64() R.func_attr({"num_input": 2, 
"relax.memory_plan_dynamic_func_output": 1, "tir_non_negative_var": ["vocab_size"], "tir_var_upper_bound": {"batch_size": 8, "seq_len": 15000, "total_seq_len": 1500}}) with R.dataflow(): model_encoder_conv1_weight: R.Tensor((1280, 128, 3), dtype="float16") = packed_params[0] model_encoder_conv1_bias: R.Tensor((1280,), dtype="float16") = packed_params[1] model_encoder_conv2_weight: R.Tensor((1280, 1280, 3), dtype="float16") = packed_params[2] model_encoder_conv2_bias: R.Tensor((1280,), dtype="float16") = packed_params[3] model_encoder_embed_positions_weight: R.Tensor((1500, 1280), dtype="float16") = packed_params[4] model_encoder_layers_0_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[5] model_encoder_layers_0_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[6] model_encoder_layers_0_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[7] model_encoder_layers_0_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[8] model_encoder_layers_0_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[9] model_encoder_layers_0_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[10] model_encoder_layers_0_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[11] model_encoder_layers_0_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[12] model_encoder_layers_0_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[13] model_encoder_layers_0_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[14] model_encoder_layers_0_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[15] model_encoder_layers_0_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[16] model_encoder_layers_0_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[17] model_encoder_layers_0_final_layer_norm_weight: 
R.Tensor((1280,), dtype="float16") = packed_params[18] model_encoder_layers_0_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[19] model_encoder_layers_1_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[20] model_encoder_layers_1_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[21] model_encoder_layers_1_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[22] model_encoder_layers_1_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[23] model_encoder_layers_1_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[24] model_encoder_layers_1_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[25] model_encoder_layers_1_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[26] model_encoder_layers_1_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[27] model_encoder_layers_1_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[28] model_encoder_layers_1_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[29] model_encoder_layers_1_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[30] model_encoder_layers_1_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[31] model_encoder_layers_1_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[32] model_encoder_layers_1_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[33] model_encoder_layers_1_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[34] model_encoder_layers_2_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[35] model_encoder_layers_2_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[36] model_encoder_layers_2_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[37] 
model_encoder_layers_2_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[38] model_encoder_layers_2_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[39] model_encoder_layers_2_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[40] model_encoder_layers_2_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[41] model_encoder_layers_2_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[42] model_encoder_layers_2_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[43] model_encoder_layers_2_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[44] model_encoder_layers_2_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[45] model_encoder_layers_2_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[46] model_encoder_layers_2_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[47] model_encoder_layers_2_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[48] model_encoder_layers_2_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[49] model_encoder_layers_3_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[50] model_encoder_layers_3_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[51] model_encoder_layers_3_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[52] model_encoder_layers_3_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[53] model_encoder_layers_3_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[54] model_encoder_layers_3_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[55] model_encoder_layers_3_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[56] model_encoder_layers_3_self_attn_layer_norm_weight: 
R.Tensor((1280,), dtype="float16") = packed_params[57] model_encoder_layers_3_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[58] model_encoder_layers_3_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[59] model_encoder_layers_3_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[60] model_encoder_layers_3_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[61] model_encoder_layers_3_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[62] model_encoder_layers_3_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[63] model_encoder_layers_3_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[64] model_encoder_layers_4_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[65] model_encoder_layers_4_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[66] model_encoder_layers_4_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[67] model_encoder_layers_4_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[68] model_encoder_layers_4_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[69] model_encoder_layers_4_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[70] model_encoder_layers_4_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[71] model_encoder_layers_4_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[72] model_encoder_layers_4_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[73] model_encoder_layers_4_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[74] model_encoder_layers_4_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[75] model_encoder_layers_4_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[76] model_encoder_layers_4_fc2_bias: 
R.Tensor((1280,), dtype="float16") = packed_params[77] model_encoder_layers_4_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[78] model_encoder_layers_4_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[79] model_encoder_layers_5_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[80] model_encoder_layers_5_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[81] model_encoder_layers_5_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[82] model_encoder_layers_5_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[83] model_encoder_layers_5_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[84] model_encoder_layers_5_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[85] model_encoder_layers_5_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[86] model_encoder_layers_5_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[87] model_encoder_layers_5_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[88] model_encoder_layers_5_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[89] model_encoder_layers_5_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[90] model_encoder_layers_5_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[91] model_encoder_layers_5_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[92] model_encoder_layers_5_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[93] model_encoder_layers_5_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[94] model_encoder_layers_6_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[95] model_encoder_layers_6_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[96] 
model_encoder_layers_6_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[97] model_encoder_layers_6_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[98] model_encoder_layers_6_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[99] model_encoder_layers_6_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[100] model_encoder_layers_6_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[101] model_encoder_layers_6_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[102] model_encoder_layers_6_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[103] model_encoder_layers_6_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[104] model_encoder_layers_6_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[105] model_encoder_layers_6_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[106] model_encoder_layers_6_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[107] model_encoder_layers_6_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[108] model_encoder_layers_6_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[109] model_encoder_layers_7_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[110] model_encoder_layers_7_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[111] model_encoder_layers_7_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[112] model_encoder_layers_7_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[113] model_encoder_layers_7_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[114] model_encoder_layers_7_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[115] 
model_encoder_layers_7_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[116] model_encoder_layers_7_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[117] model_encoder_layers_7_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[118] model_encoder_layers_7_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[119] model_encoder_layers_7_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[120] model_encoder_layers_7_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[121] model_encoder_layers_7_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[122] model_encoder_layers_7_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[123] model_encoder_layers_7_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[124] model_encoder_layers_8_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[125] model_encoder_layers_8_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[126] model_encoder_layers_8_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[127] model_encoder_layers_8_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[128] model_encoder_layers_8_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[129] model_encoder_layers_8_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[130] model_encoder_layers_8_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[131] model_encoder_layers_8_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[132] model_encoder_layers_8_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[133] model_encoder_layers_8_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[134] model_encoder_layers_8_fc1_bias: 
R.Tensor((5120,), dtype="float16") = packed_params[135] model_encoder_layers_8_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[136] model_encoder_layers_8_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[137] model_encoder_layers_8_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[138] model_encoder_layers_8_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[139] model_encoder_layers_9_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[140] model_encoder_layers_9_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[141] model_encoder_layers_9_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[142] model_encoder_layers_9_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[143] model_encoder_layers_9_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[144] model_encoder_layers_9_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[145] model_encoder_layers_9_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[146] model_encoder_layers_9_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[147] model_encoder_layers_9_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[148] model_encoder_layers_9_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[149] model_encoder_layers_9_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[150] model_encoder_layers_9_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[151] model_encoder_layers_9_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[152] model_encoder_layers_9_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[153] model_encoder_layers_9_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[154] 
model_encoder_layers_10_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[155] model_encoder_layers_10_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[156] model_encoder_layers_10_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[157] model_encoder_layers_10_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[158] model_encoder_layers_10_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[159] model_encoder_layers_10_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[160] model_encoder_layers_10_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[161] model_encoder_layers_10_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[162] model_encoder_layers_10_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[163] model_encoder_layers_10_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[164] model_encoder_layers_10_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[165] model_encoder_layers_10_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[166] model_encoder_layers_10_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[167] model_encoder_layers_10_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[168] model_encoder_layers_10_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[169] model_encoder_layers_11_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[170] model_encoder_layers_11_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[171] model_encoder_layers_11_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[172] model_encoder_layers_11_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[173] 
model_encoder_layers_11_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[174] model_encoder_layers_11_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[175] model_encoder_layers_11_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[176] model_encoder_layers_11_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[177] model_encoder_layers_11_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[178] model_encoder_layers_11_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[179] model_encoder_layers_11_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[180] model_encoder_layers_11_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[181] model_encoder_layers_11_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[182] model_encoder_layers_11_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[183] model_encoder_layers_11_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[184] model_encoder_layers_12_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[185] model_encoder_layers_12_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[186] model_encoder_layers_12_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[187] model_encoder_layers_12_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[188] model_encoder_layers_12_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[189] model_encoder_layers_12_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[190] model_encoder_layers_12_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[191] model_encoder_layers_12_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[192] 
model_encoder_layers_12_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[193] model_encoder_layers_12_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[194] model_encoder_layers_12_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[195] model_encoder_layers_12_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[196] model_encoder_layers_12_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[197] model_encoder_layers_12_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[198] model_encoder_layers_12_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[199] model_encoder_layers_13_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[200] model_encoder_layers_13_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[201] model_encoder_layers_13_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[202] model_encoder_layers_13_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[203] model_encoder_layers_13_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[204] model_encoder_layers_13_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[205] model_encoder_layers_13_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[206] model_encoder_layers_13_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[207] model_encoder_layers_13_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[208] model_encoder_layers_13_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[209] model_encoder_layers_13_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[210] model_encoder_layers_13_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[211] model_encoder_layers_13_fc2_bias: R.Tensor((1280,), 
dtype="float16") = packed_params[212] model_encoder_layers_13_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[213] model_encoder_layers_13_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[214] model_encoder_layers_14_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[215] model_encoder_layers_14_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[216] model_encoder_layers_14_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[217] model_encoder_layers_14_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[218] model_encoder_layers_14_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[219] model_encoder_layers_14_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[220] model_encoder_layers_14_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[221] model_encoder_layers_14_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[222] model_encoder_layers_14_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[223] model_encoder_layers_14_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[224] model_encoder_layers_14_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[225] model_encoder_layers_14_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[226] model_encoder_layers_14_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[227] model_encoder_layers_14_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[228] model_encoder_layers_14_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[229] model_encoder_layers_15_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[230] model_encoder_layers_15_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = 
packed_params[231] model_encoder_layers_15_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[232] model_encoder_layers_15_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[233] model_encoder_layers_15_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[234] model_encoder_layers_15_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[235] model_encoder_layers_15_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[236] model_encoder_layers_15_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[237] model_encoder_layers_15_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[238] model_encoder_layers_15_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[239] model_encoder_layers_15_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[240] model_encoder_layers_15_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[241] model_encoder_layers_15_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[242] model_encoder_layers_15_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[243] model_encoder_layers_15_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[244] model_encoder_layers_16_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[245] model_encoder_layers_16_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[246] model_encoder_layers_16_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[247] model_encoder_layers_16_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[248] model_encoder_layers_16_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[249] model_encoder_layers_16_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[250] 
model_encoder_layers_16_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[251] model_encoder_layers_16_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[252] model_encoder_layers_16_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[253] model_encoder_layers_16_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[254] model_encoder_layers_16_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[255] model_encoder_layers_16_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[256] model_encoder_layers_16_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[257] model_encoder_layers_16_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[258] model_encoder_layers_16_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[259] model_encoder_layers_17_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[260] model_encoder_layers_17_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[261] model_encoder_layers_17_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[262] model_encoder_layers_17_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[263] model_encoder_layers_17_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[264] model_encoder_layers_17_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[265] model_encoder_layers_17_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[266] model_encoder_layers_17_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[267] model_encoder_layers_17_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[268] model_encoder_layers_17_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[269] 
model_encoder_layers_17_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[270] model_encoder_layers_17_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[271] model_encoder_layers_17_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[272] model_encoder_layers_17_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[273] model_encoder_layers_17_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[274] model_encoder_layers_18_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[275] model_encoder_layers_18_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[276] model_encoder_layers_18_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[277] model_encoder_layers_18_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[278] model_encoder_layers_18_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[279] model_encoder_layers_18_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[280] model_encoder_layers_18_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[281] model_encoder_layers_18_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[282] model_encoder_layers_18_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[283] model_encoder_layers_18_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[284] model_encoder_layers_18_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[285] model_encoder_layers_18_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[286] model_encoder_layers_18_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[287] model_encoder_layers_18_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[288] model_encoder_layers_18_final_layer_norm_bias: R.Tensor((1280,), 
dtype="float16") = packed_params[289] model_encoder_layers_19_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[290] model_encoder_layers_19_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[291] model_encoder_layers_19_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[292] model_encoder_layers_19_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[293] model_encoder_layers_19_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[294] model_encoder_layers_19_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[295] model_encoder_layers_19_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[296] model_encoder_layers_19_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[297] model_encoder_layers_19_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[298] model_encoder_layers_19_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[299] model_encoder_layers_19_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[300] model_encoder_layers_19_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[301] model_encoder_layers_19_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[302] model_encoder_layers_19_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[303] model_encoder_layers_19_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[304] model_encoder_layers_20_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[305] model_encoder_layers_20_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[306] model_encoder_layers_20_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[307] model_encoder_layers_20_self_attn_q_proj_weight: R.Tensor((1280, 1280), 
dtype="float16") = packed_params[308] model_encoder_layers_20_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[309] model_encoder_layers_20_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[310] model_encoder_layers_20_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[311] model_encoder_layers_20_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[312] model_encoder_layers_20_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[313] model_encoder_layers_20_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[314] model_encoder_layers_20_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[315] model_encoder_layers_20_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[316] model_encoder_layers_20_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[317] model_encoder_layers_20_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[318] model_encoder_layers_20_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[319] model_encoder_layers_21_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[320] model_encoder_layers_21_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[321] model_encoder_layers_21_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[322] model_encoder_layers_21_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[323] model_encoder_layers_21_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[324] model_encoder_layers_21_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[325] model_encoder_layers_21_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[326] model_encoder_layers_21_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = 
packed_params[327] model_encoder_layers_21_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[328] model_encoder_layers_21_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[329] model_encoder_layers_21_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[330] model_encoder_layers_21_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[331] model_encoder_layers_21_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[332] model_encoder_layers_21_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[333] model_encoder_layers_21_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[334] model_encoder_layers_22_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[335] model_encoder_layers_22_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[336] model_encoder_layers_22_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[337] model_encoder_layers_22_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[338] model_encoder_layers_22_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[339] model_encoder_layers_22_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[340] model_encoder_layers_22_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[341] model_encoder_layers_22_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[342] model_encoder_layers_22_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[343] model_encoder_layers_22_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[344] model_encoder_layers_22_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[345] model_encoder_layers_22_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[346] model_encoder_layers_22_fc2_bias: 
R.Tensor((1280,), dtype="float16") = packed_params[347] model_encoder_layers_22_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[348] model_encoder_layers_22_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[349] model_encoder_layers_23_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[350] model_encoder_layers_23_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[351] model_encoder_layers_23_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[352] model_encoder_layers_23_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[353] model_encoder_layers_23_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[354] model_encoder_layers_23_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[355] model_encoder_layers_23_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[356] model_encoder_layers_23_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[357] model_encoder_layers_23_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[358] model_encoder_layers_23_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[359] model_encoder_layers_23_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[360] model_encoder_layers_23_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[361] model_encoder_layers_23_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[362] model_encoder_layers_23_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[363] model_encoder_layers_23_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[364] model_encoder_layers_24_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[365] model_encoder_layers_24_self_attn_v_proj_weight: R.Tensor((1280, 1280), 
dtype="float16") = packed_params[366] model_encoder_layers_24_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[367] model_encoder_layers_24_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[368] model_encoder_layers_24_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[369] model_encoder_layers_24_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[370] model_encoder_layers_24_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[371] model_encoder_layers_24_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[372] model_encoder_layers_24_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[373] model_encoder_layers_24_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[374] model_encoder_layers_24_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[375] model_encoder_layers_24_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[376] model_encoder_layers_24_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[377] model_encoder_layers_24_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[378] model_encoder_layers_24_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[379] model_encoder_layers_25_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[380] model_encoder_layers_25_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[381] model_encoder_layers_25_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[382] model_encoder_layers_25_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[383] model_encoder_layers_25_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[384] model_encoder_layers_25_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = 
packed_params[385] model_encoder_layers_25_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[386] model_encoder_layers_25_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[387] model_encoder_layers_25_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[388] model_encoder_layers_25_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[389] model_encoder_layers_25_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[390] model_encoder_layers_25_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[391] model_encoder_layers_25_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[392] model_encoder_layers_25_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[393] model_encoder_layers_25_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[394] model_encoder_layers_26_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[395] model_encoder_layers_26_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[396] model_encoder_layers_26_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[397] model_encoder_layers_26_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[398] model_encoder_layers_26_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[399] model_encoder_layers_26_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[400] model_encoder_layers_26_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[401] model_encoder_layers_26_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[402] model_encoder_layers_26_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[403] model_encoder_layers_26_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[404] 
model_encoder_layers_26_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[405] model_encoder_layers_26_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[406] model_encoder_layers_26_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[407] model_encoder_layers_26_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[408] model_encoder_layers_26_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[409] model_encoder_layers_27_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[410] model_encoder_layers_27_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[411] model_encoder_layers_27_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[412] model_encoder_layers_27_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[413] model_encoder_layers_27_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[414] model_encoder_layers_27_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[415] model_encoder_layers_27_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[416] model_encoder_layers_27_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[417] model_encoder_layers_27_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[418] model_encoder_layers_27_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[419] model_encoder_layers_27_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[420] model_encoder_layers_27_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[421] model_encoder_layers_27_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[422] model_encoder_layers_27_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[423] model_encoder_layers_27_final_layer_norm_bias: R.Tensor((1280,), 
dtype="float16") = packed_params[424] model_encoder_layers_28_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[425] model_encoder_layers_28_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[426] model_encoder_layers_28_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[427] model_encoder_layers_28_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[428] model_encoder_layers_28_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[429] model_encoder_layers_28_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[430] model_encoder_layers_28_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[431] model_encoder_layers_28_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[432] model_encoder_layers_28_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[433] model_encoder_layers_28_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[434] model_encoder_layers_28_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[435] model_encoder_layers_28_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[436] model_encoder_layers_28_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[437] model_encoder_layers_28_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[438] model_encoder_layers_28_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[439] model_encoder_layers_29_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[440] model_encoder_layers_29_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[441] model_encoder_layers_29_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[442] model_encoder_layers_29_self_attn_q_proj_weight: R.Tensor((1280, 1280), 
dtype="float16") = packed_params[443] model_encoder_layers_29_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[444] model_encoder_layers_29_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[445] model_encoder_layers_29_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[446] model_encoder_layers_29_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[447] model_encoder_layers_29_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[448] model_encoder_layers_29_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[449] model_encoder_layers_29_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[450] model_encoder_layers_29_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[451] model_encoder_layers_29_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[452] model_encoder_layers_29_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[453] model_encoder_layers_29_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[454] model_encoder_layers_30_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[455] model_encoder_layers_30_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[456] model_encoder_layers_30_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[457] model_encoder_layers_30_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[458] model_encoder_layers_30_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[459] model_encoder_layers_30_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[460] model_encoder_layers_30_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[461] model_encoder_layers_30_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = 
packed_params[462] model_encoder_layers_30_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[463] model_encoder_layers_30_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[464] model_encoder_layers_30_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[465] model_encoder_layers_30_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[466] model_encoder_layers_30_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[467] model_encoder_layers_30_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[468] model_encoder_layers_30_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[469] model_encoder_layers_31_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[470] model_encoder_layers_31_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[471] model_encoder_layers_31_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[472] model_encoder_layers_31_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[473] model_encoder_layers_31_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[474] model_encoder_layers_31_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[475] model_encoder_layers_31_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[476] model_encoder_layers_31_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[477] model_encoder_layers_31_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[478] model_encoder_layers_31_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[479] model_encoder_layers_31_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[480] model_encoder_layers_31_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[481] model_encoder_layers_31_fc2_bias: 
R.Tensor((1280,), dtype="float16") = packed_params[482] model_encoder_layers_31_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[483] model_encoder_layers_31_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[484] model_encoder_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[485] model_encoder_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[486] model_decoder_embed_tokens_weight: R.Tensor((51866, 1280), dtype="float16") = packed_params[487] model_decoder_embed_positions_weight: R.Tensor((448, 1280), dtype="float16") = packed_params[488] model_decoder_layers_0_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[489] model_decoder_layers_0_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[490] model_decoder_layers_0_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[491] model_decoder_layers_0_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[492] model_decoder_layers_0_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[493] model_decoder_layers_0_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[494] model_decoder_layers_0_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[495] model_decoder_layers_0_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[496] model_decoder_layers_0_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[497] model_decoder_layers_0_encoder_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[498] model_decoder_layers_0_encoder_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[499] model_decoder_layers_0_encoder_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[500] model_decoder_layers_0_encoder_attn_q_proj_weight: R.Tensor((1280, 1280), 
dtype="float16") = packed_params[501] model_decoder_layers_0_encoder_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[502] model_decoder_layers_0_encoder_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[503] model_decoder_layers_0_encoder_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[504] model_decoder_layers_0_encoder_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[505] model_decoder_layers_0_encoder_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[506] model_decoder_layers_0_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[507] model_decoder_layers_0_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[508] model_decoder_layers_0_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[509] model_decoder_layers_0_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[510] model_decoder_layers_0_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[511] model_decoder_layers_0_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[512] model_decoder_layers_1_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[513] model_decoder_layers_1_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[514] model_decoder_layers_1_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[515] model_decoder_layers_1_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[516] model_decoder_layers_1_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[517] model_decoder_layers_1_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[518] model_decoder_layers_1_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[519] model_decoder_layers_1_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = 
packed_params[520] model_decoder_layers_1_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[521] model_decoder_layers_1_encoder_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[522] model_decoder_layers_1_encoder_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[523] model_decoder_layers_1_encoder_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[524] model_decoder_layers_1_encoder_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[525] model_decoder_layers_1_encoder_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[526] model_decoder_layers_1_encoder_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[527] model_decoder_layers_1_encoder_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[528] model_decoder_layers_1_encoder_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[529] model_decoder_layers_1_encoder_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[530] model_decoder_layers_1_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[531] model_decoder_layers_1_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[532] model_decoder_layers_1_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[533] model_decoder_layers_1_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[534] model_decoder_layers_1_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[535] model_decoder_layers_1_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[536] model_decoder_layers_2_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[537] model_decoder_layers_2_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[538] model_decoder_layers_2_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = 
packed_params[539] model_decoder_layers_2_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[540] model_decoder_layers_2_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[541] model_decoder_layers_2_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[542] model_decoder_layers_2_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[543] model_decoder_layers_2_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[544] model_decoder_layers_2_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[545] model_decoder_layers_2_encoder_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[546] model_decoder_layers_2_encoder_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[547] model_decoder_layers_2_encoder_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[548] model_decoder_layers_2_encoder_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[549] model_decoder_layers_2_encoder_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[550] model_decoder_layers_2_encoder_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[551] model_decoder_layers_2_encoder_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[552] model_decoder_layers_2_encoder_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[553] model_decoder_layers_2_encoder_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[554] model_decoder_layers_2_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[555] model_decoder_layers_2_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[556] model_decoder_layers_2_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[557] model_decoder_layers_2_fc2_bias: R.Tensor((1280,), dtype="float16") = 
packed_params[558] model_decoder_layers_2_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[559] model_decoder_layers_2_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[560] model_decoder_layers_3_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[561] model_decoder_layers_3_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[562] model_decoder_layers_3_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[563] model_decoder_layers_3_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[564] model_decoder_layers_3_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[565] model_decoder_layers_3_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[566] model_decoder_layers_3_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[567] model_decoder_layers_3_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[568] model_decoder_layers_3_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[569] model_decoder_layers_3_encoder_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[570] model_decoder_layers_3_encoder_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[571] model_decoder_layers_3_encoder_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[572] model_decoder_layers_3_encoder_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[573] model_decoder_layers_3_encoder_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[574] model_decoder_layers_3_encoder_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[575] model_decoder_layers_3_encoder_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[576] 
model_decoder_layers_3_encoder_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[577] model_decoder_layers_3_encoder_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[578] model_decoder_layers_3_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[579] model_decoder_layers_3_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[580] model_decoder_layers_3_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[581] model_decoder_layers_3_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[582] model_decoder_layers_3_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[583] model_decoder_layers_3_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[584] model_decoder_layers_4_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[585] model_decoder_layers_4_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[586] model_decoder_layers_4_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[587] model_decoder_layers_4_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[588] model_decoder_layers_4_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[589] model_decoder_layers_4_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[590] model_decoder_layers_4_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[591] model_decoder_layers_4_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[592] model_decoder_layers_4_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[593] model_decoder_layers_4_encoder_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[594] model_decoder_layers_4_encoder_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[595] 
model_decoder_layers_4_encoder_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[596] model_decoder_layers_4_encoder_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[597] model_decoder_layers_4_encoder_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[598] model_decoder_layers_4_encoder_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[599] model_decoder_layers_4_encoder_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[600] model_decoder_layers_4_encoder_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[601] model_decoder_layers_4_encoder_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[602] model_decoder_layers_4_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[603] model_decoder_layers_4_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[604] model_decoder_layers_4_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[605] model_decoder_layers_4_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[606] model_decoder_layers_4_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[607] model_decoder_layers_4_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[608] model_decoder_layers_5_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[609] model_decoder_layers_5_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[610] model_decoder_layers_5_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[611] model_decoder_layers_5_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[612] model_decoder_layers_5_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[613] model_decoder_layers_5_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[614] 
model_decoder_layers_5_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[615] model_decoder_layers_5_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[616] model_decoder_layers_5_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[617] model_decoder_layers_5_encoder_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[618] model_decoder_layers_5_encoder_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[619] model_decoder_layers_5_encoder_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[620] model_decoder_layers_5_encoder_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[621] model_decoder_layers_5_encoder_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[622] model_decoder_layers_5_encoder_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[623] model_decoder_layers_5_encoder_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[624] model_decoder_layers_5_encoder_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[625] model_decoder_layers_5_encoder_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[626] model_decoder_layers_5_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[627] model_decoder_layers_5_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[628] model_decoder_layers_5_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[629] model_decoder_layers_5_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[630] model_decoder_layers_5_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[631] model_decoder_layers_5_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[632] model_decoder_layers_6_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[633] 
model_decoder_layers_6_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[634] model_decoder_layers_6_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[635] model_decoder_layers_6_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[636] model_decoder_layers_6_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[637] model_decoder_layers_6_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[638] model_decoder_layers_6_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[639] model_decoder_layers_6_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[640] model_decoder_layers_6_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[641] model_decoder_layers_6_encoder_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[642] model_decoder_layers_6_encoder_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[643] model_decoder_layers_6_encoder_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[644] model_decoder_layers_6_encoder_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[645] model_decoder_layers_6_encoder_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[646] model_decoder_layers_6_encoder_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[647] model_decoder_layers_6_encoder_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[648] model_decoder_layers_6_encoder_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[649] model_decoder_layers_6_encoder_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[650] model_decoder_layers_6_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[651] model_decoder_layers_6_fc1_bias: R.Tensor((5120,), 
dtype="float16") = packed_params[652] model_decoder_layers_6_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[653] model_decoder_layers_6_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[654] model_decoder_layers_6_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[655] model_decoder_layers_6_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[656] model_decoder_layers_7_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[657] model_decoder_layers_7_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[658] model_decoder_layers_7_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[659] model_decoder_layers_7_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[660] model_decoder_layers_7_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[661] model_decoder_layers_7_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[662] model_decoder_layers_7_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[663] model_decoder_layers_7_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[664] model_decoder_layers_7_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[665] model_decoder_layers_7_encoder_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[666] model_decoder_layers_7_encoder_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[667] model_decoder_layers_7_encoder_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[668] model_decoder_layers_7_encoder_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[669] model_decoder_layers_7_encoder_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[670] model_decoder_layers_7_encoder_attn_out_proj_weight: 
R.Tensor((1280, 1280), dtype="float16") = packed_params[671] model_decoder_layers_7_encoder_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[672] model_decoder_layers_7_encoder_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[673] model_decoder_layers_7_encoder_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[674] model_decoder_layers_7_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[675] model_decoder_layers_7_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[676] model_decoder_layers_7_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[677] model_decoder_layers_7_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[678] model_decoder_layers_7_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[679] model_decoder_layers_7_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[680] model_decoder_layers_8_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[681] model_decoder_layers_8_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[682] model_decoder_layers_8_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[683] model_decoder_layers_8_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[684] model_decoder_layers_8_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[685] model_decoder_layers_8_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[686] model_decoder_layers_8_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[687] model_decoder_layers_8_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[688] model_decoder_layers_8_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[689] model_decoder_layers_8_encoder_attn_k_proj_weight: R.Tensor((1280, 1280), 
dtype="float16") = packed_params[690] model_decoder_layers_8_encoder_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[691] model_decoder_layers_8_encoder_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[692] model_decoder_layers_8_encoder_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[693] model_decoder_layers_8_encoder_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[694] model_decoder_layers_8_encoder_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[695] model_decoder_layers_8_encoder_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[696] model_decoder_layers_8_encoder_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[697] model_decoder_layers_8_encoder_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[698] model_decoder_layers_8_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[699] model_decoder_layers_8_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[700] model_decoder_layers_8_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[701] model_decoder_layers_8_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[702] model_decoder_layers_8_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[703] model_decoder_layers_8_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[704] model_decoder_layers_9_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[705] model_decoder_layers_9_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[706] model_decoder_layers_9_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[707] model_decoder_layers_9_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[708] model_decoder_layers_9_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") 
= packed_params[709] model_decoder_layers_9_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[710] model_decoder_layers_9_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[711] model_decoder_layers_9_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[712] model_decoder_layers_9_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[713] model_decoder_layers_9_encoder_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[714] model_decoder_layers_9_encoder_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[715] model_decoder_layers_9_encoder_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[716] model_decoder_layers_9_encoder_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[717] model_decoder_layers_9_encoder_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[718] model_decoder_layers_9_encoder_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[719] model_decoder_layers_9_encoder_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[720] model_decoder_layers_9_encoder_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[721] model_decoder_layers_9_encoder_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[722] model_decoder_layers_9_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[723] model_decoder_layers_9_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[724] model_decoder_layers_9_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[725] model_decoder_layers_9_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[726] model_decoder_layers_9_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[727] model_decoder_layers_9_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = 
packed_params[728] model_decoder_layers_10_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[729] model_decoder_layers_10_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[730] model_decoder_layers_10_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[731] model_decoder_layers_10_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[732] model_decoder_layers_10_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[733] model_decoder_layers_10_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[734] model_decoder_layers_10_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[735] model_decoder_layers_10_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[736] model_decoder_layers_10_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[737] model_decoder_layers_10_encoder_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[738] model_decoder_layers_10_encoder_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[739] model_decoder_layers_10_encoder_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[740] model_decoder_layers_10_encoder_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[741] model_decoder_layers_10_encoder_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[742] model_decoder_layers_10_encoder_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[743] model_decoder_layers_10_encoder_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[744] model_decoder_layers_10_encoder_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[745] model_decoder_layers_10_encoder_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[746] 
model_decoder_layers_10_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[747] model_decoder_layers_10_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[748] model_decoder_layers_10_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[749] model_decoder_layers_10_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[750] model_decoder_layers_10_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[751] model_decoder_layers_10_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[752] model_decoder_layers_11_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[753] model_decoder_layers_11_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[754] model_decoder_layers_11_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[755] model_decoder_layers_11_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[756] model_decoder_layers_11_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[757] model_decoder_layers_11_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[758] model_decoder_layers_11_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[759] model_decoder_layers_11_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[760] model_decoder_layers_11_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[761] model_decoder_layers_11_encoder_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[762] model_decoder_layers_11_encoder_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[763] model_decoder_layers_11_encoder_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[764] model_decoder_layers_11_encoder_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[765] 
model_decoder_layers_11_encoder_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[766] model_decoder_layers_11_encoder_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[767] model_decoder_layers_11_encoder_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[768] model_decoder_layers_11_encoder_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[769] model_decoder_layers_11_encoder_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[770] model_decoder_layers_11_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[771] model_decoder_layers_11_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[772] model_decoder_layers_11_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[773] model_decoder_layers_11_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[774] model_decoder_layers_11_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[775] model_decoder_layers_11_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[776] model_decoder_layers_12_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[777] model_decoder_layers_12_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[778] model_decoder_layers_12_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[779] model_decoder_layers_12_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[780] model_decoder_layers_12_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[781] model_decoder_layers_12_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[782] model_decoder_layers_12_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[783] model_decoder_layers_12_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[784] 
model_decoder_layers_12_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[785] model_decoder_layers_12_encoder_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[786] model_decoder_layers_12_encoder_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[787] model_decoder_layers_12_encoder_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[788] model_decoder_layers_12_encoder_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[789] model_decoder_layers_12_encoder_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[790] model_decoder_layers_12_encoder_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[791] model_decoder_layers_12_encoder_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[792] model_decoder_layers_12_encoder_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[793] model_decoder_layers_12_encoder_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[794] model_decoder_layers_12_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[795] model_decoder_layers_12_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[796] model_decoder_layers_12_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[797] model_decoder_layers_12_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[798] model_decoder_layers_12_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[799] model_decoder_layers_12_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[800] model_decoder_layers_13_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[801] model_decoder_layers_13_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[802] model_decoder_layers_13_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = 
packed_params[803] model_decoder_layers_13_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[804] model_decoder_layers_13_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[805] model_decoder_layers_13_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[806] model_decoder_layers_13_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[807] model_decoder_layers_13_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[808] model_decoder_layers_13_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[809] model_decoder_layers_13_encoder_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[810] model_decoder_layers_13_encoder_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[811] model_decoder_layers_13_encoder_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[812] model_decoder_layers_13_encoder_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[813] model_decoder_layers_13_encoder_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[814] model_decoder_layers_13_encoder_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[815] model_decoder_layers_13_encoder_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[816] model_decoder_layers_13_encoder_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[817] model_decoder_layers_13_encoder_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[818] model_decoder_layers_13_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[819] model_decoder_layers_13_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[820] model_decoder_layers_13_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[821] model_decoder_layers_13_fc2_bias: R.Tensor((1280,), 
dtype="float16") = packed_params[822] model_decoder_layers_13_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[823] model_decoder_layers_13_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[824] model_decoder_layers_14_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[825] model_decoder_layers_14_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[826] model_decoder_layers_14_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[827] model_decoder_layers_14_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[828] model_decoder_layers_14_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[829] model_decoder_layers_14_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[830] model_decoder_layers_14_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[831] model_decoder_layers_14_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[832] model_decoder_layers_14_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[833] model_decoder_layers_14_encoder_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[834] model_decoder_layers_14_encoder_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[835] model_decoder_layers_14_encoder_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[836] model_decoder_layers_14_encoder_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[837] model_decoder_layers_14_encoder_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[838] model_decoder_layers_14_encoder_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[839] model_decoder_layers_14_encoder_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[840] 
model_decoder_layers_14_encoder_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[841] model_decoder_layers_14_encoder_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[842] model_decoder_layers_14_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[843] model_decoder_layers_14_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[844] model_decoder_layers_14_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[845] model_decoder_layers_14_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[846] model_decoder_layers_14_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[847] model_decoder_layers_14_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[848] model_decoder_layers_15_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[849] model_decoder_layers_15_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[850] model_decoder_layers_15_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[851] model_decoder_layers_15_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[852] model_decoder_layers_15_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[853] model_decoder_layers_15_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[854] model_decoder_layers_15_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[855] model_decoder_layers_15_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[856] model_decoder_layers_15_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[857] model_decoder_layers_15_encoder_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[858] model_decoder_layers_15_encoder_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[859] 
model_decoder_layers_15_encoder_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[860] model_decoder_layers_15_encoder_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[861] model_decoder_layers_15_encoder_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[862] model_decoder_layers_15_encoder_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[863] model_decoder_layers_15_encoder_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[864] model_decoder_layers_15_encoder_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[865] model_decoder_layers_15_encoder_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[866] model_decoder_layers_15_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[867] model_decoder_layers_15_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[868] model_decoder_layers_15_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[869] model_decoder_layers_15_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[870] model_decoder_layers_15_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[871] model_decoder_layers_15_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[872] model_decoder_layers_16_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[873] model_decoder_layers_16_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[874] model_decoder_layers_16_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[875] model_decoder_layers_16_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[876] model_decoder_layers_16_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[877] model_decoder_layers_16_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[878] 
model_decoder_layers_16_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[879] model_decoder_layers_16_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[880] model_decoder_layers_16_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[881] model_decoder_layers_16_encoder_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[882] model_decoder_layers_16_encoder_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[883] model_decoder_layers_16_encoder_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[884] model_decoder_layers_16_encoder_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[885] model_decoder_layers_16_encoder_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[886] model_decoder_layers_16_encoder_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[887] model_decoder_layers_16_encoder_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[888] model_decoder_layers_16_encoder_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[889] model_decoder_layers_16_encoder_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[890] model_decoder_layers_16_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[891] model_decoder_layers_16_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[892] model_decoder_layers_16_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[893] model_decoder_layers_16_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[894] model_decoder_layers_16_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[895] model_decoder_layers_16_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[896] model_decoder_layers_17_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = 
packed_params[897] model_decoder_layers_17_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[898] model_decoder_layers_17_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[899] model_decoder_layers_17_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[900] model_decoder_layers_17_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[901] model_decoder_layers_17_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[902] model_decoder_layers_17_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[903] model_decoder_layers_17_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[904] model_decoder_layers_17_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[905] model_decoder_layers_17_encoder_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[906] model_decoder_layers_17_encoder_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[907] model_decoder_layers_17_encoder_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[908] model_decoder_layers_17_encoder_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[909] model_decoder_layers_17_encoder_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[910] model_decoder_layers_17_encoder_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[911] model_decoder_layers_17_encoder_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[912] model_decoder_layers_17_encoder_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[913] model_decoder_layers_17_encoder_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[914] model_decoder_layers_17_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[915] 
model_decoder_layers_17_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[916] model_decoder_layers_17_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[917] model_decoder_layers_17_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[918] model_decoder_layers_17_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[919] model_decoder_layers_17_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[920] model_decoder_layers_18_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[921] model_decoder_layers_18_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[922] model_decoder_layers_18_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[923] model_decoder_layers_18_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[924] model_decoder_layers_18_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[925] model_decoder_layers_18_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[926] model_decoder_layers_18_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[927] model_decoder_layers_18_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[928] model_decoder_layers_18_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[929] model_decoder_layers_18_encoder_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[930] model_decoder_layers_18_encoder_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[931] model_decoder_layers_18_encoder_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[932] model_decoder_layers_18_encoder_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[933] model_decoder_layers_18_encoder_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = 
packed_params[934] model_decoder_layers_18_encoder_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[935] model_decoder_layers_18_encoder_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[936] model_decoder_layers_18_encoder_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[937] model_decoder_layers_18_encoder_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[938] model_decoder_layers_18_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[939] model_decoder_layers_18_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[940] model_decoder_layers_18_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[941] model_decoder_layers_18_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[942] model_decoder_layers_18_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[943] model_decoder_layers_18_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[944] model_decoder_layers_19_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[945] model_decoder_layers_19_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[946] model_decoder_layers_19_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[947] model_decoder_layers_19_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[948] model_decoder_layers_19_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[949] model_decoder_layers_19_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[950] model_decoder_layers_19_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[951] model_decoder_layers_19_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[952] model_decoder_layers_19_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = 
packed_params[953] model_decoder_layers_19_encoder_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[954] model_decoder_layers_19_encoder_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[955] model_decoder_layers_19_encoder_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[956] model_decoder_layers_19_encoder_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[957] model_decoder_layers_19_encoder_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[958] model_decoder_layers_19_encoder_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[959] model_decoder_layers_19_encoder_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[960] model_decoder_layers_19_encoder_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[961] model_decoder_layers_19_encoder_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[962] model_decoder_layers_19_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[963] model_decoder_layers_19_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[964] model_decoder_layers_19_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[965] model_decoder_layers_19_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[966] model_decoder_layers_19_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[967] model_decoder_layers_19_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[968] model_decoder_layers_20_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[969] model_decoder_layers_20_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[970] model_decoder_layers_20_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[971] model_decoder_layers_20_self_attn_q_proj_weight: R.Tensor((1280, 1280), 
dtype="float16") = packed_params[972] model_decoder_layers_20_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[973] model_decoder_layers_20_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[974] model_decoder_layers_20_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[975] model_decoder_layers_20_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[976] model_decoder_layers_20_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[977] model_decoder_layers_20_encoder_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[978] model_decoder_layers_20_encoder_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[979] model_decoder_layers_20_encoder_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[980] model_decoder_layers_20_encoder_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[981] model_decoder_layers_20_encoder_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[982] model_decoder_layers_20_encoder_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[983] model_decoder_layers_20_encoder_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[984] model_decoder_layers_20_encoder_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[985] model_decoder_layers_20_encoder_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[986] model_decoder_layers_20_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[987] model_decoder_layers_20_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[988] model_decoder_layers_20_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[989] model_decoder_layers_20_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[990] model_decoder_layers_20_final_layer_norm_weight: 
R.Tensor((1280,), dtype="float16") = packed_params[991] model_decoder_layers_20_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[992] model_decoder_layers_21_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[993] model_decoder_layers_21_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[994] model_decoder_layers_21_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[995] model_decoder_layers_21_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[996] model_decoder_layers_21_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[997] model_decoder_layers_21_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[998] model_decoder_layers_21_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[999] model_decoder_layers_21_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[1000] model_decoder_layers_21_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[1001] model_decoder_layers_21_encoder_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1002] model_decoder_layers_21_encoder_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1003] model_decoder_layers_21_encoder_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1004] model_decoder_layers_21_encoder_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1005] model_decoder_layers_21_encoder_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1006] model_decoder_layers_21_encoder_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1007] model_decoder_layers_21_encoder_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1008] model_decoder_layers_21_encoder_attn_layer_norm_weight: R.Tensor((1280,), 
dtype="float16") = packed_params[1009] model_decoder_layers_21_encoder_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[1010] model_decoder_layers_21_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[1011] model_decoder_layers_21_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[1012] model_decoder_layers_21_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[1013] model_decoder_layers_21_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[1014] model_decoder_layers_21_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[1015] model_decoder_layers_21_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[1016] model_decoder_layers_22_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1017] model_decoder_layers_22_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1018] model_decoder_layers_22_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1019] model_decoder_layers_22_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1020] model_decoder_layers_22_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1021] model_decoder_layers_22_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1022] model_decoder_layers_22_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1023] model_decoder_layers_22_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[1024] model_decoder_layers_22_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[1025] model_decoder_layers_22_encoder_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1026] model_decoder_layers_22_encoder_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1027] model_decoder_layers_22_encoder_attn_v_proj_bias: 
R.Tensor((1280,), dtype="float16") = packed_params[1028] model_decoder_layers_22_encoder_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1029] model_decoder_layers_22_encoder_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1030] model_decoder_layers_22_encoder_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1031] model_decoder_layers_22_encoder_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1032] model_decoder_layers_22_encoder_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[1033] model_decoder_layers_22_encoder_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[1034] model_decoder_layers_22_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[1035] model_decoder_layers_22_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[1036] model_decoder_layers_22_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[1037] model_decoder_layers_22_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[1038] model_decoder_layers_22_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[1039] model_decoder_layers_22_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[1040] model_decoder_layers_23_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1041] model_decoder_layers_23_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1042] model_decoder_layers_23_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1043] model_decoder_layers_23_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1044] model_decoder_layers_23_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1045] model_decoder_layers_23_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1046] 
model_decoder_layers_23_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1047] model_decoder_layers_23_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[1048] model_decoder_layers_23_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[1049] model_decoder_layers_23_encoder_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1050] model_decoder_layers_23_encoder_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1051] model_decoder_layers_23_encoder_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1052] model_decoder_layers_23_encoder_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1053] model_decoder_layers_23_encoder_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1054] model_decoder_layers_23_encoder_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1055] model_decoder_layers_23_encoder_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1056] model_decoder_layers_23_encoder_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[1057] model_decoder_layers_23_encoder_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[1058] model_decoder_layers_23_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[1059] model_decoder_layers_23_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[1060] model_decoder_layers_23_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[1061] model_decoder_layers_23_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[1062] model_decoder_layers_23_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[1063] model_decoder_layers_23_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[1064] model_decoder_layers_24_self_attn_k_proj_weight: R.Tensor((1280, 1280), 
dtype="float16") = packed_params[1065] model_decoder_layers_24_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1066] model_decoder_layers_24_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1067] model_decoder_layers_24_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1068] model_decoder_layers_24_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1069] model_decoder_layers_24_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1070] model_decoder_layers_24_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1071] model_decoder_layers_24_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[1072] model_decoder_layers_24_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[1073] model_decoder_layers_24_encoder_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1074] model_decoder_layers_24_encoder_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1075] model_decoder_layers_24_encoder_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1076] model_decoder_layers_24_encoder_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1077] model_decoder_layers_24_encoder_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1078] model_decoder_layers_24_encoder_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1079] model_decoder_layers_24_encoder_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1080] model_decoder_layers_24_encoder_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[1081] model_decoder_layers_24_encoder_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[1082] model_decoder_layers_24_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = 
packed_params[1083] model_decoder_layers_24_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[1084] model_decoder_layers_24_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[1085] model_decoder_layers_24_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[1086] model_decoder_layers_24_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[1087] model_decoder_layers_24_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[1088] model_decoder_layers_25_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1089] model_decoder_layers_25_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1090] model_decoder_layers_25_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1091] model_decoder_layers_25_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1092] model_decoder_layers_25_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1093] model_decoder_layers_25_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1094] model_decoder_layers_25_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1095] model_decoder_layers_25_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[1096] model_decoder_layers_25_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[1097] model_decoder_layers_25_encoder_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1098] model_decoder_layers_25_encoder_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1099] model_decoder_layers_25_encoder_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1100] model_decoder_layers_25_encoder_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1101] model_decoder_layers_25_encoder_attn_q_proj_bias: 
R.Tensor((1280,), dtype="float16") = packed_params[1102] model_decoder_layers_25_encoder_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1103] model_decoder_layers_25_encoder_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1104] model_decoder_layers_25_encoder_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[1105] model_decoder_layers_25_encoder_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[1106] model_decoder_layers_25_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[1107] model_decoder_layers_25_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[1108] model_decoder_layers_25_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[1109] model_decoder_layers_25_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[1110] model_decoder_layers_25_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[1111] model_decoder_layers_25_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[1112] model_decoder_layers_26_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1113] model_decoder_layers_26_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1114] model_decoder_layers_26_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1115] model_decoder_layers_26_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1116] model_decoder_layers_26_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1117] model_decoder_layers_26_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1118] model_decoder_layers_26_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1119] model_decoder_layers_26_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[1120] 
model_decoder_layers_26_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[1121] model_decoder_layers_26_encoder_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1122] model_decoder_layers_26_encoder_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1123] model_decoder_layers_26_encoder_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1124] model_decoder_layers_26_encoder_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1125] model_decoder_layers_26_encoder_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1126] model_decoder_layers_26_encoder_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1127] model_decoder_layers_26_encoder_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1128] model_decoder_layers_26_encoder_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[1129] model_decoder_layers_26_encoder_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[1130] model_decoder_layers_26_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[1131] model_decoder_layers_26_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[1132] model_decoder_layers_26_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[1133] model_decoder_layers_26_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[1134] model_decoder_layers_26_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[1135] model_decoder_layers_26_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[1136] model_decoder_layers_27_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1137] model_decoder_layers_27_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1138] model_decoder_layers_27_self_attn_v_proj_bias: R.Tensor((1280,), 
dtype="float16") = packed_params[1139] model_decoder_layers_27_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1140] model_decoder_layers_27_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1141] model_decoder_layers_27_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1142] model_decoder_layers_27_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1143] model_decoder_layers_27_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[1144] model_decoder_layers_27_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[1145] model_decoder_layers_27_encoder_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1146] model_decoder_layers_27_encoder_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1147] model_decoder_layers_27_encoder_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1148] model_decoder_layers_27_encoder_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1149] model_decoder_layers_27_encoder_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1150] model_decoder_layers_27_encoder_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1151] model_decoder_layers_27_encoder_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1152] model_decoder_layers_27_encoder_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[1153] model_decoder_layers_27_encoder_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[1154] model_decoder_layers_27_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[1155] model_decoder_layers_27_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[1156] model_decoder_layers_27_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[1157] 
model_decoder_layers_27_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[1158] model_decoder_layers_27_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[1159] model_decoder_layers_27_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[1160] model_decoder_layers_28_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1161] model_decoder_layers_28_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1162] model_decoder_layers_28_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1163] model_decoder_layers_28_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1164] model_decoder_layers_28_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1165] model_decoder_layers_28_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1166] model_decoder_layers_28_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1167] model_decoder_layers_28_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[1168] model_decoder_layers_28_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[1169] model_decoder_layers_28_encoder_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1170] model_decoder_layers_28_encoder_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1171] model_decoder_layers_28_encoder_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1172] model_decoder_layers_28_encoder_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1173] model_decoder_layers_28_encoder_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1174] model_decoder_layers_28_encoder_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1175] 
model_decoder_layers_28_encoder_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1176] model_decoder_layers_28_encoder_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[1177] model_decoder_layers_28_encoder_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[1178] model_decoder_layers_28_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[1179] model_decoder_layers_28_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[1180] model_decoder_layers_28_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[1181] model_decoder_layers_28_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[1182] model_decoder_layers_28_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[1183] model_decoder_layers_28_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[1184] model_decoder_layers_29_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1185] model_decoder_layers_29_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1186] model_decoder_layers_29_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1187] model_decoder_layers_29_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1188] model_decoder_layers_29_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1189] model_decoder_layers_29_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1190] model_decoder_layers_29_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1191] model_decoder_layers_29_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[1192] model_decoder_layers_29_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[1193] model_decoder_layers_29_encoder_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = 
packed_params[1194] model_decoder_layers_29_encoder_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1195] model_decoder_layers_29_encoder_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1196] model_decoder_layers_29_encoder_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1197] model_decoder_layers_29_encoder_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1198] model_decoder_layers_29_encoder_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1199] model_decoder_layers_29_encoder_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1200] model_decoder_layers_29_encoder_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[1201] model_decoder_layers_29_encoder_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[1202] model_decoder_layers_29_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[1203] model_decoder_layers_29_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[1204] model_decoder_layers_29_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[1205] model_decoder_layers_29_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[1206] model_decoder_layers_29_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[1207] model_decoder_layers_29_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[1208] model_decoder_layers_30_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1209] model_decoder_layers_30_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1210] model_decoder_layers_30_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1211] model_decoder_layers_30_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1212] model_decoder_layers_30_self_attn_q_proj_bias: 
R.Tensor((1280,), dtype="float16") = packed_params[1213] model_decoder_layers_30_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1214] model_decoder_layers_30_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1215] model_decoder_layers_30_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[1216] model_decoder_layers_30_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[1217] model_decoder_layers_30_encoder_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1218] model_decoder_layers_30_encoder_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1219] model_decoder_layers_30_encoder_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1220] model_decoder_layers_30_encoder_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1221] model_decoder_layers_30_encoder_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1222] model_decoder_layers_30_encoder_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1223] model_decoder_layers_30_encoder_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1224] model_decoder_layers_30_encoder_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[1225] model_decoder_layers_30_encoder_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[1226] model_decoder_layers_30_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[1227] model_decoder_layers_30_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[1228] model_decoder_layers_30_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[1229] model_decoder_layers_30_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[1230] model_decoder_layers_30_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[1231] 
model_decoder_layers_30_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[1232] model_decoder_layers_31_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1233] model_decoder_layers_31_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1234] model_decoder_layers_31_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1235] model_decoder_layers_31_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1236] model_decoder_layers_31_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1237] model_decoder_layers_31_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1238] model_decoder_layers_31_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1239] model_decoder_layers_31_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[1240] model_decoder_layers_31_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[1241] model_decoder_layers_31_encoder_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1242] model_decoder_layers_31_encoder_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1243] model_decoder_layers_31_encoder_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1244] model_decoder_layers_31_encoder_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1245] model_decoder_layers_31_encoder_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1246] model_decoder_layers_31_encoder_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[1247] model_decoder_layers_31_encoder_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[1248] model_decoder_layers_31_encoder_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[1249] 
model_decoder_layers_31_encoder_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[1250] model_decoder_layers_31_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[1251] model_decoder_layers_31_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[1252] model_decoder_layers_31_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[1253] model_decoder_layers_31_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[1254] model_decoder_layers_31_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[1255] model_decoder_layers_31_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[1256] model_decoder_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[1257] model_decoder_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[1258] lv: R.Tensor((batch_size, 1280, 3000), dtype="float16") = R.nn.conv1d(input_features, model_encoder_conv1_weight, strides=[1], padding=[1, 1], dilation=[1], groups=1, data_layout="NCW", kernel_layout="OIW", out_layout="NCW", out_dtype="void") lv1: R.Tensor((1, 1280, 1), dtype="float16") = R.reshape(model_encoder_conv1_bias, R.shape([1, 1280, 1])) conv1d: R.Tensor((batch_size, 1280, 3000), dtype="float16") = R.add(lv, lv1) gelu: R.Tensor((batch_size, 1280, 3000), dtype="float16") = R.nn.gelu(conv1d) lv2: R.Tensor((batch_size, 1280, 1500), dtype="float16") = R.nn.conv1d(gelu, model_encoder_conv2_weight, strides=[2], padding=[1, 1], dilation=[1], groups=1, data_layout="NCW", kernel_layout="OIW", out_layout="NCW", out_dtype="void") lv3: R.Tensor((1, 1280, 1), dtype="float16") = R.reshape(model_encoder_conv2_bias, R.shape([1, 1280, 1])) conv1d1: R.Tensor((batch_size, 1280, 1500), dtype="float16") = R.add(lv2, lv3) gelu1: R.Tensor((batch_size, 1280, 1500), dtype="float16") = R.nn.gelu(conv1d1) permute_dims: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.permute_dims(gelu1, axes=[0, 2, 1]) add: 
R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(permute_dims, model_encoder_embed_positions_weight) layer_norm: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add, model_encoder_layers_0_self_attn_layer_norm_weight, model_encoder_layers_0_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_0_self_attn_q_proj_weight, axes=None) matmul: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm, permute_dims1, out_dtype="void") add1: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul, model_encoder_layers_0_self_attn_q_proj_bias) reshape: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add1, R.shape([batch_size, 1500, 20, 64])) permute_dims2: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_0_self_attn_k_proj_weight, axes=None) matmul1: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm, permute_dims2, out_dtype="void") reshape1: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul1, R.shape([batch_size, 1500, 20, 64])) permute_dims3: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_0_self_attn_v_proj_weight, axes=None) matmul2: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm, permute_dims3, out_dtype="void") add2: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul2, model_encoder_layers_0_self_attn_v_proj_bias) reshape2: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add2, R.shape([batch_size, 1500, 20, 64])) reshape3: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape, R.shape([batch_size * 1500, 20, 64])) reshape4: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape1, R.shape([batch_size * 1500, 20, 64])) reshape5: R.Tensor((batch_size * 1500, 
20, 64), dtype="float16") = R.reshape(reshape2, R.shape([batch_size * 1500, 20, 64])) lv4 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(0), R.prim_value(T.float32(1)), reshape3, reshape4, reshape5), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) reshape6: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv4, R.shape([batch_size, 1500, 20, 64])) reshape7: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape6, R.shape([batch_size, 1500, 1280])) permute_dims4: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_0_self_attn_out_proj_weight, axes=None) matmul3: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(reshape7, permute_dims4, out_dtype="void") add3: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul3, model_encoder_layers_0_self_attn_out_proj_bias) add4: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add, add3) layer_norm1: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add4, model_encoder_layers_0_final_layer_norm_weight, model_encoder_layers_0_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims5: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_encoder_layers_0_fc1_weight, axes=None) matmul4: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.matmul(layer_norm1, permute_dims5, out_dtype="void") add5: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.add(matmul4, model_encoder_layers_0_fc1_bias) gelu2: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.nn.gelu(add5) permute_dims6: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_0_fc2_weight, axes=None) matmul5: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(gelu2, permute_dims6, out_dtype="void") add6: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul5, 
model_encoder_layers_0_fc2_bias) add7: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add4, add6) maximum: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add7, R.const(-65504, "float16")) minimum: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum, R.const(65504, "float16")) layer_norm2: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum, model_encoder_layers_1_self_attn_layer_norm_weight, model_encoder_layers_1_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims7: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_1_self_attn_q_proj_weight, axes=None) matmul6: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm2, permute_dims7, out_dtype="void") add8: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul6, model_encoder_layers_1_self_attn_q_proj_bias) reshape8: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add8, R.shape([batch_size, 1500, 20, 64])) permute_dims8: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_1_self_attn_k_proj_weight, axes=None) matmul7: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm2, permute_dims8, out_dtype="void") reshape9: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul7, R.shape([batch_size, 1500, 20, 64])) permute_dims9: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_1_self_attn_v_proj_weight, axes=None) matmul8: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm2, permute_dims9, out_dtype="void") add9: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul8, model_encoder_layers_1_self_attn_v_proj_bias) reshape10: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add9, R.shape([batch_size, 1500, 20, 64])) reshape11: R.Tensor((batch_size * 1500, 20, 64), 
dtype="float16") = R.reshape(reshape8, R.shape([batch_size * 1500, 20, 64])) reshape12: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape9, R.shape([batch_size * 1500, 20, 64])) reshape13: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape10, R.shape([batch_size * 1500, 20, 64])) lv5 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(1), R.prim_value(T.float32(1)), reshape11, reshape12, reshape13), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) reshape14: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv5, R.shape([batch_size, 1500, 20, 64])) reshape15: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape14, R.shape([batch_size, 1500, 1280])) permute_dims10: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_1_self_attn_out_proj_weight, axes=None) matmul9: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(reshape15, permute_dims10, out_dtype="void") add10: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul9, model_encoder_layers_1_self_attn_out_proj_bias) add11: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum, add10) layer_norm3: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add11, model_encoder_layers_1_final_layer_norm_weight, model_encoder_layers_1_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims11: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_encoder_layers_1_fc1_weight, axes=None) matmul10: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.matmul(layer_norm3, permute_dims11, out_dtype="void") add12: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.add(matmul10, model_encoder_layers_1_fc1_bias) gelu3: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.nn.gelu(add12) permute_dims12: R.Tensor((5120, 1280), 
dtype="float16") = R.permute_dims(model_encoder_layers_1_fc2_weight, axes=None) matmul11: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(gelu3, permute_dims12, out_dtype="void") add13: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul11, model_encoder_layers_1_fc2_bias) add14: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add11, add13) maximum1: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add14, R.const(-65504, "float16")) minimum1: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum1, R.const(65504, "float16")) layer_norm4: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum1, model_encoder_layers_2_self_attn_layer_norm_weight, model_encoder_layers_2_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims13: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_2_self_attn_q_proj_weight, axes=None) matmul12: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm4, permute_dims13, out_dtype="void") add15: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul12, model_encoder_layers_2_self_attn_q_proj_bias) reshape16: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add15, R.shape([batch_size, 1500, 20, 64])) permute_dims14: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_2_self_attn_k_proj_weight, axes=None) matmul13: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm4, permute_dims14, out_dtype="void") reshape17: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul13, R.shape([batch_size, 1500, 20, 64])) permute_dims15: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_2_self_attn_v_proj_weight, axes=None) matmul14: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm4, permute_dims15, out_dtype="void") add16: 
R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul14, model_encoder_layers_2_self_attn_v_proj_bias) reshape18: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add16, R.shape([batch_size, 1500, 20, 64])) reshape19: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape16, R.shape([batch_size * 1500, 20, 64])) reshape20: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape17, R.shape([batch_size * 1500, 20, 64])) reshape21: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape18, R.shape([batch_size * 1500, 20, 64])) lv6 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(2), R.prim_value(T.float32(1)), reshape19, reshape20, reshape21), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) reshape22: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv6, R.shape([batch_size, 1500, 20, 64])) reshape23: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape22, R.shape([batch_size, 1500, 1280])) permute_dims16: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_2_self_attn_out_proj_weight, axes=None) matmul15: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(reshape23, permute_dims16, out_dtype="void") add17: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul15, model_encoder_layers_2_self_attn_out_proj_bias) add18: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum1, add17) layer_norm5: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add18, model_encoder_layers_2_final_layer_norm_weight, model_encoder_layers_2_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims17: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_encoder_layers_2_fc1_weight, axes=None) matmul16: R.Tensor((batch_size, 1500, 5120), dtype="float16") = 
R.matmul(layer_norm5, permute_dims17, out_dtype="void") add19: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.add(matmul16, model_encoder_layers_2_fc1_bias) gelu4: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.nn.gelu(add19) permute_dims18: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_2_fc2_weight, axes=None) matmul17: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(gelu4, permute_dims18, out_dtype="void") add20: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul17, model_encoder_layers_2_fc2_bias) add21: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add18, add20) maximum2: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add21, R.const(-65504, "float16")) minimum2: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum2, R.const(65504, "float16")) layer_norm6: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum2, model_encoder_layers_3_self_attn_layer_norm_weight, model_encoder_layers_3_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims19: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_3_self_attn_q_proj_weight, axes=None) matmul18: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm6, permute_dims19, out_dtype="void") add22: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul18, model_encoder_layers_3_self_attn_q_proj_bias) reshape24: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add22, R.shape([batch_size, 1500, 20, 64])) permute_dims20: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_3_self_attn_k_proj_weight, axes=None) matmul19: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm6, permute_dims20, out_dtype="void") reshape25: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul19, 
R.shape([batch_size, 1500, 20, 64])) permute_dims21: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_3_self_attn_v_proj_weight, axes=None) matmul20: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm6, permute_dims21, out_dtype="void") add23: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul20, model_encoder_layers_3_self_attn_v_proj_bias) reshape26: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add23, R.shape([batch_size, 1500, 20, 64])) reshape27: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape24, R.shape([batch_size * 1500, 20, 64])) reshape28: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape25, R.shape([batch_size * 1500, 20, 64])) reshape29: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape26, R.shape([batch_size * 1500, 20, 64])) lv7 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(3), R.prim_value(T.float32(1)), reshape27, reshape28, reshape29), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) reshape30: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv7, R.shape([batch_size, 1500, 20, 64])) reshape31: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape30, R.shape([batch_size, 1500, 1280])) permute_dims22: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_3_self_attn_out_proj_weight, axes=None) matmul21: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(reshape31, permute_dims22, out_dtype="void") add24: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul21, model_encoder_layers_3_self_attn_out_proj_bias) add25: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum2, add24) layer_norm7: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add25, model_encoder_layers_3_final_layer_norm_weight, 
model_encoder_layers_3_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims23: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_encoder_layers_3_fc1_weight, axes=None) matmul22: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.matmul(layer_norm7, permute_dims23, out_dtype="void") add26: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.add(matmul22, model_encoder_layers_3_fc1_bias) gelu5: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.nn.gelu(add26) permute_dims24: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_3_fc2_weight, axes=None) matmul23: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(gelu5, permute_dims24, out_dtype="void") add27: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul23, model_encoder_layers_3_fc2_bias) add28: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add25, add27) maximum3: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add28, R.const(-65504, "float16")) minimum3: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum3, R.const(65504, "float16")) layer_norm8: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum3, model_encoder_layers_4_self_attn_layer_norm_weight, model_encoder_layers_4_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims25: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_4_self_attn_q_proj_weight, axes=None) matmul24: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm8, permute_dims25, out_dtype="void") add29: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul24, model_encoder_layers_4_self_attn_q_proj_bias) reshape32: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add29, R.shape([batch_size, 1500, 20, 64])) permute_dims26: R.Tensor((1280, 1280), dtype="float16") 
= R.permute_dims(model_encoder_layers_4_self_attn_k_proj_weight, axes=None) matmul25: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm8, permute_dims26, out_dtype="void") reshape33: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul25, R.shape([batch_size, 1500, 20, 64])) permute_dims27: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_4_self_attn_v_proj_weight, axes=None) matmul26: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm8, permute_dims27, out_dtype="void") add30: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul26, model_encoder_layers_4_self_attn_v_proj_bias) reshape34: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add30, R.shape([batch_size, 1500, 20, 64])) reshape35: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape32, R.shape([batch_size * 1500, 20, 64])) reshape36: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape33, R.shape([batch_size * 1500, 20, 64])) reshape37: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape34, R.shape([batch_size * 1500, 20, 64])) lv8 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(4), R.prim_value(T.float32(1)), reshape35, reshape36, reshape37), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) reshape38: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv8, R.shape([batch_size, 1500, 20, 64])) reshape39: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape38, R.shape([batch_size, 1500, 1280])) permute_dims28: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_4_self_attn_out_proj_weight, axes=None) matmul27: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(reshape39, permute_dims28, out_dtype="void") add31: R.Tensor((batch_size, 1500, 1280), dtype="float16") = 
R.add(matmul27, model_encoder_layers_4_self_attn_out_proj_bias) add32: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum3, add31) layer_norm9: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add32, model_encoder_layers_4_final_layer_norm_weight, model_encoder_layers_4_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims29: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_encoder_layers_4_fc1_weight, axes=None) matmul28: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.matmul(layer_norm9, permute_dims29, out_dtype="void") add33: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.add(matmul28, model_encoder_layers_4_fc1_bias) gelu6: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.nn.gelu(add33) permute_dims30: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_4_fc2_weight, axes=None) matmul29: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(gelu6, permute_dims30, out_dtype="void") add34: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul29, model_encoder_layers_4_fc2_bias) add35: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add32, add34) maximum4: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add35, R.const(-65504, "float16")) minimum4: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum4, R.const(65504, "float16")) layer_norm10: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum4, model_encoder_layers_5_self_attn_layer_norm_weight, model_encoder_layers_5_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims31: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_5_self_attn_q_proj_weight, axes=None) matmul30: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm10, permute_dims31, out_dtype="void") add36: 
R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul30, model_encoder_layers_5_self_attn_q_proj_bias) reshape40: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add36, R.shape([batch_size, 1500, 20, 64])) permute_dims32: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_5_self_attn_k_proj_weight, axes=None) matmul31: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm10, permute_dims32, out_dtype="void") reshape41: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul31, R.shape([batch_size, 1500, 20, 64])) permute_dims33: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_5_self_attn_v_proj_weight, axes=None) matmul32: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm10, permute_dims33, out_dtype="void") add37: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul32, model_encoder_layers_5_self_attn_v_proj_bias) reshape42: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add37, R.shape([batch_size, 1500, 20, 64])) reshape43: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape40, R.shape([batch_size * 1500, 20, 64])) reshape44: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape41, R.shape([batch_size * 1500, 20, 64])) reshape45: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape42, R.shape([batch_size * 1500, 20, 64])) lv9 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(5), R.prim_value(T.float32(1)), reshape43, reshape44, reshape45), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) reshape46: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv9, R.shape([batch_size, 1500, 20, 64])) reshape47: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape46, R.shape([batch_size, 1500, 1280])) permute_dims34: 
R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_5_self_attn_out_proj_weight, axes=None) matmul33: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(reshape47, permute_dims34, out_dtype="void") add38: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul33, model_encoder_layers_5_self_attn_out_proj_bias) add39: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum4, add38) layer_norm11: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add39, model_encoder_layers_5_final_layer_norm_weight, model_encoder_layers_5_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims35: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_encoder_layers_5_fc1_weight, axes=None) matmul34: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.matmul(layer_norm11, permute_dims35, out_dtype="void") add40: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.add(matmul34, model_encoder_layers_5_fc1_bias) gelu7: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.nn.gelu(add40) permute_dims36: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_5_fc2_weight, axes=None) matmul35: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(gelu7, permute_dims36, out_dtype="void") add41: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul35, model_encoder_layers_5_fc2_bias) add42: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add39, add41) maximum5: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add42, R.const(-65504, "float16")) minimum5: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum5, R.const(65504, "float16")) layer_norm12: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum5, model_encoder_layers_6_self_attn_layer_norm_weight, model_encoder_layers_6_self_attn_layer_norm_bias, axes=[-1], 
epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims37: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_6_self_attn_q_proj_weight, axes=None) matmul36: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm12, permute_dims37, out_dtype="void") add43: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul36, model_encoder_layers_6_self_attn_q_proj_bias) reshape48: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add43, R.shape([batch_size, 1500, 20, 64])) permute_dims38: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_6_self_attn_k_proj_weight, axes=None) matmul37: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm12, permute_dims38, out_dtype="void") reshape49: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul37, R.shape([batch_size, 1500, 20, 64])) permute_dims39: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_6_self_attn_v_proj_weight, axes=None) matmul38: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm12, permute_dims39, out_dtype="void") add44: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul38, model_encoder_layers_6_self_attn_v_proj_bias) reshape50: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add44, R.shape([batch_size, 1500, 20, 64])) reshape51: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape48, R.shape([batch_size * 1500, 20, 64])) reshape52: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape49, R.shape([batch_size * 1500, 20, 64])) reshape53: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape50, R.shape([batch_size * 1500, 20, 64])) lv10 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(6), R.prim_value(T.float32(1)), reshape51, reshape52, reshape53), 
out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) reshape54: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv10, R.shape([batch_size, 1500, 20, 64])) reshape55: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape54, R.shape([batch_size, 1500, 1280])) permute_dims40: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_6_self_attn_out_proj_weight, axes=None) matmul39: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(reshape55, permute_dims40, out_dtype="void") add45: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul39, model_encoder_layers_6_self_attn_out_proj_bias) add46: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum5, add45) layer_norm13: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add46, model_encoder_layers_6_final_layer_norm_weight, model_encoder_layers_6_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims41: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_encoder_layers_6_fc1_weight, axes=None) matmul40: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.matmul(layer_norm13, permute_dims41, out_dtype="void") add47: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.add(matmul40, model_encoder_layers_6_fc1_bias) gelu8: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.nn.gelu(add47) permute_dims42: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_6_fc2_weight, axes=None) matmul41: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(gelu8, permute_dims42, out_dtype="void") add48: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul41, model_encoder_layers_6_fc2_bias) add49: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add46, add48) maximum6: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add49, R.const(-65504, "float16")) minimum6: 
R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum6, R.const(65504, "float16")) layer_norm14: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum6, model_encoder_layers_7_self_attn_layer_norm_weight, model_encoder_layers_7_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims43: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_7_self_attn_q_proj_weight, axes=None) matmul42: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm14, permute_dims43, out_dtype="void") add50: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul42, model_encoder_layers_7_self_attn_q_proj_bias) reshape56: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add50, R.shape([batch_size, 1500, 20, 64])) permute_dims44: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_7_self_attn_k_proj_weight, axes=None) matmul43: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm14, permute_dims44, out_dtype="void") reshape57: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul43, R.shape([batch_size, 1500, 20, 64])) permute_dims45: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_7_self_attn_v_proj_weight, axes=None) matmul44: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm14, permute_dims45, out_dtype="void") add51: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul44, model_encoder_layers_7_self_attn_v_proj_bias) reshape58: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add51, R.shape([batch_size, 1500, 20, 64])) reshape59: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape56, R.shape([batch_size * 1500, 20, 64])) reshape60: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape57, R.shape([batch_size * 1500, 20, 64])) 
reshape61: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape58, R.shape([batch_size * 1500, 20, 64])) lv11 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(7), R.prim_value(T.float32(1)), reshape59, reshape60, reshape61), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) reshape62: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv11, R.shape([batch_size, 1500, 20, 64])) reshape63: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape62, R.shape([batch_size, 1500, 1280])) permute_dims46: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_7_self_attn_out_proj_weight, axes=None) matmul45: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(reshape63, permute_dims46, out_dtype="void") add52: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul45, model_encoder_layers_7_self_attn_out_proj_bias) add53: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum6, add52) layer_norm15: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add53, model_encoder_layers_7_final_layer_norm_weight, model_encoder_layers_7_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims47: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_encoder_layers_7_fc1_weight, axes=None) matmul46: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.matmul(layer_norm15, permute_dims47, out_dtype="void") add54: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.add(matmul46, model_encoder_layers_7_fc1_bias) gelu9: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.nn.gelu(add54) permute_dims48: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_7_fc2_weight, axes=None) matmul47: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(gelu9, permute_dims48, out_dtype="void") add55: 
R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul47, model_encoder_layers_7_fc2_bias) add56: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add53, add55) maximum7: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add56, R.const(-65504, "float16")) minimum7: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum7, R.const(65504, "float16")) layer_norm16: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum7, model_encoder_layers_8_self_attn_layer_norm_weight, model_encoder_layers_8_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims49: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_8_self_attn_q_proj_weight, axes=None) matmul48: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm16, permute_dims49, out_dtype="void") add57: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul48, model_encoder_layers_8_self_attn_q_proj_bias) reshape64: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add57, R.shape([batch_size, 1500, 20, 64])) permute_dims50: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_8_self_attn_k_proj_weight, axes=None) matmul49: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm16, permute_dims50, out_dtype="void") reshape65: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul49, R.shape([batch_size, 1500, 20, 64])) permute_dims51: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_8_self_attn_v_proj_weight, axes=None) matmul50: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm16, permute_dims51, out_dtype="void") add58: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul50, model_encoder_layers_8_self_attn_v_proj_bias) reshape66: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = 
R.reshape(add58, R.shape([batch_size, 1500, 20, 64])) reshape67: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape64, R.shape([batch_size * 1500, 20, 64])) reshape68: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape65, R.shape([batch_size * 1500, 20, 64])) reshape69: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape66, R.shape([batch_size * 1500, 20, 64])) lv12 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(8), R.prim_value(T.float32(1)), reshape67, reshape68, reshape69), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) reshape70: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv12, R.shape([batch_size, 1500, 20, 64])) reshape71: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape70, R.shape([batch_size, 1500, 1280])) permute_dims52: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_8_self_attn_out_proj_weight, axes=None) matmul51: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(reshape71, permute_dims52, out_dtype="void") add59: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul51, model_encoder_layers_8_self_attn_out_proj_bias) add60: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum7, add59) layer_norm17: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add60, model_encoder_layers_8_final_layer_norm_weight, model_encoder_layers_8_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims53: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_encoder_layers_8_fc1_weight, axes=None) matmul52: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.matmul(layer_norm17, permute_dims53, out_dtype="void") add61: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.add(matmul52, model_encoder_layers_8_fc1_bias) gelu10: 
R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.nn.gelu(add61) permute_dims54: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_8_fc2_weight, axes=None) matmul53: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(gelu10, permute_dims54, out_dtype="void") add62: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul53, model_encoder_layers_8_fc2_bias) add63: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add60, add62) maximum8: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add63, R.const(-65504, "float16")) minimum8: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum8, R.const(65504, "float16")) layer_norm18: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum8, model_encoder_layers_9_self_attn_layer_norm_weight, model_encoder_layers_9_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims55: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_9_self_attn_q_proj_weight, axes=None) matmul54: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm18, permute_dims55, out_dtype="void") add64: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul54, model_encoder_layers_9_self_attn_q_proj_bias) reshape72: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add64, R.shape([batch_size, 1500, 20, 64])) permute_dims56: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_9_self_attn_k_proj_weight, axes=None) matmul55: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm18, permute_dims56, out_dtype="void") reshape73: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul55, R.shape([batch_size, 1500, 20, 64])) permute_dims57: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_9_self_attn_v_proj_weight, axes=None) matmul56: 
R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm18, permute_dims57, out_dtype="void") add65: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul56, model_encoder_layers_9_self_attn_v_proj_bias) reshape74: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add65, R.shape([batch_size, 1500, 20, 64])) reshape75: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape72, R.shape([batch_size * 1500, 20, 64])) reshape76: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape73, R.shape([batch_size * 1500, 20, 64])) reshape77: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape74, R.shape([batch_size * 1500, 20, 64])) lv13 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(9), R.prim_value(T.float32(1)), reshape75, reshape76, reshape77), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) reshape78: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv13, R.shape([batch_size, 1500, 20, 64])) reshape79: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape78, R.shape([batch_size, 1500, 1280])) permute_dims58: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_9_self_attn_out_proj_weight, axes=None) matmul57: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(reshape79, permute_dims58, out_dtype="void") add66: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul57, model_encoder_layers_9_self_attn_out_proj_bias) add67: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum8, add66) layer_norm19: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add67, model_encoder_layers_9_final_layer_norm_weight, model_encoder_layers_9_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims59: R.Tensor((1280, 5120), dtype="float16") = 
R.permute_dims(model_encoder_layers_9_fc1_weight, axes=None) matmul58: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.matmul(layer_norm19, permute_dims59, out_dtype="void") add68: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.add(matmul58, model_encoder_layers_9_fc1_bias) gelu11: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.nn.gelu(add68) permute_dims60: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_9_fc2_weight, axes=None) matmul59: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(gelu11, permute_dims60, out_dtype="void") add69: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul59, model_encoder_layers_9_fc2_bias) add70: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add67, add69) maximum9: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add70, R.const(-65504, "float16")) minimum9: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum9, R.const(65504, "float16")) layer_norm20: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum9, model_encoder_layers_10_self_attn_layer_norm_weight, model_encoder_layers_10_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims61: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_10_self_attn_q_proj_weight, axes=None) matmul60: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm20, permute_dims61, out_dtype="void") add71: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul60, model_encoder_layers_10_self_attn_q_proj_bias) reshape80: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add71, R.shape([batch_size, 1500, 20, 64])) permute_dims62: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_10_self_attn_k_proj_weight, axes=None) matmul61: R.Tensor((batch_size, 1500, 1280), dtype="float16") = 
R.matmul(layer_norm20, permute_dims62, out_dtype="void") reshape81: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul61, R.shape([batch_size, 1500, 20, 64])) permute_dims63: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_10_self_attn_v_proj_weight, axes=None) matmul62: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm20, permute_dims63, out_dtype="void") add72: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul62, model_encoder_layers_10_self_attn_v_proj_bias) reshape82: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add72, R.shape([batch_size, 1500, 20, 64])) reshape83: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape80, R.shape([batch_size * 1500, 20, 64])) reshape84: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape81, R.shape([batch_size * 1500, 20, 64])) reshape85: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape82, R.shape([batch_size * 1500, 20, 64])) lv14 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(10), R.prim_value(T.float32(1)), reshape83, reshape84, reshape85), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) reshape86: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv14, R.shape([batch_size, 1500, 20, 64])) reshape87: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape86, R.shape([batch_size, 1500, 1280])) permute_dims64: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_10_self_attn_out_proj_weight, axes=None) matmul63: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(reshape87, permute_dims64, out_dtype="void") add73: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul63, model_encoder_layers_10_self_attn_out_proj_bias) add74: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum9, 
add73) layer_norm21: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add74, model_encoder_layers_10_final_layer_norm_weight, model_encoder_layers_10_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims65: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_encoder_layers_10_fc1_weight, axes=None) matmul64: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.matmul(layer_norm21, permute_dims65, out_dtype="void") add75: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.add(matmul64, model_encoder_layers_10_fc1_bias) gelu12: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.nn.gelu(add75) permute_dims66: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_10_fc2_weight, axes=None) matmul65: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(gelu12, permute_dims66, out_dtype="void") add76: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul65, model_encoder_layers_10_fc2_bias) add77: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add74, add76) maximum10: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add77, R.const(-65504, "float16")) minimum10: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum10, R.const(65504, "float16")) layer_norm22: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum10, model_encoder_layers_11_self_attn_layer_norm_weight, model_encoder_layers_11_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims67: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_11_self_attn_q_proj_weight, axes=None) matmul66: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm22, permute_dims67, out_dtype="void") add78: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul66, model_encoder_layers_11_self_attn_q_proj_bias) reshape88: 
R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add78, R.shape([batch_size, 1500, 20, 64])) permute_dims68: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_11_self_attn_k_proj_weight, axes=None) matmul67: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm22, permute_dims68, out_dtype="void") reshape89: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul67, R.shape([batch_size, 1500, 20, 64])) permute_dims69: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_11_self_attn_v_proj_weight, axes=None) matmul68: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm22, permute_dims69, out_dtype="void") add79: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul68, model_encoder_layers_11_self_attn_v_proj_bias) reshape90: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add79, R.shape([batch_size, 1500, 20, 64])) reshape91: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape88, R.shape([batch_size * 1500, 20, 64])) reshape92: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape89, R.shape([batch_size * 1500, 20, 64])) reshape93: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape90, R.shape([batch_size * 1500, 20, 64])) lv15 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(11), R.prim_value(T.float32(1)), reshape91, reshape92, reshape93), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) reshape94: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv15, R.shape([batch_size, 1500, 20, 64])) reshape95: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape94, R.shape([batch_size, 1500, 1280])) permute_dims70: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_11_self_attn_out_proj_weight, axes=None) matmul69: 
R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(reshape95, permute_dims70, out_dtype="void") add80: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul69, model_encoder_layers_11_self_attn_out_proj_bias) add81: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum10, add80) layer_norm23: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add81, model_encoder_layers_11_final_layer_norm_weight, model_encoder_layers_11_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims71: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_encoder_layers_11_fc1_weight, axes=None) matmul70: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.matmul(layer_norm23, permute_dims71, out_dtype="void") add82: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.add(matmul70, model_encoder_layers_11_fc1_bias) gelu13: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.nn.gelu(add82) permute_dims72: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_11_fc2_weight, axes=None) matmul71: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(gelu13, permute_dims72, out_dtype="void") add83: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul71, model_encoder_layers_11_fc2_bias) add84: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add81, add83) maximum11: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add84, R.const(-65504, "float16")) minimum11: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum11, R.const(65504, "float16")) layer_norm24: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum11, model_encoder_layers_12_self_attn_layer_norm_weight, model_encoder_layers_12_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims73: R.Tensor((1280, 1280), dtype="float16") = 
R.permute_dims(model_encoder_layers_12_self_attn_q_proj_weight, axes=None) matmul72: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm24, permute_dims73, out_dtype="void") add85: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul72, model_encoder_layers_12_self_attn_q_proj_bias) reshape96: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add85, R.shape([batch_size, 1500, 20, 64])) permute_dims74: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_12_self_attn_k_proj_weight, axes=None) matmul73: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm24, permute_dims74, out_dtype="void") reshape97: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul73, R.shape([batch_size, 1500, 20, 64])) permute_dims75: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_12_self_attn_v_proj_weight, axes=None) matmul74: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm24, permute_dims75, out_dtype="void") add86: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul74, model_encoder_layers_12_self_attn_v_proj_bias) reshape98: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add86, R.shape([batch_size, 1500, 20, 64])) reshape99: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape96, R.shape([batch_size * 1500, 20, 64])) reshape100: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape97, R.shape([batch_size * 1500, 20, 64])) reshape101: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape98, R.shape([batch_size * 1500, 20, 64])) lv16 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(12), R.prim_value(T.float32(1)), reshape99, reshape100, reshape101), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) reshape102: R.Tensor((batch_size, 1500, 20, 64), 
dtype="float16") = R.reshape(lv16, R.shape([batch_size, 1500, 20, 64])) reshape103: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape102, R.shape([batch_size, 1500, 1280])) permute_dims76: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_12_self_attn_out_proj_weight, axes=None) matmul75: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(reshape103, permute_dims76, out_dtype="void") add87: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul75, model_encoder_layers_12_self_attn_out_proj_bias) add88: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum11, add87) layer_norm25: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add88, model_encoder_layers_12_final_layer_norm_weight, model_encoder_layers_12_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims77: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_encoder_layers_12_fc1_weight, axes=None) matmul76: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.matmul(layer_norm25, permute_dims77, out_dtype="void") add89: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.add(matmul76, model_encoder_layers_12_fc1_bias) gelu14: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.nn.gelu(add89) permute_dims78: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_12_fc2_weight, axes=None) matmul77: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(gelu14, permute_dims78, out_dtype="void") add90: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul77, model_encoder_layers_12_fc2_bias) add91: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add88, add90) maximum12: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add91, R.const(-65504, "float16")) minimum12: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum12, R.const(65504, "float16")) 
layer_norm26: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum12, model_encoder_layers_13_self_attn_layer_norm_weight, model_encoder_layers_13_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims79: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_13_self_attn_q_proj_weight, axes=None) matmul78: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm26, permute_dims79, out_dtype="void") add92: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul78, model_encoder_layers_13_self_attn_q_proj_bias) reshape104: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add92, R.shape([batch_size, 1500, 20, 64])) permute_dims80: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_13_self_attn_k_proj_weight, axes=None) matmul79: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm26, permute_dims80, out_dtype="void") reshape105: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul79, R.shape([batch_size, 1500, 20, 64])) permute_dims81: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_13_self_attn_v_proj_weight, axes=None) matmul80: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm26, permute_dims81, out_dtype="void") add93: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul80, model_encoder_layers_13_self_attn_v_proj_bias) reshape106: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add93, R.shape([batch_size, 1500, 20, 64])) reshape107: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape104, R.shape([batch_size * 1500, 20, 64])) reshape108: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape105, R.shape([batch_size * 1500, 20, 64])) reshape109: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape106, 
R.shape([batch_size * 1500, 20, 64])) lv17 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(13), R.prim_value(T.float32(1)), reshape107, reshape108, reshape109), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) reshape110: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv17, R.shape([batch_size, 1500, 20, 64])) reshape111: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape110, R.shape([batch_size, 1500, 1280])) permute_dims82: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_13_self_attn_out_proj_weight, axes=None) matmul81: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(reshape111, permute_dims82, out_dtype="void") add94: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul81, model_encoder_layers_13_self_attn_out_proj_bias) add95: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum12, add94) layer_norm27: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add95, model_encoder_layers_13_final_layer_norm_weight, model_encoder_layers_13_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims83: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_encoder_layers_13_fc1_weight, axes=None) matmul82: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.matmul(layer_norm27, permute_dims83, out_dtype="void") add96: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.add(matmul82, model_encoder_layers_13_fc1_bias) gelu15: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.nn.gelu(add96) permute_dims84: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_13_fc2_weight, axes=None) matmul83: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(gelu15, permute_dims84, out_dtype="void") add97: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul83, 
model_encoder_layers_13_fc2_bias) add98: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add95, add97) maximum13: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add98, R.const(-65504, "float16")) minimum13: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum13, R.const(65504, "float16")) layer_norm28: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum13, model_encoder_layers_14_self_attn_layer_norm_weight, model_encoder_layers_14_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims85: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_14_self_attn_q_proj_weight, axes=None) matmul84: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm28, permute_dims85, out_dtype="void") add99: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul84, model_encoder_layers_14_self_attn_q_proj_bias) reshape112: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add99, R.shape([batch_size, 1500, 20, 64])) permute_dims86: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_14_self_attn_k_proj_weight, axes=None) matmul85: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm28, permute_dims86, out_dtype="void") reshape113: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul85, R.shape([batch_size, 1500, 20, 64])) permute_dims87: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_14_self_attn_v_proj_weight, axes=None) matmul86: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm28, permute_dims87, out_dtype="void") add100: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul86, model_encoder_layers_14_self_attn_v_proj_bias) reshape114: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add100, R.shape([batch_size, 1500, 20, 64])) reshape115: 
R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape112, R.shape([batch_size * 1500, 20, 64])) reshape116: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape113, R.shape([batch_size * 1500, 20, 64])) reshape117: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape114, R.shape([batch_size * 1500, 20, 64])) lv18 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(14), R.prim_value(T.float32(1)), reshape115, reshape116, reshape117), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) reshape118: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv18, R.shape([batch_size, 1500, 20, 64])) reshape119: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape118, R.shape([batch_size, 1500, 1280])) permute_dims88: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_14_self_attn_out_proj_weight, axes=None) matmul87: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(reshape119, permute_dims88, out_dtype="void") add101: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul87, model_encoder_layers_14_self_attn_out_proj_bias) add102: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum13, add101) layer_norm29: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add102, model_encoder_layers_14_final_layer_norm_weight, model_encoder_layers_14_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims89: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_encoder_layers_14_fc1_weight, axes=None) matmul88: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.matmul(layer_norm29, permute_dims89, out_dtype="void") add103: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.add(matmul88, model_encoder_layers_14_fc1_bias) gelu16: R.Tensor((batch_size, 1500, 5120), dtype="float16") 
= R.nn.gelu(add103) permute_dims90: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_14_fc2_weight, axes=None) matmul89: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(gelu16, permute_dims90, out_dtype="void") add104: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul89, model_encoder_layers_14_fc2_bias) add105: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add102, add104) maximum14: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add105, R.const(-65504, "float16")) minimum14: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum14, R.const(65504, "float16")) layer_norm30: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum14, model_encoder_layers_15_self_attn_layer_norm_weight, model_encoder_layers_15_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims91: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_15_self_attn_q_proj_weight, axes=None) matmul90: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm30, permute_dims91, out_dtype="void") add106: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul90, model_encoder_layers_15_self_attn_q_proj_bias) reshape120: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add106, R.shape([batch_size, 1500, 20, 64])) permute_dims92: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_15_self_attn_k_proj_weight, axes=None) matmul91: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm30, permute_dims92, out_dtype="void") reshape121: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul91, R.shape([batch_size, 1500, 20, 64])) permute_dims93: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_15_self_attn_v_proj_weight, axes=None) matmul92: R.Tensor((batch_size, 1500, 
1280), dtype="float16") = R.matmul(layer_norm30, permute_dims93, out_dtype="void") add107: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul92, model_encoder_layers_15_self_attn_v_proj_bias) reshape122: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add107, R.shape([batch_size, 1500, 20, 64])) reshape123: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape120, R.shape([batch_size * 1500, 20, 64])) reshape124: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape121, R.shape([batch_size * 1500, 20, 64])) reshape125: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape122, R.shape([batch_size * 1500, 20, 64])) lv19 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(15), R.prim_value(T.float32(1)), reshape123, reshape124, reshape125), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) reshape126: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv19, R.shape([batch_size, 1500, 20, 64])) reshape127: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape126, R.shape([batch_size, 1500, 1280])) permute_dims94: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_15_self_attn_out_proj_weight, axes=None) matmul93: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(reshape127, permute_dims94, out_dtype="void") add108: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul93, model_encoder_layers_15_self_attn_out_proj_bias) add109: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum14, add108) layer_norm31: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add109, model_encoder_layers_15_final_layer_norm_weight, model_encoder_layers_15_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims95: R.Tensor((1280, 5120), dtype="float16") = 
R.permute_dims(model_encoder_layers_15_fc1_weight, axes=None) matmul94: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.matmul(layer_norm31, permute_dims95, out_dtype="void") add110: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.add(matmul94, model_encoder_layers_15_fc1_bias) gelu17: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.nn.gelu(add110) permute_dims96: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_15_fc2_weight, axes=None) matmul95: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(gelu17, permute_dims96, out_dtype="void") add111: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul95, model_encoder_layers_15_fc2_bias) add112: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add109, add111) maximum15: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add112, R.const(-65504, "float16")) minimum15: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum15, R.const(65504, "float16")) layer_norm32: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum15, model_encoder_layers_16_self_attn_layer_norm_weight, model_encoder_layers_16_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims97: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_16_self_attn_q_proj_weight, axes=None) matmul96: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm32, permute_dims97, out_dtype="void") add113: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul96, model_encoder_layers_16_self_attn_q_proj_bias) reshape128: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add113, R.shape([batch_size, 1500, 20, 64])) permute_dims98: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_16_self_attn_k_proj_weight, axes=None) matmul97: R.Tensor((batch_size, 1500, 1280), dtype="float16") = 
R.matmul(layer_norm32, permute_dims98, out_dtype="void") reshape129: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul97, R.shape([batch_size, 1500, 20, 64])) permute_dims99: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_16_self_attn_v_proj_weight, axes=None) matmul98: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm32, permute_dims99, out_dtype="void") add114: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul98, model_encoder_layers_16_self_attn_v_proj_bias) reshape130: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add114, R.shape([batch_size, 1500, 20, 64])) reshape131: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape128, R.shape([batch_size * 1500, 20, 64])) reshape132: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape129, R.shape([batch_size * 1500, 20, 64])) reshape133: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape130, R.shape([batch_size * 1500, 20, 64])) lv20 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(16), R.prim_value(T.float32(1)), reshape131, reshape132, reshape133), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) reshape134: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv20, R.shape([batch_size, 1500, 20, 64])) reshape135: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape134, R.shape([batch_size, 1500, 1280])) permute_dims100: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_16_self_attn_out_proj_weight, axes=None) matmul99: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(reshape135, permute_dims100, out_dtype="void") add115: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul99, model_encoder_layers_16_self_attn_out_proj_bias) add116: R.Tensor((batch_size, 1500, 1280), 
dtype="float16") = R.add(minimum15, add115) layer_norm33: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add116, model_encoder_layers_16_final_layer_norm_weight, model_encoder_layers_16_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims101: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_encoder_layers_16_fc1_weight, axes=None) matmul100: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.matmul(layer_norm33, permute_dims101, out_dtype="void") add117: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.add(matmul100, model_encoder_layers_16_fc1_bias) gelu18: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.nn.gelu(add117) permute_dims102: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_16_fc2_weight, axes=None) matmul101: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(gelu18, permute_dims102, out_dtype="void") add118: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul101, model_encoder_layers_16_fc2_bias) add119: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add116, add118) maximum16: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add119, R.const(-65504, "float16")) minimum16: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum16, R.const(65504, "float16")) layer_norm34: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum16, model_encoder_layers_17_self_attn_layer_norm_weight, model_encoder_layers_17_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims103: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_17_self_attn_q_proj_weight, axes=None) matmul102: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm34, permute_dims103, out_dtype="void") add120: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul102, 
model_encoder_layers_17_self_attn_q_proj_bias) reshape136: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add120, R.shape([batch_size, 1500, 20, 64])) permute_dims104: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_17_self_attn_k_proj_weight, axes=None) matmul103: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm34, permute_dims104, out_dtype="void") reshape137: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul103, R.shape([batch_size, 1500, 20, 64])) permute_dims105: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_17_self_attn_v_proj_weight, axes=None) matmul104: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm34, permute_dims105, out_dtype="void") add121: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul104, model_encoder_layers_17_self_attn_v_proj_bias) reshape138: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add121, R.shape([batch_size, 1500, 20, 64])) reshape139: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape136, R.shape([batch_size * 1500, 20, 64])) reshape140: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape137, R.shape([batch_size * 1500, 20, 64])) reshape141: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape138, R.shape([batch_size * 1500, 20, 64])) lv21 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(17), R.prim_value(T.float32(1)), reshape139, reshape140, reshape141), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) reshape142: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv21, R.shape([batch_size, 1500, 20, 64])) reshape143: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape142, R.shape([batch_size, 1500, 1280])) permute_dims106: R.Tensor((1280, 1280), dtype="float16") = 
R.permute_dims(model_encoder_layers_17_self_attn_out_proj_weight, axes=None) matmul105: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(reshape143, permute_dims106, out_dtype="void") add122: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul105, model_encoder_layers_17_self_attn_out_proj_bias) add123: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum16, add122) layer_norm35: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add123, model_encoder_layers_17_final_layer_norm_weight, model_encoder_layers_17_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims107: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_encoder_layers_17_fc1_weight, axes=None) matmul106: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.matmul(layer_norm35, permute_dims107, out_dtype="void") add124: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.add(matmul106, model_encoder_layers_17_fc1_bias) gelu19: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.nn.gelu(add124) permute_dims108: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_17_fc2_weight, axes=None) matmul107: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(gelu19, permute_dims108, out_dtype="void") add125: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul107, model_encoder_layers_17_fc2_bias) add126: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add123, add125) maximum17: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add126, R.const(-65504, "float16")) minimum17: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum17, R.const(65504, "float16")) layer_norm36: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum17, model_encoder_layers_18_self_attn_layer_norm_weight, model_encoder_layers_18_self_attn_layer_norm_bias, axes=[-1], 
epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims109: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_18_self_attn_q_proj_weight, axes=None) matmul108: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm36, permute_dims109, out_dtype="void") add127: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul108, model_encoder_layers_18_self_attn_q_proj_bias) reshape144: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add127, R.shape([batch_size, 1500, 20, 64])) permute_dims110: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_18_self_attn_k_proj_weight, axes=None) matmul109: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm36, permute_dims110, out_dtype="void") reshape145: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul109, R.shape([batch_size, 1500, 20, 64])) permute_dims111: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_18_self_attn_v_proj_weight, axes=None) matmul110: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm36, permute_dims111, out_dtype="void") add128: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul110, model_encoder_layers_18_self_attn_v_proj_bias) reshape146: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add128, R.shape([batch_size, 1500, 20, 64])) reshape147: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape144, R.shape([batch_size * 1500, 20, 64])) reshape148: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape145, R.shape([batch_size * 1500, 20, 64])) reshape149: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape146, R.shape([batch_size * 1500, 20, 64])) lv22 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(18), R.prim_value(T.float32(1)), reshape147, 
reshape148, reshape149), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) reshape150: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv22, R.shape([batch_size, 1500, 20, 64])) reshape151: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape150, R.shape([batch_size, 1500, 1280])) permute_dims112: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_18_self_attn_out_proj_weight, axes=None) matmul111: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(reshape151, permute_dims112, out_dtype="void") add129: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul111, model_encoder_layers_18_self_attn_out_proj_bias) add130: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum17, add129) layer_norm37: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add130, model_encoder_layers_18_final_layer_norm_weight, model_encoder_layers_18_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims113: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_encoder_layers_18_fc1_weight, axes=None) matmul112: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.matmul(layer_norm37, permute_dims113, out_dtype="void") add131: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.add(matmul112, model_encoder_layers_18_fc1_bias) gelu20: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.nn.gelu(add131) permute_dims114: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_18_fc2_weight, axes=None) matmul113: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(gelu20, permute_dims114, out_dtype="void") add132: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul113, model_encoder_layers_18_fc2_bias) add133: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add130, add132) maximum18: R.Tensor((batch_size, 1500, 1280), dtype="float16") = 
R.maximum(add133, R.const(-65504, "float16")) minimum18: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum18, R.const(65504, "float16")) layer_norm38: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum18, model_encoder_layers_19_self_attn_layer_norm_weight, model_encoder_layers_19_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims115: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_19_self_attn_q_proj_weight, axes=None) matmul114: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm38, permute_dims115, out_dtype="void") add134: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul114, model_encoder_layers_19_self_attn_q_proj_bias) reshape152: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add134, R.shape([batch_size, 1500, 20, 64])) permute_dims116: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_19_self_attn_k_proj_weight, axes=None) matmul115: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm38, permute_dims116, out_dtype="void") reshape153: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul115, R.shape([batch_size, 1500, 20, 64])) permute_dims117: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_19_self_attn_v_proj_weight, axes=None) matmul116: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm38, permute_dims117, out_dtype="void") add135: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul116, model_encoder_layers_19_self_attn_v_proj_bias) reshape154: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add135, R.shape([batch_size, 1500, 20, 64])) reshape155: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape152, R.shape([batch_size * 1500, 20, 64])) reshape156: R.Tensor((batch_size * 1500, 20, 64), 
dtype="float16") = R.reshape(reshape153, R.shape([batch_size * 1500, 20, 64])) reshape157: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape154, R.shape([batch_size * 1500, 20, 64])) lv23 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(19), R.prim_value(T.float32(1)), reshape155, reshape156, reshape157), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) reshape158: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv23, R.shape([batch_size, 1500, 20, 64])) reshape159: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape158, R.shape([batch_size, 1500, 1280])) permute_dims118: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_19_self_attn_out_proj_weight, axes=None) matmul117: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(reshape159, permute_dims118, out_dtype="void") add136: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul117, model_encoder_layers_19_self_attn_out_proj_bias) add137: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum18, add136) layer_norm39: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add137, model_encoder_layers_19_final_layer_norm_weight, model_encoder_layers_19_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims119: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_encoder_layers_19_fc1_weight, axes=None) matmul118: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.matmul(layer_norm39, permute_dims119, out_dtype="void") add138: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.add(matmul118, model_encoder_layers_19_fc1_bias) gelu21: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.nn.gelu(add138) permute_dims120: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_19_fc2_weight, axes=None) matmul119: 
R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(gelu21, permute_dims120, out_dtype="void") add139: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul119, model_encoder_layers_19_fc2_bias) add140: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add137, add139) maximum19: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add140, R.const(-65504, "float16")) minimum19: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum19, R.const(65504, "float16")) layer_norm40: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum19, model_encoder_layers_20_self_attn_layer_norm_weight, model_encoder_layers_20_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims121: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_20_self_attn_q_proj_weight, axes=None) matmul120: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm40, permute_dims121, out_dtype="void") add141: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul120, model_encoder_layers_20_self_attn_q_proj_bias) reshape160: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add141, R.shape([batch_size, 1500, 20, 64])) permute_dims122: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_20_self_attn_k_proj_weight, axes=None) matmul121: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm40, permute_dims122, out_dtype="void") reshape161: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul121, R.shape([batch_size, 1500, 20, 64])) permute_dims123: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_20_self_attn_v_proj_weight, axes=None) matmul122: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm40, permute_dims123, out_dtype="void") add142: R.Tensor((batch_size, 1500, 1280), dtype="float16") 
= R.add(matmul122, model_encoder_layers_20_self_attn_v_proj_bias) reshape162: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add142, R.shape([batch_size, 1500, 20, 64])) reshape163: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape160, R.shape([batch_size * 1500, 20, 64])) reshape164: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape161, R.shape([batch_size * 1500, 20, 64])) reshape165: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape162, R.shape([batch_size * 1500, 20, 64])) lv24 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(20), R.prim_value(T.float32(1)), reshape163, reshape164, reshape165), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) reshape166: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv24, R.shape([batch_size, 1500, 20, 64])) reshape167: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape166, R.shape([batch_size, 1500, 1280])) permute_dims124: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_20_self_attn_out_proj_weight, axes=None) matmul123: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(reshape167, permute_dims124, out_dtype="void") add143: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul123, model_encoder_layers_20_self_attn_out_proj_bias) add144: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum19, add143) layer_norm41: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add144, model_encoder_layers_20_final_layer_norm_weight, model_encoder_layers_20_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims125: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_encoder_layers_20_fc1_weight, axes=None) matmul124: R.Tensor((batch_size, 1500, 5120), dtype="float16") = 
R.matmul(layer_norm41, permute_dims125, out_dtype="void") add145: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.add(matmul124, model_encoder_layers_20_fc1_bias) gelu22: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.nn.gelu(add145) permute_dims126: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_20_fc2_weight, axes=None) matmul125: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(gelu22, permute_dims126, out_dtype="void") add146: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul125, model_encoder_layers_20_fc2_bias) add147: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add144, add146) maximum20: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add147, R.const(-65504, "float16")) minimum20: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum20, R.const(65504, "float16")) layer_norm42: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum20, model_encoder_layers_21_self_attn_layer_norm_weight, model_encoder_layers_21_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims127: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_21_self_attn_q_proj_weight, axes=None) matmul126: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm42, permute_dims127, out_dtype="void") add148: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul126, model_encoder_layers_21_self_attn_q_proj_bias) reshape168: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add148, R.shape([batch_size, 1500, 20, 64])) permute_dims128: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_21_self_attn_k_proj_weight, axes=None) matmul127: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm42, permute_dims128, out_dtype="void") reshape169: R.Tensor((batch_size, 1500, 20, 64), 
dtype="float16") = R.reshape(matmul127, R.shape([batch_size, 1500, 20, 64])) permute_dims129: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_21_self_attn_v_proj_weight, axes=None) matmul128: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm42, permute_dims129, out_dtype="void") add149: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul128, model_encoder_layers_21_self_attn_v_proj_bias) reshape170: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add149, R.shape([batch_size, 1500, 20, 64])) reshape171: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape168, R.shape([batch_size * 1500, 20, 64])) reshape172: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape169, R.shape([batch_size * 1500, 20, 64])) reshape173: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape170, R.shape([batch_size * 1500, 20, 64])) lv25 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(21), R.prim_value(T.float32(1)), reshape171, reshape172, reshape173), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) reshape174: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv25, R.shape([batch_size, 1500, 20, 64])) reshape175: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape174, R.shape([batch_size, 1500, 1280])) permute_dims130: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_21_self_attn_out_proj_weight, axes=None) matmul129: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(reshape175, permute_dims130, out_dtype="void") add150: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul129, model_encoder_layers_21_self_attn_out_proj_bias) add151: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum20, add150) layer_norm43: R.Tensor((batch_size, 1500, 1280), dtype="float16") = 
R.nn.layer_norm(add151, model_encoder_layers_21_final_layer_norm_weight, model_encoder_layers_21_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims131: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_encoder_layers_21_fc1_weight, axes=None) matmul130: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.matmul(layer_norm43, permute_dims131, out_dtype="void") add152: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.add(matmul130, model_encoder_layers_21_fc1_bias) gelu23: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.nn.gelu(add152) permute_dims132: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_21_fc2_weight, axes=None) matmul131: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(gelu23, permute_dims132, out_dtype="void") add153: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul131, model_encoder_layers_21_fc2_bias) add154: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add151, add153) maximum21: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add154, R.const(-65504, "float16")) minimum21: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum21, R.const(65504, "float16")) layer_norm44: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum21, model_encoder_layers_22_self_attn_layer_norm_weight, model_encoder_layers_22_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims133: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_22_self_attn_q_proj_weight, axes=None) matmul132: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm44, permute_dims133, out_dtype="void") add155: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul132, model_encoder_layers_22_self_attn_q_proj_bias) reshape176: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") 
= R.reshape(add155, R.shape([batch_size, 1500, 20, 64])) permute_dims134: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_22_self_attn_k_proj_weight, axes=None) matmul133: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm44, permute_dims134, out_dtype="void") reshape177: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul133, R.shape([batch_size, 1500, 20, 64])) permute_dims135: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_22_self_attn_v_proj_weight, axes=None) matmul134: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm44, permute_dims135, out_dtype="void") add156: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul134, model_encoder_layers_22_self_attn_v_proj_bias) reshape178: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add156, R.shape([batch_size, 1500, 20, 64])) reshape179: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape176, R.shape([batch_size * 1500, 20, 64])) reshape180: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape177, R.shape([batch_size * 1500, 20, 64])) reshape181: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape178, R.shape([batch_size * 1500, 20, 64])) lv26 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(22), R.prim_value(T.float32(1)), reshape179, reshape180, reshape181), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) reshape182: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv26, R.shape([batch_size, 1500, 20, 64])) reshape183: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape182, R.shape([batch_size, 1500, 1280])) permute_dims136: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_22_self_attn_out_proj_weight, axes=None) matmul135: R.Tensor((batch_size, 1500, 
1280), dtype="float16") = R.matmul(reshape183, permute_dims136, out_dtype="void") add157: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul135, model_encoder_layers_22_self_attn_out_proj_bias) add158: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum21, add157) layer_norm45: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add158, model_encoder_layers_22_final_layer_norm_weight, model_encoder_layers_22_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims137: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_encoder_layers_22_fc1_weight, axes=None) matmul136: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.matmul(layer_norm45, permute_dims137, out_dtype="void") add159: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.add(matmul136, model_encoder_layers_22_fc1_bias) gelu24: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.nn.gelu(add159) permute_dims138: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_22_fc2_weight, axes=None) matmul137: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(gelu24, permute_dims138, out_dtype="void") add160: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul137, model_encoder_layers_22_fc2_bias) add161: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add158, add160) maximum22: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add161, R.const(-65504, "float16")) minimum22: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum22, R.const(65504, "float16")) layer_norm46: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum22, model_encoder_layers_23_self_attn_layer_norm_weight, model_encoder_layers_23_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims139: R.Tensor((1280, 1280), dtype="float16") = 
R.permute_dims(model_encoder_layers_23_self_attn_q_proj_weight, axes=None) matmul138: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm46, permute_dims139, out_dtype="void") add162: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul138, model_encoder_layers_23_self_attn_q_proj_bias) reshape184: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add162, R.shape([batch_size, 1500, 20, 64])) permute_dims140: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_23_self_attn_k_proj_weight, axes=None) matmul139: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm46, permute_dims140, out_dtype="void") reshape185: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul139, R.shape([batch_size, 1500, 20, 64])) permute_dims141: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_23_self_attn_v_proj_weight, axes=None) matmul140: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm46, permute_dims141, out_dtype="void") add163: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul140, model_encoder_layers_23_self_attn_v_proj_bias) reshape186: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add163, R.shape([batch_size, 1500, 20, 64])) reshape187: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape184, R.shape([batch_size * 1500, 20, 64])) reshape188: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape185, R.shape([batch_size * 1500, 20, 64])) reshape189: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape186, R.shape([batch_size * 1500, 20, 64])) lv27 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(23), R.prim_value(T.float32(1)), reshape187, reshape188, reshape189), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) reshape190: 
R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv27, R.shape([batch_size, 1500, 20, 64])) reshape191: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape190, R.shape([batch_size, 1500, 1280])) permute_dims142: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_23_self_attn_out_proj_weight, axes=None) matmul141: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(reshape191, permute_dims142, out_dtype="void") add164: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul141, model_encoder_layers_23_self_attn_out_proj_bias) add165: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum22, add164) layer_norm47: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add165, model_encoder_layers_23_final_layer_norm_weight, model_encoder_layers_23_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims143: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_encoder_layers_23_fc1_weight, axes=None) matmul142: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.matmul(layer_norm47, permute_dims143, out_dtype="void") add166: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.add(matmul142, model_encoder_layers_23_fc1_bias) gelu25: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.nn.gelu(add166) permute_dims144: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_23_fc2_weight, axes=None) matmul143: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(gelu25, permute_dims144, out_dtype="void") add167: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul143, model_encoder_layers_23_fc2_bias) add168: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add165, add167) maximum23: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add168, R.const(-65504, "float16")) minimum23: R.Tensor((batch_size, 1500, 1280), 
dtype="float16") = R.minimum(maximum23, R.const(65504, "float16")) layer_norm48: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum23, model_encoder_layers_24_self_attn_layer_norm_weight, model_encoder_layers_24_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims145: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_24_self_attn_q_proj_weight, axes=None) matmul144: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm48, permute_dims145, out_dtype="void") add169: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul144, model_encoder_layers_24_self_attn_q_proj_bias) reshape192: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add169, R.shape([batch_size, 1500, 20, 64])) permute_dims146: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_24_self_attn_k_proj_weight, axes=None) matmul145: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm48, permute_dims146, out_dtype="void") reshape193: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul145, R.shape([batch_size, 1500, 20, 64])) permute_dims147: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_24_self_attn_v_proj_weight, axes=None) matmul146: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm48, permute_dims147, out_dtype="void") add170: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul146, model_encoder_layers_24_self_attn_v_proj_bias) reshape194: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add170, R.shape([batch_size, 1500, 20, 64])) reshape195: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape192, R.shape([batch_size * 1500, 20, 64])) reshape196: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape193, R.shape([batch_size * 1500, 20, 64])) reshape197: 
R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape194, R.shape([batch_size * 1500, 20, 64])) lv28 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(24), R.prim_value(T.float32(1)), reshape195, reshape196, reshape197), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) reshape198: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv28, R.shape([batch_size, 1500, 20, 64])) reshape199: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape198, R.shape([batch_size, 1500, 1280])) permute_dims148: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_24_self_attn_out_proj_weight, axes=None) matmul147: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(reshape199, permute_dims148, out_dtype="void") add171: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul147, model_encoder_layers_24_self_attn_out_proj_bias) add172: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum23, add171) layer_norm49: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add172, model_encoder_layers_24_final_layer_norm_weight, model_encoder_layers_24_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims149: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_encoder_layers_24_fc1_weight, axes=None) matmul148: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.matmul(layer_norm49, permute_dims149, out_dtype="void") add173: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.add(matmul148, model_encoder_layers_24_fc1_bias) gelu26: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.nn.gelu(add173) permute_dims150: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_24_fc2_weight, axes=None) matmul149: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(gelu26, permute_dims150, 
out_dtype="void") add174: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul149, model_encoder_layers_24_fc2_bias) add175: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add172, add174) maximum24: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add175, R.const(-65504, "float16")) minimum24: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum24, R.const(65504, "float16")) layer_norm50: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum24, model_encoder_layers_25_self_attn_layer_norm_weight, model_encoder_layers_25_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims151: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_25_self_attn_q_proj_weight, axes=None) matmul150: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm50, permute_dims151, out_dtype="void") add176: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul150, model_encoder_layers_25_self_attn_q_proj_bias) reshape200: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add176, R.shape([batch_size, 1500, 20, 64])) permute_dims152: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_25_self_attn_k_proj_weight, axes=None) matmul151: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm50, permute_dims152, out_dtype="void") reshape201: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul151, R.shape([batch_size, 1500, 20, 64])) permute_dims153: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_25_self_attn_v_proj_weight, axes=None) matmul152: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm50, permute_dims153, out_dtype="void") add177: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul152, model_encoder_layers_25_self_attn_v_proj_bias) reshape202: 
R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add177, R.shape([batch_size, 1500, 20, 64])) reshape203: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape200, R.shape([batch_size * 1500, 20, 64])) reshape204: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape201, R.shape([batch_size * 1500, 20, 64])) reshape205: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape202, R.shape([batch_size * 1500, 20, 64])) lv29 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(25), R.prim_value(T.float32(1)), reshape203, reshape204, reshape205), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) reshape206: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv29, R.shape([batch_size, 1500, 20, 64])) reshape207: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape206, R.shape([batch_size, 1500, 1280])) permute_dims154: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_25_self_attn_out_proj_weight, axes=None) matmul153: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(reshape207, permute_dims154, out_dtype="void") add178: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul153, model_encoder_layers_25_self_attn_out_proj_bias) add179: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum24, add178) layer_norm51: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add179, model_encoder_layers_25_final_layer_norm_weight, model_encoder_layers_25_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims155: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_encoder_layers_25_fc1_weight, axes=None) matmul154: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.matmul(layer_norm51, permute_dims155, out_dtype="void") add180: R.Tensor((batch_size, 1500, 
5120), dtype="float16") = R.add(matmul154, model_encoder_layers_25_fc1_bias) gelu27: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.nn.gelu(add180) permute_dims156: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_25_fc2_weight, axes=None) matmul155: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(gelu27, permute_dims156, out_dtype="void") add181: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul155, model_encoder_layers_25_fc2_bias) add182: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add179, add181) maximum25: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add182, R.const(-65504, "float16")) minimum25: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum25, R.const(65504, "float16")) layer_norm52: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum25, model_encoder_layers_26_self_attn_layer_norm_weight, model_encoder_layers_26_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims157: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_26_self_attn_q_proj_weight, axes=None) matmul156: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm52, permute_dims157, out_dtype="void") add183: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul156, model_encoder_layers_26_self_attn_q_proj_bias) reshape208: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add183, R.shape([batch_size, 1500, 20, 64])) permute_dims158: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_26_self_attn_k_proj_weight, axes=None) matmul157: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm52, permute_dims158, out_dtype="void") reshape209: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul157, R.shape([batch_size, 1500, 20, 64])) permute_dims159: 
R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_26_self_attn_v_proj_weight, axes=None) matmul158: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm52, permute_dims159, out_dtype="void") add184: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul158, model_encoder_layers_26_self_attn_v_proj_bias) reshape210: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add184, R.shape([batch_size, 1500, 20, 64])) reshape211: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape208, R.shape([batch_size * 1500, 20, 64])) reshape212: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape209, R.shape([batch_size * 1500, 20, 64])) reshape213: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape210, R.shape([batch_size * 1500, 20, 64])) lv30 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(26), R.prim_value(T.float32(1)), reshape211, reshape212, reshape213), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) reshape214: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv30, R.shape([batch_size, 1500, 20, 64])) reshape215: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape214, R.shape([batch_size, 1500, 1280])) permute_dims160: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_26_self_attn_out_proj_weight, axes=None) matmul159: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(reshape215, permute_dims160, out_dtype="void") add185: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul159, model_encoder_layers_26_self_attn_out_proj_bias) add186: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum25, add185) layer_norm53: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add186, model_encoder_layers_26_final_layer_norm_weight, 
model_encoder_layers_26_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims161: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_encoder_layers_26_fc1_weight, axes=None) matmul160: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.matmul(layer_norm53, permute_dims161, out_dtype="void") add187: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.add(matmul160, model_encoder_layers_26_fc1_bias) gelu28: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.nn.gelu(add187) permute_dims162: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_26_fc2_weight, axes=None) matmul161: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(gelu28, permute_dims162, out_dtype="void") add188: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul161, model_encoder_layers_26_fc2_bias) add189: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add186, add188) maximum26: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add189, R.const(-65504, "float16")) minimum26: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum26, R.const(65504, "float16")) layer_norm54: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum26, model_encoder_layers_27_self_attn_layer_norm_weight, model_encoder_layers_27_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims163: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_27_self_attn_q_proj_weight, axes=None) matmul162: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm54, permute_dims163, out_dtype="void") add190: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul162, model_encoder_layers_27_self_attn_q_proj_bias) reshape216: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add190, R.shape([batch_size, 1500, 20, 64])) 
permute_dims164: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_27_self_attn_k_proj_weight, axes=None) matmul163: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm54, permute_dims164, out_dtype="void") reshape217: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul163, R.shape([batch_size, 1500, 20, 64])) permute_dims165: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_27_self_attn_v_proj_weight, axes=None) matmul164: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm54, permute_dims165, out_dtype="void") add191: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul164, model_encoder_layers_27_self_attn_v_proj_bias) reshape218: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add191, R.shape([batch_size, 1500, 20, 64])) reshape219: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape216, R.shape([batch_size * 1500, 20, 64])) reshape220: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape217, R.shape([batch_size * 1500, 20, 64])) reshape221: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape218, R.shape([batch_size * 1500, 20, 64])) lv31 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(27), R.prim_value(T.float32(1)), reshape219, reshape220, reshape221), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) reshape222: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv31, R.shape([batch_size, 1500, 20, 64])) reshape223: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape222, R.shape([batch_size, 1500, 1280])) permute_dims166: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_27_self_attn_out_proj_weight, axes=None) matmul165: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(reshape223, 
permute_dims166, out_dtype="void") add192: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul165, model_encoder_layers_27_self_attn_out_proj_bias) add193: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum26, add192) layer_norm55: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add193, model_encoder_layers_27_final_layer_norm_weight, model_encoder_layers_27_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims167: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_encoder_layers_27_fc1_weight, axes=None) matmul166: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.matmul(layer_norm55, permute_dims167, out_dtype="void") add194: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.add(matmul166, model_encoder_layers_27_fc1_bias) gelu29: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.nn.gelu(add194) permute_dims168: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_27_fc2_weight, axes=None) matmul167: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(gelu29, permute_dims168, out_dtype="void") add195: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul167, model_encoder_layers_27_fc2_bias) add196: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add193, add195) maximum27: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add196, R.const(-65504, "float16")) minimum27: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum27, R.const(65504, "float16")) layer_norm56: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum27, model_encoder_layers_28_self_attn_layer_norm_weight, model_encoder_layers_28_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims169: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_28_self_attn_q_proj_weight, 
axes=None) matmul168: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm56, permute_dims169, out_dtype="void") add197: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul168, model_encoder_layers_28_self_attn_q_proj_bias) reshape224: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add197, R.shape([batch_size, 1500, 20, 64])) permute_dims170: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_28_self_attn_k_proj_weight, axes=None) matmul169: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm56, permute_dims170, out_dtype="void") reshape225: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul169, R.shape([batch_size, 1500, 20, 64])) permute_dims171: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_28_self_attn_v_proj_weight, axes=None) matmul170: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm56, permute_dims171, out_dtype="void") add198: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul170, model_encoder_layers_28_self_attn_v_proj_bias) reshape226: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add198, R.shape([batch_size, 1500, 20, 64])) reshape227: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape224, R.shape([batch_size * 1500, 20, 64])) reshape228: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape225, R.shape([batch_size * 1500, 20, 64])) reshape229: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape226, R.shape([batch_size * 1500, 20, 64])) lv32 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(28), R.prim_value(T.float32(1)), reshape227, reshape228, reshape229), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) reshape230: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv32, 
R.shape([batch_size, 1500, 20, 64])) reshape231: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape230, R.shape([batch_size, 1500, 1280])) permute_dims172: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_28_self_attn_out_proj_weight, axes=None) matmul171: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(reshape231, permute_dims172, out_dtype="void") add199: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul171, model_encoder_layers_28_self_attn_out_proj_bias) add200: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum27, add199) layer_norm57: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add200, model_encoder_layers_28_final_layer_norm_weight, model_encoder_layers_28_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims173: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_encoder_layers_28_fc1_weight, axes=None) matmul172: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.matmul(layer_norm57, permute_dims173, out_dtype="void") add201: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.add(matmul172, model_encoder_layers_28_fc1_bias) gelu30: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.nn.gelu(add201) permute_dims174: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_28_fc2_weight, axes=None) matmul173: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(gelu30, permute_dims174, out_dtype="void") add202: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul173, model_encoder_layers_28_fc2_bias) add203: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add200, add202) maximum28: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add203, R.const(-65504, "float16")) minimum28: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum28, R.const(65504, "float16")) layer_norm58: 
R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum28, model_encoder_layers_29_self_attn_layer_norm_weight, model_encoder_layers_29_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims175: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_29_self_attn_q_proj_weight, axes=None) matmul174: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm58, permute_dims175, out_dtype="void") add204: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul174, model_encoder_layers_29_self_attn_q_proj_bias) reshape232: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add204, R.shape([batch_size, 1500, 20, 64])) permute_dims176: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_29_self_attn_k_proj_weight, axes=None) matmul175: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm58, permute_dims176, out_dtype="void") reshape233: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul175, R.shape([batch_size, 1500, 20, 64])) permute_dims177: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_29_self_attn_v_proj_weight, axes=None) matmul176: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm58, permute_dims177, out_dtype="void") add205: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul176, model_encoder_layers_29_self_attn_v_proj_bias) reshape234: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add205, R.shape([batch_size, 1500, 20, 64])) reshape235: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape232, R.shape([batch_size * 1500, 20, 64])) reshape236: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape233, R.shape([batch_size * 1500, 20, 64])) reshape237: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape234, 
R.shape([batch_size * 1500, 20, 64])) lv33 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(29), R.prim_value(T.float32(1)), reshape235, reshape236, reshape237), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) reshape238: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv33, R.shape([batch_size, 1500, 20, 64])) reshape239: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape238, R.shape([batch_size, 1500, 1280])) permute_dims178: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_29_self_attn_out_proj_weight, axes=None) matmul177: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(reshape239, permute_dims178, out_dtype="void") add206: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul177, model_encoder_layers_29_self_attn_out_proj_bias) add207: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum28, add206) layer_norm59: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add207, model_encoder_layers_29_final_layer_norm_weight, model_encoder_layers_29_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims179: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_encoder_layers_29_fc1_weight, axes=None) matmul178: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.matmul(layer_norm59, permute_dims179, out_dtype="void") add208: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.add(matmul178, model_encoder_layers_29_fc1_bias) gelu31: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.nn.gelu(add208) permute_dims180: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_29_fc2_weight, axes=None) matmul179: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(gelu31, permute_dims180, out_dtype="void") add209: R.Tensor((batch_size, 1500, 1280), dtype="float16") = 
R.add(matmul179, model_encoder_layers_29_fc2_bias) add210: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add207, add209) maximum29: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add210, R.const(-65504, "float16")) minimum29: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum29, R.const(65504, "float16")) layer_norm60: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum29, model_encoder_layers_30_self_attn_layer_norm_weight, model_encoder_layers_30_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims181: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_30_self_attn_q_proj_weight, axes=None) matmul180: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm60, permute_dims181, out_dtype="void") add211: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul180, model_encoder_layers_30_self_attn_q_proj_bias) reshape240: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add211, R.shape([batch_size, 1500, 20, 64])) permute_dims182: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_30_self_attn_k_proj_weight, axes=None) matmul181: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm60, permute_dims182, out_dtype="void") reshape241: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul181, R.shape([batch_size, 1500, 20, 64])) permute_dims183: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_30_self_attn_v_proj_weight, axes=None) matmul182: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm60, permute_dims183, out_dtype="void") add212: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul182, model_encoder_layers_30_self_attn_v_proj_bias) reshape242: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add212, 
R.shape([batch_size, 1500, 20, 64])) reshape243: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape240, R.shape([batch_size * 1500, 20, 64])) reshape244: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape241, R.shape([batch_size * 1500, 20, 64])) reshape245: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape242, R.shape([batch_size * 1500, 20, 64])) lv34 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(30), R.prim_value(T.float32(1)), reshape243, reshape244, reshape245), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) reshape246: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv34, R.shape([batch_size, 1500, 20, 64])) reshape247: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape246, R.shape([batch_size, 1500, 1280])) permute_dims184: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_30_self_attn_out_proj_weight, axes=None) matmul183: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(reshape247, permute_dims184, out_dtype="void") add213: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul183, model_encoder_layers_30_self_attn_out_proj_bias) add214: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum29, add213) layer_norm61: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add214, model_encoder_layers_30_final_layer_norm_weight, model_encoder_layers_30_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims185: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_encoder_layers_30_fc1_weight, axes=None) matmul184: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.matmul(layer_norm61, permute_dims185, out_dtype="void") add215: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.add(matmul184, model_encoder_layers_30_fc1_bias) 
gelu32: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.nn.gelu(add215) permute_dims186: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_30_fc2_weight, axes=None) matmul185: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(gelu32, permute_dims186, out_dtype="void") add216: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul185, model_encoder_layers_30_fc2_bias) add217: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add214, add216) maximum30: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add217, R.const(-65504, "float16")) minimum30: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum30, R.const(65504, "float16")) layer_norm62: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum30, model_encoder_layers_31_self_attn_layer_norm_weight, model_encoder_layers_31_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims187: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_31_self_attn_q_proj_weight, axes=None) matmul186: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm62, permute_dims187, out_dtype="void") add218: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul186, model_encoder_layers_31_self_attn_q_proj_bias) reshape248: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add218, R.shape([batch_size, 1500, 20, 64])) permute_dims188: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_31_self_attn_k_proj_weight, axes=None) matmul187: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm62, permute_dims188, out_dtype="void") reshape249: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(matmul187, R.shape([batch_size, 1500, 20, 64])) permute_dims189: R.Tensor((1280, 1280), dtype="float16") = 
R.permute_dims(model_encoder_layers_31_self_attn_v_proj_weight, axes=None) matmul188: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(layer_norm62, permute_dims189, out_dtype="void") add219: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul188, model_encoder_layers_31_self_attn_v_proj_bias) reshape250: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(add219, R.shape([batch_size, 1500, 20, 64])) reshape251: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape248, R.shape([batch_size * 1500, 20, 64])) reshape252: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape249, R.shape([batch_size * 1500, 20, 64])) reshape253: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape250, R.shape([batch_size * 1500, 20, 64])) lv35 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(31), R.prim_value(T.float32(1)), reshape251, reshape252, reshape253), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) reshape254: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv35, R.shape([batch_size, 1500, 20, 64])) reshape255: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape254, R.shape([batch_size, 1500, 1280])) permute_dims190: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_31_self_attn_out_proj_weight, axes=None) matmul189: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(reshape255, permute_dims190, out_dtype="void") add220: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul189, model_encoder_layers_31_self_attn_out_proj_bias) add221: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum30, add220) layer_norm63: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add221, model_encoder_layers_31_final_layer_norm_weight, model_encoder_layers_31_final_layer_norm_bias, axes=[-1], 
epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims191: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_encoder_layers_31_fc1_weight, axes=None) matmul190: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.matmul(layer_norm63, permute_dims191, out_dtype="void") add222: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.add(matmul190, model_encoder_layers_31_fc1_bias) gelu33: R.Tensor((batch_size, 1500, 5120), dtype="float16") = R.nn.gelu(add222) permute_dims192: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_encoder_layers_31_fc2_weight, axes=None) matmul191: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.matmul(gelu33, permute_dims192, out_dtype="void") add223: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(matmul191, model_encoder_layers_31_fc2_bias) add224: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add221, add223) maximum31: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add224, R.const(-65504, "float16")) minimum31: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum31, R.const(65504, "float16")) layer_norm64: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum31, model_encoder_layer_norm_weight, model_encoder_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) gv: R.Tensor((batch_size, 1500, 1280), dtype="float16") = layer_norm64 R.output(gv) return gv @R.function def batch_prefill(input_ids: R.Tensor((1, "seq_len"), dtype="int32"), logit_positions: R.Tensor(("batch_size",), dtype="int32"), paged_kv_cache: R.Object, packed_params: R.Tuple(R.Tensor((1280, 128, 3), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280, 3), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1500, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 
1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), 
R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), 
R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), 
R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), 
R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), 
R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((51866, 1280), dtype="float16"), R.Tensor((448, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 
5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), 
dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 
1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"))) -> R.Tensor((1, "batch_size", 51866), dtype="float32"): batch_size = T.int64() seq_len = T.int64() R.func_attr({"num_input": 3, "relax.memory_plan_dynamic_func_output": 1, "tir_non_negative_var": ["vocab_size"], "tir_var_upper_bound": {"batch_size": 8, "seq_len": 15000, "total_seq_len": 1500}}) with R.dataflow(): 
model_encoder_conv1_weight2: R.Tensor((1280, 128, 3), dtype="float16") = packed_params[0] model_encoder_conv1_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1] model_encoder_conv2_weight2: R.Tensor((1280, 1280, 3), dtype="float16") = packed_params[2] model_encoder_conv2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[3] model_encoder_embed_positions_weight2: R.Tensor((1500, 1280), dtype="float16") = packed_params[4] model_encoder_layers_0_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[5] model_encoder_layers_0_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[6] model_encoder_layers_0_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[7] model_encoder_layers_0_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[8] model_encoder_layers_0_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[9] model_encoder_layers_0_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[10] model_encoder_layers_0_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[11] model_encoder_layers_0_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[12] model_encoder_layers_0_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[13] model_encoder_layers_0_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[14] model_encoder_layers_0_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[15] model_encoder_layers_0_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[16] model_encoder_layers_0_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[17] model_encoder_layers_0_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[18] model_encoder_layers_0_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[19] 
model_encoder_layers_1_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[20] model_encoder_layers_1_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[21] model_encoder_layers_1_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[22] model_encoder_layers_1_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[23] model_encoder_layers_1_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[24] model_encoder_layers_1_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[25] model_encoder_layers_1_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[26] model_encoder_layers_1_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[27] model_encoder_layers_1_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[28] model_encoder_layers_1_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[29] model_encoder_layers_1_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[30] model_encoder_layers_1_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[31] model_encoder_layers_1_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[32] model_encoder_layers_1_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[33] model_encoder_layers_1_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[34] model_encoder_layers_2_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[35] model_encoder_layers_2_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[36] model_encoder_layers_2_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[37] model_encoder_layers_2_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[38] 
model_encoder_layers_2_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[39] model_encoder_layers_2_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[40] model_encoder_layers_2_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[41] model_encoder_layers_2_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[42] model_encoder_layers_2_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[43] model_encoder_layers_2_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[44] model_encoder_layers_2_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[45] model_encoder_layers_2_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[46] model_encoder_layers_2_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[47] model_encoder_layers_2_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[48] model_encoder_layers_2_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[49] model_encoder_layers_3_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[50] model_encoder_layers_3_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[51] model_encoder_layers_3_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[52] model_encoder_layers_3_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[53] model_encoder_layers_3_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[54] model_encoder_layers_3_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[55] model_encoder_layers_3_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[56] model_encoder_layers_3_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[57] 
model_encoder_layers_3_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[58] model_encoder_layers_3_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[59] model_encoder_layers_3_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[60] model_encoder_layers_3_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[61] model_encoder_layers_3_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[62] model_encoder_layers_3_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[63] model_encoder_layers_3_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[64] model_encoder_layers_4_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[65] model_encoder_layers_4_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[66] model_encoder_layers_4_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[67] model_encoder_layers_4_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[68] model_encoder_layers_4_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[69] model_encoder_layers_4_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[70] model_encoder_layers_4_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[71] model_encoder_layers_4_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[72] model_encoder_layers_4_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[73] model_encoder_layers_4_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[74] model_encoder_layers_4_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[75] model_encoder_layers_4_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[76] model_encoder_layers_4_fc2_bias2: R.Tensor((1280,), dtype="float16") = 
packed_params[77] model_encoder_layers_4_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[78] model_encoder_layers_4_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[79] model_encoder_layers_5_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[80] model_encoder_layers_5_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[81] model_encoder_layers_5_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[82] model_encoder_layers_5_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[83] model_encoder_layers_5_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[84] model_encoder_layers_5_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[85] model_encoder_layers_5_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[86] model_encoder_layers_5_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[87] model_encoder_layers_5_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[88] model_encoder_layers_5_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[89] model_encoder_layers_5_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[90] model_encoder_layers_5_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[91] model_encoder_layers_5_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[92] model_encoder_layers_5_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[93] model_encoder_layers_5_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[94] model_encoder_layers_6_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[95] model_encoder_layers_6_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[96] 
model_encoder_layers_6_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[97] model_encoder_layers_6_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[98] model_encoder_layers_6_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[99] model_encoder_layers_6_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[100] model_encoder_layers_6_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[101] model_encoder_layers_6_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[102] model_encoder_layers_6_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[103] model_encoder_layers_6_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[104] model_encoder_layers_6_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[105] model_encoder_layers_6_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[106] model_encoder_layers_6_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[107] model_encoder_layers_6_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[108] model_encoder_layers_6_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[109] model_encoder_layers_7_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[110] model_encoder_layers_7_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[111] model_encoder_layers_7_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[112] model_encoder_layers_7_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[113] model_encoder_layers_7_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[114] model_encoder_layers_7_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[115] 
model_encoder_layers_7_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[116] model_encoder_layers_7_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[117] model_encoder_layers_7_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[118] model_encoder_layers_7_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[119] model_encoder_layers_7_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[120] model_encoder_layers_7_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[121] model_encoder_layers_7_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[122] model_encoder_layers_7_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[123] model_encoder_layers_7_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[124] model_encoder_layers_8_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[125] model_encoder_layers_8_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[126] model_encoder_layers_8_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[127] model_encoder_layers_8_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[128] model_encoder_layers_8_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[129] model_encoder_layers_8_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[130] model_encoder_layers_8_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[131] model_encoder_layers_8_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[132] model_encoder_layers_8_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[133] model_encoder_layers_8_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[134] 
model_encoder_layers_8_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[135] model_encoder_layers_8_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[136] model_encoder_layers_8_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[137] model_encoder_layers_8_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[138] model_encoder_layers_8_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[139] model_encoder_layers_9_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[140] model_encoder_layers_9_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[141] model_encoder_layers_9_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[142] model_encoder_layers_9_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[143] model_encoder_layers_9_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[144] model_encoder_layers_9_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[145] model_encoder_layers_9_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[146] model_encoder_layers_9_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[147] model_encoder_layers_9_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[148] model_encoder_layers_9_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[149] model_encoder_layers_9_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[150] model_encoder_layers_9_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[151] model_encoder_layers_9_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[152] model_encoder_layers_9_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[153] model_encoder_layers_9_final_layer_norm_bias2: R.Tensor((1280,), 
dtype="float16") = packed_params[154] model_encoder_layers_10_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[155] model_encoder_layers_10_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[156] model_encoder_layers_10_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[157] model_encoder_layers_10_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[158] model_encoder_layers_10_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[159] model_encoder_layers_10_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[160] model_encoder_layers_10_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[161] model_encoder_layers_10_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[162] model_encoder_layers_10_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[163] model_encoder_layers_10_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[164] model_encoder_layers_10_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[165] model_encoder_layers_10_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[166] model_encoder_layers_10_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[167] model_encoder_layers_10_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[168] model_encoder_layers_10_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[169] model_encoder_layers_11_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[170] model_encoder_layers_11_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[171] model_encoder_layers_11_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[172] model_encoder_layers_11_self_attn_q_proj_weight2: R.Tensor((1280, 
1280), dtype="float16") = packed_params[173] model_encoder_layers_11_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[174] model_encoder_layers_11_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[175] model_encoder_layers_11_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[176] model_encoder_layers_11_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[177] model_encoder_layers_11_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[178] model_encoder_layers_11_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[179] model_encoder_layers_11_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[180] model_encoder_layers_11_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[181] model_encoder_layers_11_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[182] model_encoder_layers_11_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[183] model_encoder_layers_11_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[184] model_encoder_layers_12_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[185] model_encoder_layers_12_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[186] model_encoder_layers_12_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[187] model_encoder_layers_12_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[188] model_encoder_layers_12_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[189] model_encoder_layers_12_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[190] model_encoder_layers_12_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[191] model_encoder_layers_12_self_attn_layer_norm_weight2: 
R.Tensor((1280,), dtype="float16") = packed_params[192] model_encoder_layers_12_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[193] model_encoder_layers_12_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[194] model_encoder_layers_12_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[195] model_encoder_layers_12_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[196] model_encoder_layers_12_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[197] model_encoder_layers_12_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[198] model_encoder_layers_12_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[199] model_encoder_layers_13_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[200] model_encoder_layers_13_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[201] model_encoder_layers_13_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[202] model_encoder_layers_13_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[203] model_encoder_layers_13_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[204] model_encoder_layers_13_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[205] model_encoder_layers_13_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[206] model_encoder_layers_13_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[207] model_encoder_layers_13_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[208] model_encoder_layers_13_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[209] model_encoder_layers_13_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[210] model_encoder_layers_13_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = 
packed_params[211] model_encoder_layers_13_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[212] model_encoder_layers_13_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[213] model_encoder_layers_13_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[214] model_encoder_layers_14_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[215] model_encoder_layers_14_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[216] model_encoder_layers_14_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[217] model_encoder_layers_14_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[218] model_encoder_layers_14_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[219] model_encoder_layers_14_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[220] model_encoder_layers_14_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[221] model_encoder_layers_14_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[222] model_encoder_layers_14_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[223] model_encoder_layers_14_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[224] model_encoder_layers_14_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[225] model_encoder_layers_14_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[226] model_encoder_layers_14_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[227] model_encoder_layers_14_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[228] model_encoder_layers_14_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[229] model_encoder_layers_15_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[230] 
model_encoder_layers_15_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[231] model_encoder_layers_15_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[232] model_encoder_layers_15_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[233] model_encoder_layers_15_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[234] model_encoder_layers_15_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[235] model_encoder_layers_15_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[236] model_encoder_layers_15_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[237] model_encoder_layers_15_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[238] model_encoder_layers_15_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[239] model_encoder_layers_15_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[240] model_encoder_layers_15_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[241] model_encoder_layers_15_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[242] model_encoder_layers_15_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[243] model_encoder_layers_15_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[244] model_encoder_layers_16_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[245] model_encoder_layers_16_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[246] model_encoder_layers_16_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[247] model_encoder_layers_16_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[248] model_encoder_layers_16_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[249] 
model_encoder_layers_16_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[250] model_encoder_layers_16_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[251] model_encoder_layers_16_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[252] model_encoder_layers_16_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[253] model_encoder_layers_16_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[254] model_encoder_layers_16_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[255] model_encoder_layers_16_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[256] model_encoder_layers_16_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[257] model_encoder_layers_16_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[258] model_encoder_layers_16_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[259] model_encoder_layers_17_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[260] model_encoder_layers_17_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[261] model_encoder_layers_17_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[262] model_encoder_layers_17_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[263] model_encoder_layers_17_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[264] model_encoder_layers_17_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[265] model_encoder_layers_17_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[266] model_encoder_layers_17_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[267] model_encoder_layers_17_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = 
packed_params[268] model_encoder_layers_17_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[269] model_encoder_layers_17_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[270] model_encoder_layers_17_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[271] model_encoder_layers_17_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[272] model_encoder_layers_17_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[273] model_encoder_layers_17_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[274] model_encoder_layers_18_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[275] model_encoder_layers_18_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[276] model_encoder_layers_18_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[277] model_encoder_layers_18_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[278] model_encoder_layers_18_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[279] model_encoder_layers_18_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[280] model_encoder_layers_18_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[281] model_encoder_layers_18_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[282] model_encoder_layers_18_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[283] model_encoder_layers_18_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[284] model_encoder_layers_18_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[285] model_encoder_layers_18_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[286] model_encoder_layers_18_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[287] 
model_encoder_layers_18_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[288] model_encoder_layers_18_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[289] model_encoder_layers_19_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[290] model_encoder_layers_19_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[291] model_encoder_layers_19_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[292] model_encoder_layers_19_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[293] model_encoder_layers_19_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[294] model_encoder_layers_19_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[295] model_encoder_layers_19_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[296] model_encoder_layers_19_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[297] model_encoder_layers_19_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[298] model_encoder_layers_19_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[299] model_encoder_layers_19_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[300] model_encoder_layers_19_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[301] model_encoder_layers_19_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[302] model_encoder_layers_19_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[303] model_encoder_layers_19_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[304] model_encoder_layers_20_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[305] model_encoder_layers_20_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[306] 
model_encoder_layers_20_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[307] model_encoder_layers_20_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[308] model_encoder_layers_20_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[309] model_encoder_layers_20_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[310] model_encoder_layers_20_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[311] model_encoder_layers_20_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[312] model_encoder_layers_20_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[313] model_encoder_layers_20_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[314] model_encoder_layers_20_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[315] model_encoder_layers_20_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[316] model_encoder_layers_20_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[317] model_encoder_layers_20_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[318] model_encoder_layers_20_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[319] model_encoder_layers_21_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[320] model_encoder_layers_21_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[321] model_encoder_layers_21_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[322] model_encoder_layers_21_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[323] model_encoder_layers_21_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[324] model_encoder_layers_21_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[325] 
model_encoder_layers_21_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[326] model_encoder_layers_21_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[327] model_encoder_layers_21_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[328] model_encoder_layers_21_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[329] model_encoder_layers_21_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[330] model_encoder_layers_21_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[331] model_encoder_layers_21_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[332] model_encoder_layers_21_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[333] model_encoder_layers_21_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[334] model_encoder_layers_22_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[335] model_encoder_layers_22_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[336] model_encoder_layers_22_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[337] model_encoder_layers_22_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[338] model_encoder_layers_22_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[339] model_encoder_layers_22_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[340] model_encoder_layers_22_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[341] model_encoder_layers_22_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[342] model_encoder_layers_22_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[343] model_encoder_layers_22_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[344] 
model_encoder_layers_22_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[345] model_encoder_layers_22_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[346] model_encoder_layers_22_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[347] model_encoder_layers_22_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[348] model_encoder_layers_22_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[349] model_encoder_layers_23_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[350] model_encoder_layers_23_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[351] model_encoder_layers_23_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[352] model_encoder_layers_23_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[353] model_encoder_layers_23_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[354] model_encoder_layers_23_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[355] model_encoder_layers_23_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[356] model_encoder_layers_23_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[357] model_encoder_layers_23_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[358] model_encoder_layers_23_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[359] model_encoder_layers_23_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[360] model_encoder_layers_23_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[361] model_encoder_layers_23_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[362] model_encoder_layers_23_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[363] model_encoder_layers_23_final_layer_norm_bias2: 
R.Tensor((1280,), dtype="float16") = packed_params[364] model_encoder_layers_24_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[365] model_encoder_layers_24_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[366] model_encoder_layers_24_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[367] model_encoder_layers_24_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[368] model_encoder_layers_24_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[369] model_encoder_layers_24_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[370] model_encoder_layers_24_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[371] model_encoder_layers_24_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[372] model_encoder_layers_24_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[373] model_encoder_layers_24_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[374] model_encoder_layers_24_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[375] model_encoder_layers_24_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[376] model_encoder_layers_24_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[377] model_encoder_layers_24_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[378] model_encoder_layers_24_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[379] model_encoder_layers_25_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[380] model_encoder_layers_25_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[381] model_encoder_layers_25_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[382] model_encoder_layers_25_self_attn_q_proj_weight2: 
R.Tensor((1280, 1280), dtype="float16") = packed_params[383] model_encoder_layers_25_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[384] model_encoder_layers_25_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[385] model_encoder_layers_25_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[386] model_encoder_layers_25_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[387] model_encoder_layers_25_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[388] model_encoder_layers_25_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[389] model_encoder_layers_25_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[390] model_encoder_layers_25_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[391] model_encoder_layers_25_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[392] model_encoder_layers_25_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[393] model_encoder_layers_25_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[394] model_encoder_layers_26_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[395] model_encoder_layers_26_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[396] model_encoder_layers_26_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[397] model_encoder_layers_26_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[398] model_encoder_layers_26_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[399] model_encoder_layers_26_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[400] model_encoder_layers_26_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[401] 
model_encoder_layers_26_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[402] model_encoder_layers_26_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[403] model_encoder_layers_26_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[404] model_encoder_layers_26_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[405] model_encoder_layers_26_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[406] model_encoder_layers_26_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[407] model_encoder_layers_26_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[408] model_encoder_layers_26_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[409] model_encoder_layers_27_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[410] model_encoder_layers_27_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[411] model_encoder_layers_27_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[412] model_encoder_layers_27_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[413] model_encoder_layers_27_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[414] model_encoder_layers_27_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[415] model_encoder_layers_27_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[416] model_encoder_layers_27_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[417] model_encoder_layers_27_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[418] model_encoder_layers_27_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[419] model_encoder_layers_27_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[420] 
model_encoder_layers_27_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[421] model_encoder_layers_27_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[422] model_encoder_layers_27_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[423] model_encoder_layers_27_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[424] model_encoder_layers_28_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[425] model_encoder_layers_28_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[426] model_encoder_layers_28_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[427] model_encoder_layers_28_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[428] model_encoder_layers_28_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[429] model_encoder_layers_28_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[430] model_encoder_layers_28_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[431] model_encoder_layers_28_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[432] model_encoder_layers_28_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[433] model_encoder_layers_28_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[434] model_encoder_layers_28_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[435] model_encoder_layers_28_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[436] model_encoder_layers_28_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[437] model_encoder_layers_28_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[438] model_encoder_layers_28_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[439] 
model_encoder_layers_29_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[440] model_encoder_layers_29_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[441] model_encoder_layers_29_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[442] model_encoder_layers_29_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[443] model_encoder_layers_29_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[444] model_encoder_layers_29_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[445] model_encoder_layers_29_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[446] model_encoder_layers_29_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[447] model_encoder_layers_29_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[448] model_encoder_layers_29_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[449] model_encoder_layers_29_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[450] model_encoder_layers_29_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[451] model_encoder_layers_29_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[452] model_encoder_layers_29_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[453] model_encoder_layers_29_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[454] model_encoder_layers_30_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[455] model_encoder_layers_30_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[456] model_encoder_layers_30_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[457] model_encoder_layers_30_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = 
packed_params[458] model_encoder_layers_30_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[459] model_encoder_layers_30_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[460] model_encoder_layers_30_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[461] model_encoder_layers_30_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[462] model_encoder_layers_30_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[463] model_encoder_layers_30_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[464] model_encoder_layers_30_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[465] model_encoder_layers_30_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[466] model_encoder_layers_30_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[467] model_encoder_layers_30_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[468] model_encoder_layers_30_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[469] model_encoder_layers_31_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[470] model_encoder_layers_31_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[471] model_encoder_layers_31_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[472] model_encoder_layers_31_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[473] model_encoder_layers_31_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[474] model_encoder_layers_31_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[475] model_encoder_layers_31_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[476] model_encoder_layers_31_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = 
packed_params[477] model_encoder_layers_31_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[478] model_encoder_layers_31_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[479] model_encoder_layers_31_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[480] model_encoder_layers_31_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[481] model_encoder_layers_31_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[482] model_encoder_layers_31_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[483] model_encoder_layers_31_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[484] model_encoder_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[485] model_encoder_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[486] model_decoder_embed_tokens_weight2: R.Tensor((51866, 1280), dtype="float16") = packed_params[487] model_decoder_embed_positions_weight2: R.Tensor((448, 1280), dtype="float16") = packed_params[488] model_decoder_layers_0_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[489] model_decoder_layers_0_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[490] model_decoder_layers_0_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[491] model_decoder_layers_0_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[492] model_decoder_layers_0_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[493] model_decoder_layers_0_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[494] model_decoder_layers_0_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[495] model_decoder_layers_0_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[496] 
model_decoder_layers_0_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[497] model_decoder_layers_0_encoder_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[498] model_decoder_layers_0_encoder_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[499] model_decoder_layers_0_encoder_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[500] model_decoder_layers_0_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[501] model_decoder_layers_0_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[502] model_decoder_layers_0_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[503] model_decoder_layers_0_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[504] model_decoder_layers_0_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[505] model_decoder_layers_0_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[506] model_decoder_layers_0_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[507] model_decoder_layers_0_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[508] model_decoder_layers_0_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[509] model_decoder_layers_0_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[510] model_decoder_layers_0_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[511] model_decoder_layers_0_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[512] model_decoder_layers_1_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[513] model_decoder_layers_1_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[514] model_decoder_layers_1_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = 
packed_params[515] model_decoder_layers_1_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[516] model_decoder_layers_1_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[517] model_decoder_layers_1_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[518] model_decoder_layers_1_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[519] model_decoder_layers_1_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[520] model_decoder_layers_1_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[521] model_decoder_layers_1_encoder_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[522] model_decoder_layers_1_encoder_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[523] model_decoder_layers_1_encoder_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[524] model_decoder_layers_1_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[525] model_decoder_layers_1_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[526] model_decoder_layers_1_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[527] model_decoder_layers_1_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[528] model_decoder_layers_1_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[529] model_decoder_layers_1_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[530] model_decoder_layers_1_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[531] model_decoder_layers_1_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[532] model_decoder_layers_1_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[533] model_decoder_layers_1_fc2_bias2: R.Tensor((1280,), 
dtype="float16") = packed_params[534] model_decoder_layers_1_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[535] model_decoder_layers_1_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[536] model_decoder_layers_2_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[537] model_decoder_layers_2_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[538] model_decoder_layers_2_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[539] model_decoder_layers_2_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[540] model_decoder_layers_2_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[541] model_decoder_layers_2_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[542] model_decoder_layers_2_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[543] model_decoder_layers_2_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[544] model_decoder_layers_2_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[545] model_decoder_layers_2_encoder_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[546] model_decoder_layers_2_encoder_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[547] model_decoder_layers_2_encoder_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[548] model_decoder_layers_2_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[549] model_decoder_layers_2_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[550] model_decoder_layers_2_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[551] model_decoder_layers_2_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[552] 
model_decoder_layers_2_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[553] model_decoder_layers_2_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[554] model_decoder_layers_2_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[555] model_decoder_layers_2_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[556] model_decoder_layers_2_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[557] model_decoder_layers_2_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[558] model_decoder_layers_2_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[559] model_decoder_layers_2_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[560] model_decoder_layers_3_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[561] model_decoder_layers_3_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[562] model_decoder_layers_3_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[563] model_decoder_layers_3_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[564] model_decoder_layers_3_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[565] model_decoder_layers_3_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[566] model_decoder_layers_3_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[567] model_decoder_layers_3_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[568] model_decoder_layers_3_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[569] model_decoder_layers_3_encoder_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[570] model_decoder_layers_3_encoder_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[571] 
model_decoder_layers_3_encoder_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[572] model_decoder_layers_3_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[573] model_decoder_layers_3_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[574] model_decoder_layers_3_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[575] model_decoder_layers_3_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[576] model_decoder_layers_3_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[577] model_decoder_layers_3_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[578] model_decoder_layers_3_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[579] model_decoder_layers_3_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[580] model_decoder_layers_3_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[581] model_decoder_layers_3_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[582] model_decoder_layers_3_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[583] model_decoder_layers_3_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[584] model_decoder_layers_4_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[585] model_decoder_layers_4_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[586] model_decoder_layers_4_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[587] model_decoder_layers_4_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[588] model_decoder_layers_4_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[589] model_decoder_layers_4_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[590] 
model_decoder_layers_4_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[591] model_decoder_layers_4_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[592] model_decoder_layers_4_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[593] model_decoder_layers_4_encoder_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[594] model_decoder_layers_4_encoder_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[595] model_decoder_layers_4_encoder_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[596] model_decoder_layers_4_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[597] model_decoder_layers_4_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[598] model_decoder_layers_4_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[599] model_decoder_layers_4_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[600] model_decoder_layers_4_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[601] model_decoder_layers_4_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[602] model_decoder_layers_4_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[603] model_decoder_layers_4_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[604] model_decoder_layers_4_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[605] model_decoder_layers_4_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[606] model_decoder_layers_4_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[607] model_decoder_layers_4_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[608] model_decoder_layers_5_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = 
packed_params[609] model_decoder_layers_5_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[610] model_decoder_layers_5_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[611] model_decoder_layers_5_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[612] model_decoder_layers_5_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[613] model_decoder_layers_5_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[614] model_decoder_layers_5_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[615] model_decoder_layers_5_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[616] model_decoder_layers_5_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[617] model_decoder_layers_5_encoder_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[618] model_decoder_layers_5_encoder_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[619] model_decoder_layers_5_encoder_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[620] model_decoder_layers_5_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[621] model_decoder_layers_5_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[622] model_decoder_layers_5_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[623] model_decoder_layers_5_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[624] model_decoder_layers_5_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[625] model_decoder_layers_5_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[626] model_decoder_layers_5_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[627] 
model_decoder_layers_5_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[628] model_decoder_layers_5_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[629] model_decoder_layers_5_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[630] model_decoder_layers_5_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[631] model_decoder_layers_5_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[632] model_decoder_layers_6_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[633] model_decoder_layers_6_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[634] model_decoder_layers_6_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[635] model_decoder_layers_6_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[636] model_decoder_layers_6_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[637] model_decoder_layers_6_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[638] model_decoder_layers_6_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[639] model_decoder_layers_6_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[640] model_decoder_layers_6_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[641] model_decoder_layers_6_encoder_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[642] model_decoder_layers_6_encoder_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[643] model_decoder_layers_6_encoder_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[644] model_decoder_layers_6_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[645] model_decoder_layers_6_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = 
packed_params[646] model_decoder_layers_6_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[647] model_decoder_layers_6_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[648] model_decoder_layers_6_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[649] model_decoder_layers_6_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[650] model_decoder_layers_6_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[651] model_decoder_layers_6_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[652] model_decoder_layers_6_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[653] model_decoder_layers_6_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[654] model_decoder_layers_6_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[655] model_decoder_layers_6_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[656] model_decoder_layers_7_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[657] model_decoder_layers_7_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[658] model_decoder_layers_7_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[659] model_decoder_layers_7_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[660] model_decoder_layers_7_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[661] model_decoder_layers_7_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[662] model_decoder_layers_7_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[663] model_decoder_layers_7_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[664] model_decoder_layers_7_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = 
packed_params[665] model_decoder_layers_7_encoder_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[666] model_decoder_layers_7_encoder_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[667] model_decoder_layers_7_encoder_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[668] model_decoder_layers_7_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[669] model_decoder_layers_7_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[670] model_decoder_layers_7_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[671] model_decoder_layers_7_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[672] model_decoder_layers_7_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[673] model_decoder_layers_7_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[674] model_decoder_layers_7_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[675] model_decoder_layers_7_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[676] model_decoder_layers_7_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[677] model_decoder_layers_7_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[678] model_decoder_layers_7_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[679] model_decoder_layers_7_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[680] model_decoder_layers_8_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[681] model_decoder_layers_8_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[682] model_decoder_layers_8_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[683] model_decoder_layers_8_self_attn_q_proj_weight2: R.Tensor((1280, 1280), 
dtype="float16") = packed_params[684] model_decoder_layers_8_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[685] model_decoder_layers_8_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[686] model_decoder_layers_8_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[687] model_decoder_layers_8_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[688] model_decoder_layers_8_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[689] model_decoder_layers_8_encoder_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[690] model_decoder_layers_8_encoder_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[691] model_decoder_layers_8_encoder_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[692] model_decoder_layers_8_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[693] model_decoder_layers_8_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[694] model_decoder_layers_8_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[695] model_decoder_layers_8_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[696] model_decoder_layers_8_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[697] model_decoder_layers_8_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[698] model_decoder_layers_8_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[699] model_decoder_layers_8_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[700] model_decoder_layers_8_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[701] model_decoder_layers_8_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[702] model_decoder_layers_8_final_layer_norm_weight2: 
R.Tensor((1280,), dtype="float16") = packed_params[703] model_decoder_layers_8_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[704] model_decoder_layers_9_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[705] model_decoder_layers_9_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[706] model_decoder_layers_9_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[707] model_decoder_layers_9_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[708] model_decoder_layers_9_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[709] model_decoder_layers_9_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[710] model_decoder_layers_9_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[711] model_decoder_layers_9_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[712] model_decoder_layers_9_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[713] model_decoder_layers_9_encoder_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[714] model_decoder_layers_9_encoder_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[715] model_decoder_layers_9_encoder_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[716] model_decoder_layers_9_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[717] model_decoder_layers_9_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[718] model_decoder_layers_9_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[719] model_decoder_layers_9_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[720] model_decoder_layers_9_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = 
packed_params[721] model_decoder_layers_9_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[722] model_decoder_layers_9_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[723] model_decoder_layers_9_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[724] model_decoder_layers_9_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[725] model_decoder_layers_9_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[726] model_decoder_layers_9_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[727] model_decoder_layers_9_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[728] model_decoder_layers_10_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[729] model_decoder_layers_10_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[730] model_decoder_layers_10_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[731] model_decoder_layers_10_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[732] model_decoder_layers_10_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[733] model_decoder_layers_10_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[734] model_decoder_layers_10_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[735] model_decoder_layers_10_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[736] model_decoder_layers_10_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[737] model_decoder_layers_10_encoder_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[738] model_decoder_layers_10_encoder_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[739] model_decoder_layers_10_encoder_attn_v_proj_bias2: R.Tensor((1280,), 
dtype="float16") = packed_params[740] model_decoder_layers_10_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[741] model_decoder_layers_10_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[742] model_decoder_layers_10_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[743] model_decoder_layers_10_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[744] model_decoder_layers_10_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[745] model_decoder_layers_10_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[746] model_decoder_layers_10_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[747] model_decoder_layers_10_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[748] model_decoder_layers_10_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[749] model_decoder_layers_10_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[750] model_decoder_layers_10_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[751] model_decoder_layers_10_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[752] model_decoder_layers_11_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[753] model_decoder_layers_11_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[754] model_decoder_layers_11_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[755] model_decoder_layers_11_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[756] model_decoder_layers_11_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[757] model_decoder_layers_11_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[758] model_decoder_layers_11_self_attn_out_proj_bias2: 
R.Tensor((1280,), dtype="float16") = packed_params[759] model_decoder_layers_11_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[760] model_decoder_layers_11_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[761] model_decoder_layers_11_encoder_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[762] model_decoder_layers_11_encoder_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[763] model_decoder_layers_11_encoder_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[764] model_decoder_layers_11_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[765] model_decoder_layers_11_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[766] model_decoder_layers_11_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[767] model_decoder_layers_11_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[768] model_decoder_layers_11_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[769] model_decoder_layers_11_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[770] model_decoder_layers_11_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[771] model_decoder_layers_11_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[772] model_decoder_layers_11_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[773] model_decoder_layers_11_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[774] model_decoder_layers_11_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[775] model_decoder_layers_11_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[776] model_decoder_layers_12_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[777] 
model_decoder_layers_12_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[778] model_decoder_layers_12_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[779] model_decoder_layers_12_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[780] model_decoder_layers_12_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[781] model_decoder_layers_12_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[782] model_decoder_layers_12_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[783] model_decoder_layers_12_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[784] model_decoder_layers_12_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[785] model_decoder_layers_12_encoder_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[786] model_decoder_layers_12_encoder_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[787] model_decoder_layers_12_encoder_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[788] model_decoder_layers_12_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[789] model_decoder_layers_12_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[790] model_decoder_layers_12_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[791] model_decoder_layers_12_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[792] model_decoder_layers_12_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[793] model_decoder_layers_12_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[794] model_decoder_layers_12_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[795] 
model_decoder_layers_12_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[796] model_decoder_layers_12_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[797] model_decoder_layers_12_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[798] model_decoder_layers_12_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[799] model_decoder_layers_12_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[800] model_decoder_layers_13_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[801] model_decoder_layers_13_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[802] model_decoder_layers_13_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[803] model_decoder_layers_13_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[804] model_decoder_layers_13_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[805] model_decoder_layers_13_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[806] model_decoder_layers_13_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[807] model_decoder_layers_13_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[808] model_decoder_layers_13_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[809] model_decoder_layers_13_encoder_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[810] model_decoder_layers_13_encoder_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[811] model_decoder_layers_13_encoder_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[812] model_decoder_layers_13_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[813] model_decoder_layers_13_encoder_attn_q_proj_bias2: R.Tensor((1280,), 
dtype="float16") = packed_params[814] model_decoder_layers_13_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[815] model_decoder_layers_13_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[816] model_decoder_layers_13_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[817] model_decoder_layers_13_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[818] model_decoder_layers_13_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[819] model_decoder_layers_13_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[820] model_decoder_layers_13_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[821] model_decoder_layers_13_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[822] model_decoder_layers_13_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[823] model_decoder_layers_13_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[824] model_decoder_layers_14_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[825] model_decoder_layers_14_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[826] model_decoder_layers_14_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[827] model_decoder_layers_14_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[828] model_decoder_layers_14_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[829] model_decoder_layers_14_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[830] model_decoder_layers_14_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[831] model_decoder_layers_14_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[832] model_decoder_layers_14_self_attn_layer_norm_bias2: 
R.Tensor((1280,), dtype="float16") = packed_params[833] model_decoder_layers_14_encoder_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[834] model_decoder_layers_14_encoder_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[835] model_decoder_layers_14_encoder_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[836] model_decoder_layers_14_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[837] model_decoder_layers_14_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[838] model_decoder_layers_14_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[839] model_decoder_layers_14_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[840] model_decoder_layers_14_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[841] model_decoder_layers_14_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[842] model_decoder_layers_14_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[843] model_decoder_layers_14_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[844] model_decoder_layers_14_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[845] model_decoder_layers_14_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[846] model_decoder_layers_14_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[847] model_decoder_layers_14_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[848] model_decoder_layers_15_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[849] model_decoder_layers_15_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[850] model_decoder_layers_15_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[851] 
model_decoder_layers_15_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[852] model_decoder_layers_15_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[853] model_decoder_layers_15_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[854] model_decoder_layers_15_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[855] model_decoder_layers_15_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[856] model_decoder_layers_15_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[857] model_decoder_layers_15_encoder_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[858] model_decoder_layers_15_encoder_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[859] model_decoder_layers_15_encoder_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[860] model_decoder_layers_15_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[861] model_decoder_layers_15_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[862] model_decoder_layers_15_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[863] model_decoder_layers_15_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[864] model_decoder_layers_15_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[865] model_decoder_layers_15_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[866] model_decoder_layers_15_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[867] model_decoder_layers_15_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[868] model_decoder_layers_15_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[869] model_decoder_layers_15_fc2_bias2: R.Tensor((1280,), 
dtype="float16") = packed_params[870] model_decoder_layers_15_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[871] model_decoder_layers_15_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[872] model_decoder_layers_16_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[873] model_decoder_layers_16_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[874] model_decoder_layers_16_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[875] model_decoder_layers_16_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[876] model_decoder_layers_16_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[877] model_decoder_layers_16_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[878] model_decoder_layers_16_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[879] model_decoder_layers_16_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[880] model_decoder_layers_16_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[881] model_decoder_layers_16_encoder_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[882] model_decoder_layers_16_encoder_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[883] model_decoder_layers_16_encoder_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[884] model_decoder_layers_16_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[885] model_decoder_layers_16_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[886] model_decoder_layers_16_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[887] model_decoder_layers_16_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = 
packed_params[888] model_decoder_layers_16_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[889] model_decoder_layers_16_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[890] model_decoder_layers_16_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[891] model_decoder_layers_16_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[892] model_decoder_layers_16_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[893] model_decoder_layers_16_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[894] model_decoder_layers_16_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[895] model_decoder_layers_16_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[896] model_decoder_layers_17_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[897] model_decoder_layers_17_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[898] model_decoder_layers_17_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[899] model_decoder_layers_17_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[900] model_decoder_layers_17_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[901] model_decoder_layers_17_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[902] model_decoder_layers_17_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[903] model_decoder_layers_17_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[904] model_decoder_layers_17_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[905] model_decoder_layers_17_encoder_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[906] model_decoder_layers_17_encoder_attn_v_proj_weight2: R.Tensor((1280, 1280), 
dtype="float16") = packed_params[907] model_decoder_layers_17_encoder_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[908] model_decoder_layers_17_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[909] model_decoder_layers_17_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[910] model_decoder_layers_17_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[911] model_decoder_layers_17_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[912] model_decoder_layers_17_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[913] model_decoder_layers_17_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[914] model_decoder_layers_17_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[915] model_decoder_layers_17_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[916] model_decoder_layers_17_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[917] model_decoder_layers_17_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[918] model_decoder_layers_17_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[919] model_decoder_layers_17_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[920] model_decoder_layers_18_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[921] model_decoder_layers_18_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[922] model_decoder_layers_18_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[923] model_decoder_layers_18_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[924] model_decoder_layers_18_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[925] model_decoder_layers_18_self_attn_out_proj_weight2: 
R.Tensor((1280, 1280), dtype="float16") = packed_params[926] model_decoder_layers_18_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[927] model_decoder_layers_18_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[928] model_decoder_layers_18_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[929] model_decoder_layers_18_encoder_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[930] model_decoder_layers_18_encoder_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[931] model_decoder_layers_18_encoder_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[932] model_decoder_layers_18_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[933] model_decoder_layers_18_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[934] model_decoder_layers_18_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[935] model_decoder_layers_18_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[936] model_decoder_layers_18_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[937] model_decoder_layers_18_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[938] model_decoder_layers_18_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[939] model_decoder_layers_18_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[940] model_decoder_layers_18_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[941] model_decoder_layers_18_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[942] model_decoder_layers_18_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[943] model_decoder_layers_18_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[944] 
model_decoder_layers_19_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[945] model_decoder_layers_19_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[946] model_decoder_layers_19_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[947] model_decoder_layers_19_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[948] model_decoder_layers_19_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[949] model_decoder_layers_19_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[950] model_decoder_layers_19_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[951] model_decoder_layers_19_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[952] model_decoder_layers_19_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[953] model_decoder_layers_19_encoder_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[954] model_decoder_layers_19_encoder_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[955] model_decoder_layers_19_encoder_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[956] model_decoder_layers_19_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[957] model_decoder_layers_19_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[958] model_decoder_layers_19_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[959] model_decoder_layers_19_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[960] model_decoder_layers_19_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[961] model_decoder_layers_19_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[962] 
model_decoder_layers_19_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[963] model_decoder_layers_19_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[964] model_decoder_layers_19_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[965] model_decoder_layers_19_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[966] model_decoder_layers_19_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[967] model_decoder_layers_19_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[968] model_decoder_layers_20_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[969] model_decoder_layers_20_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[970] model_decoder_layers_20_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[971] model_decoder_layers_20_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[972] model_decoder_layers_20_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[973] model_decoder_layers_20_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[974] model_decoder_layers_20_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[975] model_decoder_layers_20_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[976] model_decoder_layers_20_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[977] model_decoder_layers_20_encoder_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[978] model_decoder_layers_20_encoder_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[979] model_decoder_layers_20_encoder_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[980] model_decoder_layers_20_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = 
packed_params[981] model_decoder_layers_20_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[982] model_decoder_layers_20_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[983] model_decoder_layers_20_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[984] model_decoder_layers_20_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[985] model_decoder_layers_20_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[986] model_decoder_layers_20_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[987] model_decoder_layers_20_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[988] model_decoder_layers_20_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[989] model_decoder_layers_20_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[990] model_decoder_layers_20_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[991] model_decoder_layers_20_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[992] model_decoder_layers_21_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[993] model_decoder_layers_21_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[994] model_decoder_layers_21_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[995] model_decoder_layers_21_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[996] model_decoder_layers_21_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[997] model_decoder_layers_21_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[998] model_decoder_layers_21_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[999] model_decoder_layers_21_self_attn_layer_norm_weight2: R.Tensor((1280,), 
dtype="float16") = packed_params[1000] model_decoder_layers_21_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1001] model_decoder_layers_21_encoder_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1002] model_decoder_layers_21_encoder_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1003] model_decoder_layers_21_encoder_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1004] model_decoder_layers_21_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1005] model_decoder_layers_21_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1006] model_decoder_layers_21_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1007] model_decoder_layers_21_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1008] model_decoder_layers_21_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1009] model_decoder_layers_21_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1010] model_decoder_layers_21_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[1011] model_decoder_layers_21_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[1012] model_decoder_layers_21_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[1013] model_decoder_layers_21_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1014] model_decoder_layers_21_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1015] model_decoder_layers_21_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1016] model_decoder_layers_22_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1017] model_decoder_layers_22_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1018] 
model_decoder_layers_22_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1019] model_decoder_layers_22_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1020] model_decoder_layers_22_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1021] model_decoder_layers_22_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1022] model_decoder_layers_22_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1023] model_decoder_layers_22_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1024] model_decoder_layers_22_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1025] model_decoder_layers_22_encoder_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1026] model_decoder_layers_22_encoder_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1027] model_decoder_layers_22_encoder_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1028] model_decoder_layers_22_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1029] model_decoder_layers_22_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1030] model_decoder_layers_22_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1031] model_decoder_layers_22_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1032] model_decoder_layers_22_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1033] model_decoder_layers_22_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1034] model_decoder_layers_22_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[1035] model_decoder_layers_22_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[1036] 
model_decoder_layers_22_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[1037] model_decoder_layers_22_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1038] model_decoder_layers_22_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1039] model_decoder_layers_22_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1040] model_decoder_layers_23_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1041] model_decoder_layers_23_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1042] model_decoder_layers_23_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1043] model_decoder_layers_23_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1044] model_decoder_layers_23_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1045] model_decoder_layers_23_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1046] model_decoder_layers_23_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1047] model_decoder_layers_23_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1048] model_decoder_layers_23_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1049] model_decoder_layers_23_encoder_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1050] model_decoder_layers_23_encoder_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1051] model_decoder_layers_23_encoder_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1052] model_decoder_layers_23_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1053] model_decoder_layers_23_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1054] 
model_decoder_layers_23_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1055] model_decoder_layers_23_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1056] model_decoder_layers_23_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1057] model_decoder_layers_23_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1058] model_decoder_layers_23_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[1059] model_decoder_layers_23_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[1060] model_decoder_layers_23_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[1061] model_decoder_layers_23_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1062] model_decoder_layers_23_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1063] model_decoder_layers_23_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1064] model_decoder_layers_24_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1065] model_decoder_layers_24_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1066] model_decoder_layers_24_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1067] model_decoder_layers_24_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1068] model_decoder_layers_24_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1069] model_decoder_layers_24_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1070] model_decoder_layers_24_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1071] model_decoder_layers_24_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1072] model_decoder_layers_24_self_attn_layer_norm_bias2: R.Tensor((1280,), 
dtype="float16") = packed_params[1073] model_decoder_layers_24_encoder_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1074] model_decoder_layers_24_encoder_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1075] model_decoder_layers_24_encoder_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1076] model_decoder_layers_24_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1077] model_decoder_layers_24_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1078] model_decoder_layers_24_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1079] model_decoder_layers_24_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1080] model_decoder_layers_24_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1081] model_decoder_layers_24_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1082] model_decoder_layers_24_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[1083] model_decoder_layers_24_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[1084] model_decoder_layers_24_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[1085] model_decoder_layers_24_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1086] model_decoder_layers_24_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1087] model_decoder_layers_24_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1088] model_decoder_layers_25_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1089] model_decoder_layers_25_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1090] model_decoder_layers_25_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1091] 
model_decoder_layers_25_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1092] model_decoder_layers_25_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1093] model_decoder_layers_25_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1094] model_decoder_layers_25_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1095] model_decoder_layers_25_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1096] model_decoder_layers_25_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1097] model_decoder_layers_25_encoder_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1098] model_decoder_layers_25_encoder_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1099] model_decoder_layers_25_encoder_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1100] model_decoder_layers_25_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1101] model_decoder_layers_25_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1102] model_decoder_layers_25_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1103] model_decoder_layers_25_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1104] model_decoder_layers_25_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1105] model_decoder_layers_25_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1106] model_decoder_layers_25_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[1107] model_decoder_layers_25_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[1108] model_decoder_layers_25_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[1109] model_decoder_layers_25_fc2_bias2: 
R.Tensor((1280,), dtype="float16") = packed_params[1110] model_decoder_layers_25_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1111] model_decoder_layers_25_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1112] model_decoder_layers_26_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1113] model_decoder_layers_26_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1114] model_decoder_layers_26_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1115] model_decoder_layers_26_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1116] model_decoder_layers_26_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1117] model_decoder_layers_26_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1118] model_decoder_layers_26_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1119] model_decoder_layers_26_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1120] model_decoder_layers_26_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1121] model_decoder_layers_26_encoder_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1122] model_decoder_layers_26_encoder_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1123] model_decoder_layers_26_encoder_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1124] model_decoder_layers_26_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1125] model_decoder_layers_26_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1126] model_decoder_layers_26_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1127] model_decoder_layers_26_encoder_attn_out_proj_bias2: 
R.Tensor((1280,), dtype="float16") = packed_params[1128] model_decoder_layers_26_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1129] model_decoder_layers_26_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1130] model_decoder_layers_26_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[1131] model_decoder_layers_26_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[1132] model_decoder_layers_26_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[1133] model_decoder_layers_26_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1134] model_decoder_layers_26_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1135] model_decoder_layers_26_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1136] model_decoder_layers_27_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1137] model_decoder_layers_27_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1138] model_decoder_layers_27_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1139] model_decoder_layers_27_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1140] model_decoder_layers_27_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1141] model_decoder_layers_27_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1142] model_decoder_layers_27_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1143] model_decoder_layers_27_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1144] model_decoder_layers_27_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1145] model_decoder_layers_27_encoder_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1146] 
model_decoder_layers_27_encoder_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1147] model_decoder_layers_27_encoder_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1148] model_decoder_layers_27_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1149] model_decoder_layers_27_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1150] model_decoder_layers_27_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1151] model_decoder_layers_27_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1152] model_decoder_layers_27_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1153] model_decoder_layers_27_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1154] model_decoder_layers_27_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[1155] model_decoder_layers_27_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[1156] model_decoder_layers_27_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[1157] model_decoder_layers_27_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1158] model_decoder_layers_27_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1159] model_decoder_layers_27_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1160] model_decoder_layers_28_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1161] model_decoder_layers_28_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1162] model_decoder_layers_28_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1163] model_decoder_layers_28_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1164] model_decoder_layers_28_self_attn_q_proj_bias2: R.Tensor((1280,), 
dtype="float16") = packed_params[1165] model_decoder_layers_28_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1166] model_decoder_layers_28_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1167] model_decoder_layers_28_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1168] model_decoder_layers_28_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1169] model_decoder_layers_28_encoder_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1170] model_decoder_layers_28_encoder_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1171] model_decoder_layers_28_encoder_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1172] model_decoder_layers_28_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1173] model_decoder_layers_28_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1174] model_decoder_layers_28_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1175] model_decoder_layers_28_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1176] model_decoder_layers_28_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1177] model_decoder_layers_28_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1178] model_decoder_layers_28_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[1179] model_decoder_layers_28_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[1180] model_decoder_layers_28_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[1181] model_decoder_layers_28_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1182] model_decoder_layers_28_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1183] 
model_decoder_layers_28_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1184] model_decoder_layers_29_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1185] model_decoder_layers_29_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1186] model_decoder_layers_29_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1187] model_decoder_layers_29_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1188] model_decoder_layers_29_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1189] model_decoder_layers_29_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1190] model_decoder_layers_29_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1191] model_decoder_layers_29_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1192] model_decoder_layers_29_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1193] model_decoder_layers_29_encoder_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1194] model_decoder_layers_29_encoder_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1195] model_decoder_layers_29_encoder_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1196] model_decoder_layers_29_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1197] model_decoder_layers_29_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1198] model_decoder_layers_29_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1199] model_decoder_layers_29_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1200] model_decoder_layers_29_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1201] 
model_decoder_layers_29_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1202] model_decoder_layers_29_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[1203] model_decoder_layers_29_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[1204] model_decoder_layers_29_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[1205] model_decoder_layers_29_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1206] model_decoder_layers_29_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1207] model_decoder_layers_29_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1208] model_decoder_layers_30_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1209] model_decoder_layers_30_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1210] model_decoder_layers_30_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1211] model_decoder_layers_30_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1212] model_decoder_layers_30_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1213] model_decoder_layers_30_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1214] model_decoder_layers_30_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1215] model_decoder_layers_30_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1216] model_decoder_layers_30_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1217] model_decoder_layers_30_encoder_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1218] model_decoder_layers_30_encoder_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1219] model_decoder_layers_30_encoder_attn_v_proj_bias2: R.Tensor((1280,), 
dtype="float16") = packed_params[1220] model_decoder_layers_30_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1221] model_decoder_layers_30_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1222] model_decoder_layers_30_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1223] model_decoder_layers_30_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1224] model_decoder_layers_30_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1225] model_decoder_layers_30_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1226] model_decoder_layers_30_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[1227] model_decoder_layers_30_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[1228] model_decoder_layers_30_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[1229] model_decoder_layers_30_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1230] model_decoder_layers_30_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1231] model_decoder_layers_30_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1232] model_decoder_layers_31_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1233] model_decoder_layers_31_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1234] model_decoder_layers_31_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1235] model_decoder_layers_31_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1236] model_decoder_layers_31_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1237] model_decoder_layers_31_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1238] 
model_decoder_layers_31_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1239] model_decoder_layers_31_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1240] model_decoder_layers_31_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1241] model_decoder_layers_31_encoder_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1242] model_decoder_layers_31_encoder_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1243] model_decoder_layers_31_encoder_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1244] model_decoder_layers_31_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1245] model_decoder_layers_31_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1246] model_decoder_layers_31_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1247] model_decoder_layers_31_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1248] model_decoder_layers_31_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1249] model_decoder_layers_31_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1250] model_decoder_layers_31_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[1251] model_decoder_layers_31_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[1252] model_decoder_layers_31_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[1253] model_decoder_layers_31_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1254] model_decoder_layers_31_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1255] model_decoder_layers_31_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1256] model_decoder_layer_norm_weight2: R.Tensor((1280,), 
dtype="float16") = packed_params[1257] model_decoder_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1258] reshape384: R.Tensor((seq_len,), dtype="int32") = R.reshape(input_ids, R.shape([seq_len])) take: R.Tensor((seq_len, 1280), dtype="float16") = R.take(model_decoder_embed_tokens_weight2, reshape384, axis=0) reshape385: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(take, R.shape([1, seq_len, 1280])) lv68: R.Tensor((seq_len,), dtype="int32") = R.call_pure_packed("vm.builtin.attention_kv_cache_get_query_positions", paged_kv_cache, sinfo_args=(R.Tensor((seq_len,), dtype="int32"),)) take1: R.Tensor((seq_len, 1280), dtype="float16") = R.take(model_decoder_embed_positions_weight2, lv68, axis=0) reshape386: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(take1, R.shape([1, seq_len, 1280])) add257: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(reshape385, reshape386) layer_norm65: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add257, model_decoder_layers_0_self_attn_layer_norm_weight2, model_decoder_layers_0_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims257: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_0_self_attn_q_proj_weight2, axes=None) matmul256: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm65, permute_dims257, out_dtype="void") add258: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul256, model_decoder_layers_0_self_attn_q_proj_bias2) reshape387: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add258, R.shape([1, seq_len, 20, 64])) permute_dims258: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_0_self_attn_k_proj_weight2, axes=None) matmul257: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm65, permute_dims258, out_dtype="void") reshape388: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul257, 
R.shape([1, seq_len, 20, 64])) permute_dims259: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_0_self_attn_v_proj_weight2, axes=None) matmul258: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm65, permute_dims259, out_dtype="void") add259: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul258, model_decoder_layers_0_self_attn_v_proj_bias2) reshape389: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add259, R.shape([1, seq_len, 20, 64])) concat: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape387, reshape388, reshape389), axis=2) reshape390: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat, R.shape([seq_len, 60, 64])) lv69 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(0), R.prim_value(T.float32(1)), reshape390), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape391: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv69, R.shape([1, seq_len, 20, 64])) reshape392: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape391, R.shape([1, seq_len, 1280])) permute_dims260: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_0_self_attn_out_proj_weight2, axes=None) matmul259: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape392, permute_dims260, out_dtype="void") add260: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul259, model_decoder_layers_0_self_attn_out_proj_bias2) add261: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add257, add260) layer_norm66: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add261, model_decoder_layers_0_encoder_attn_layer_norm_weight2, model_decoder_layers_0_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims261: R.Tensor((1280, 1280), dtype="float16") = 
R.permute_dims(model_decoder_layers_0_encoder_attn_q_proj_weight2, axes=None) matmul260: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm66, permute_dims261, out_dtype="void") add262: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul260, model_decoder_layers_0_encoder_attn_q_proj_bias2) reshape393: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add262, R.shape([1, seq_len, 20, 64])) reshape394: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape393, R.shape([seq_len, 20, 64])) lv70 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(0), R.prim_value(T.float32(1)), reshape394), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape395: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv70, R.shape([1, seq_len, 20, 64])) reshape396: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape395, R.shape([1, seq_len, 1280])) permute_dims262: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_0_encoder_attn_out_proj_weight2, axes=None) matmul261: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape396, permute_dims262, out_dtype="void") add263: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul261, model_decoder_layers_0_encoder_attn_out_proj_bias2) add264: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add261, add263) layer_norm67: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add264, model_decoder_layers_0_final_layer_norm_weight2, model_decoder_layers_0_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims263: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_0_fc1_weight2, axes=None) matmul262: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm67, permute_dims263, out_dtype="void") add265: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul262, 
model_decoder_layers_0_fc1_bias2) gelu34: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add265) permute_dims264: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_0_fc2_weight2, axes=None) matmul263: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu34, permute_dims264, out_dtype="void") add266: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul263, model_decoder_layers_0_fc2_bias2) add267: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add264, add266) layer_norm68: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add267, model_decoder_layers_1_self_attn_layer_norm_weight2, model_decoder_layers_1_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims265: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_1_self_attn_q_proj_weight2, axes=None) matmul264: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm68, permute_dims265, out_dtype="void") add268: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul264, model_decoder_layers_1_self_attn_q_proj_bias2) reshape397: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add268, R.shape([1, seq_len, 20, 64])) permute_dims266: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_1_self_attn_k_proj_weight2, axes=None) matmul265: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm68, permute_dims266, out_dtype="void") reshape398: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul265, R.shape([1, seq_len, 20, 64])) permute_dims267: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_1_self_attn_v_proj_weight2, axes=None) matmul266: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm68, permute_dims267, out_dtype="void") add269: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul266, 
model_decoder_layers_1_self_attn_v_proj_bias2) reshape399: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add269, R.shape([1, seq_len, 20, 64])) concat1: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape397, reshape398, reshape399), axis=2) reshape400: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat1, R.shape([seq_len, 60, 64])) lv71 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(1), R.prim_value(T.float32(1)), reshape400), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape401: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv71, R.shape([1, seq_len, 20, 64])) reshape402: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape401, R.shape([1, seq_len, 1280])) permute_dims268: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_1_self_attn_out_proj_weight2, axes=None) matmul267: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape402, permute_dims268, out_dtype="void") add270: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul267, model_decoder_layers_1_self_attn_out_proj_bias2) add271: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add267, add270) layer_norm69: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add271, model_decoder_layers_1_encoder_attn_layer_norm_weight2, model_decoder_layers_1_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims269: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_1_encoder_attn_q_proj_weight2, axes=None) matmul268: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm69, permute_dims269, out_dtype="void") add272: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul268, model_decoder_layers_1_encoder_attn_q_proj_bias2) reshape403: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add272, R.shape([1, 
seq_len, 20, 64])) reshape404: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape403, R.shape([seq_len, 20, 64])) lv72 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(1), R.prim_value(T.float32(1)), reshape404), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape405: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv72, R.shape([1, seq_len, 20, 64])) reshape406: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape405, R.shape([1, seq_len, 1280])) permute_dims270: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_1_encoder_attn_out_proj_weight2, axes=None) matmul269: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape406, permute_dims270, out_dtype="void") add273: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul269, model_decoder_layers_1_encoder_attn_out_proj_bias2) add274: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add271, add273) layer_norm70: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add274, model_decoder_layers_1_final_layer_norm_weight2, model_decoder_layers_1_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims271: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_1_fc1_weight2, axes=None) matmul270: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm70, permute_dims271, out_dtype="void") add275: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul270, model_decoder_layers_1_fc1_bias2) gelu35: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add275) permute_dims272: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_1_fc2_weight2, axes=None) matmul271: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu35, permute_dims272, out_dtype="void") add276: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul271, 
model_decoder_layers_1_fc2_bias2) add277: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add274, add276) layer_norm71: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add277, model_decoder_layers_2_self_attn_layer_norm_weight2, model_decoder_layers_2_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims273: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_2_self_attn_q_proj_weight2, axes=None) matmul272: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm71, permute_dims273, out_dtype="void") add278: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul272, model_decoder_layers_2_self_attn_q_proj_bias2) reshape407: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add278, R.shape([1, seq_len, 20, 64])) permute_dims274: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_2_self_attn_k_proj_weight2, axes=None) matmul273: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm71, permute_dims274, out_dtype="void") reshape408: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul273, R.shape([1, seq_len, 20, 64])) permute_dims275: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_2_self_attn_v_proj_weight2, axes=None) matmul274: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm71, permute_dims275, out_dtype="void") add279: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul274, model_decoder_layers_2_self_attn_v_proj_bias2) reshape409: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add279, R.shape([1, seq_len, 20, 64])) concat2: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape407, reshape408, reshape409), axis=2) reshape410: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat2, R.shape([seq_len, 60, 64])) lv73 = 
R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(2), R.prim_value(T.float32(1)), reshape410), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape411: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv73, R.shape([1, seq_len, 20, 64])) reshape412: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape411, R.shape([1, seq_len, 1280])) permute_dims276: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_2_self_attn_out_proj_weight2, axes=None) matmul275: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape412, permute_dims276, out_dtype="void") add280: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul275, model_decoder_layers_2_self_attn_out_proj_bias2) add281: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add277, add280) layer_norm72: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add281, model_decoder_layers_2_encoder_attn_layer_norm_weight2, model_decoder_layers_2_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims277: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_2_encoder_attn_q_proj_weight2, axes=None) matmul276: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm72, permute_dims277, out_dtype="void") add282: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul276, model_decoder_layers_2_encoder_attn_q_proj_bias2) reshape413: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add282, R.shape([1, seq_len, 20, 64])) reshape414: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape413, R.shape([seq_len, 20, 64])) lv74 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(2), R.prim_value(T.float32(1)), reshape414), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape415: R.Tensor((1, seq_len, 20, 64), 
dtype="float16") = R.reshape(lv74, R.shape([1, seq_len, 20, 64])) reshape416: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape415, R.shape([1, seq_len, 1280])) permute_dims278: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_2_encoder_attn_out_proj_weight2, axes=None) matmul277: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape416, permute_dims278, out_dtype="void") add283: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul277, model_decoder_layers_2_encoder_attn_out_proj_bias2) add284: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add281, add283) layer_norm73: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add284, model_decoder_layers_2_final_layer_norm_weight2, model_decoder_layers_2_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims279: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_2_fc1_weight2, axes=None) matmul278: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm73, permute_dims279, out_dtype="void") add285: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul278, model_decoder_layers_2_fc1_bias2) gelu36: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add285) permute_dims280: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_2_fc2_weight2, axes=None) matmul279: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu36, permute_dims280, out_dtype="void") add286: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul279, model_decoder_layers_2_fc2_bias2) add287: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add284, add286) layer_norm74: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add287, model_decoder_layers_3_self_attn_layer_norm_weight2, model_decoder_layers_3_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims281: 
R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_3_self_attn_q_proj_weight2, axes=None) matmul280: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm74, permute_dims281, out_dtype="void") add288: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul280, model_decoder_layers_3_self_attn_q_proj_bias2) reshape417: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add288, R.shape([1, seq_len, 20, 64])) permute_dims282: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_3_self_attn_k_proj_weight2, axes=None) matmul281: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm74, permute_dims282, out_dtype="void") reshape418: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul281, R.shape([1, seq_len, 20, 64])) permute_dims283: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_3_self_attn_v_proj_weight2, axes=None) matmul282: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm74, permute_dims283, out_dtype="void") add289: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul282, model_decoder_layers_3_self_attn_v_proj_bias2) reshape419: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add289, R.shape([1, seq_len, 20, 64])) concat3: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape417, reshape418, reshape419), axis=2) reshape420: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat3, R.shape([seq_len, 60, 64])) lv75 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(3), R.prim_value(T.float32(1)), reshape420), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape421: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv75, R.shape([1, seq_len, 20, 64])) reshape422: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape421, R.shape([1, seq_len, 1280])) permute_dims284: R.Tensor((1280, 
1280), dtype="float16") = R.permute_dims(model_decoder_layers_3_self_attn_out_proj_weight2, axes=None) matmul283: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape422, permute_dims284, out_dtype="void") add290: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul283, model_decoder_layers_3_self_attn_out_proj_bias2) add291: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add287, add290) layer_norm75: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add291, model_decoder_layers_3_encoder_attn_layer_norm_weight2, model_decoder_layers_3_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims285: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_3_encoder_attn_q_proj_weight2, axes=None) matmul284: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm75, permute_dims285, out_dtype="void") add292: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul284, model_decoder_layers_3_encoder_attn_q_proj_bias2) reshape423: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add292, R.shape([1, seq_len, 20, 64])) reshape424: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape423, R.shape([seq_len, 20, 64])) lv76 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(3), R.prim_value(T.float32(1)), reshape424), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape425: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv76, R.shape([1, seq_len, 20, 64])) reshape426: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape425, R.shape([1, seq_len, 1280])) permute_dims286: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_3_encoder_attn_out_proj_weight2, axes=None) matmul285: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape426, permute_dims286, out_dtype="void") add293: R.Tensor((1, seq_len, 1280), 
dtype="float16") = R.add(matmul285, model_decoder_layers_3_encoder_attn_out_proj_bias2) add294: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add291, add293) layer_norm76: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add294, model_decoder_layers_3_final_layer_norm_weight2, model_decoder_layers_3_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims287: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_3_fc1_weight2, axes=None) matmul286: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm76, permute_dims287, out_dtype="void") add295: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul286, model_decoder_layers_3_fc1_bias2) gelu37: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add295) permute_dims288: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_3_fc2_weight2, axes=None) matmul287: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu37, permute_dims288, out_dtype="void") add296: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul287, model_decoder_layers_3_fc2_bias2) add297: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add294, add296) layer_norm77: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add297, model_decoder_layers_4_self_attn_layer_norm_weight2, model_decoder_layers_4_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims289: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_4_self_attn_q_proj_weight2, axes=None) matmul288: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm77, permute_dims289, out_dtype="void") add298: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul288, model_decoder_layers_4_self_attn_q_proj_bias2) reshape427: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add298, R.shape([1, seq_len, 20, 64])) 
permute_dims290: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_4_self_attn_k_proj_weight2, axes=None) matmul289: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm77, permute_dims290, out_dtype="void") reshape428: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul289, R.shape([1, seq_len, 20, 64])) permute_dims291: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_4_self_attn_v_proj_weight2, axes=None) matmul290: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm77, permute_dims291, out_dtype="void") add299: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul290, model_decoder_layers_4_self_attn_v_proj_bias2) reshape429: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add299, R.shape([1, seq_len, 20, 64])) concat4: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape427, reshape428, reshape429), axis=2) reshape430: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat4, R.shape([seq_len, 60, 64])) lv77 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(4), R.prim_value(T.float32(1)), reshape430), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape431: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv77, R.shape([1, seq_len, 20, 64])) reshape432: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape431, R.shape([1, seq_len, 1280])) permute_dims292: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_4_self_attn_out_proj_weight2, axes=None) matmul291: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape432, permute_dims292, out_dtype="void") add300: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul291, model_decoder_layers_4_self_attn_out_proj_bias2) add301: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add297, add300) layer_norm78: R.Tensor((1, seq_len, 1280), 
dtype="float16") = R.nn.layer_norm(add301, model_decoder_layers_4_encoder_attn_layer_norm_weight2, model_decoder_layers_4_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims293: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_4_encoder_attn_q_proj_weight2, axes=None) matmul292: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm78, permute_dims293, out_dtype="void") add302: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul292, model_decoder_layers_4_encoder_attn_q_proj_bias2) reshape433: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add302, R.shape([1, seq_len, 20, 64])) reshape434: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape433, R.shape([seq_len, 20, 64])) lv78 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(4), R.prim_value(T.float32(1)), reshape434), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape435: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv78, R.shape([1, seq_len, 20, 64])) reshape436: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape435, R.shape([1, seq_len, 1280])) permute_dims294: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_4_encoder_attn_out_proj_weight2, axes=None) matmul293: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape436, permute_dims294, out_dtype="void") add303: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul293, model_decoder_layers_4_encoder_attn_out_proj_bias2) add304: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add301, add303) layer_norm79: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add304, model_decoder_layers_4_final_layer_norm_weight2, model_decoder_layers_4_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims295: R.Tensor((1280, 5120), 
dtype="float16") = R.permute_dims(model_decoder_layers_4_fc1_weight2, axes=None) matmul294: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm79, permute_dims295, out_dtype="void") add305: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul294, model_decoder_layers_4_fc1_bias2) gelu38: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add305) permute_dims296: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_4_fc2_weight2, axes=None) matmul295: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu38, permute_dims296, out_dtype="void") add306: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul295, model_decoder_layers_4_fc2_bias2) add307: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add304, add306) layer_norm80: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add307, model_decoder_layers_5_self_attn_layer_norm_weight2, model_decoder_layers_5_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims297: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_5_self_attn_q_proj_weight2, axes=None) matmul296: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm80, permute_dims297, out_dtype="void") add308: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul296, model_decoder_layers_5_self_attn_q_proj_bias2) reshape437: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add308, R.shape([1, seq_len, 20, 64])) permute_dims298: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_5_self_attn_k_proj_weight2, axes=None) matmul297: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm80, permute_dims298, out_dtype="void") reshape438: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul297, R.shape([1, seq_len, 20, 64])) permute_dims299: R.Tensor((1280, 1280), dtype="float16") = 
R.permute_dims(model_decoder_layers_5_self_attn_v_proj_weight2, axes=None) matmul298: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm80, permute_dims299, out_dtype="void") add309: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul298, model_decoder_layers_5_self_attn_v_proj_bias2) reshape439: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add309, R.shape([1, seq_len, 20, 64])) concat5: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape437, reshape438, reshape439), axis=2) reshape440: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat5, R.shape([seq_len, 60, 64])) lv79 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(5), R.prim_value(T.float32(1)), reshape440), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape441: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv79, R.shape([1, seq_len, 20, 64])) reshape442: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape441, R.shape([1, seq_len, 1280])) permute_dims300: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_5_self_attn_out_proj_weight2, axes=None) matmul299: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape442, permute_dims300, out_dtype="void") add310: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul299, model_decoder_layers_5_self_attn_out_proj_bias2) add311: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add307, add310) layer_norm81: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add311, model_decoder_layers_5_encoder_attn_layer_norm_weight2, model_decoder_layers_5_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims301: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_5_encoder_attn_q_proj_weight2, axes=None) matmul300: R.Tensor((1, seq_len, 1280), dtype="float16") = 
R.matmul(layer_norm81, permute_dims301, out_dtype="void") add312: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul300, model_decoder_layers_5_encoder_attn_q_proj_bias2) reshape443: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add312, R.shape([1, seq_len, 20, 64])) reshape444: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape443, R.shape([seq_len, 20, 64])) lv80 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(5), R.prim_value(T.float32(1)), reshape444), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape445: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv80, R.shape([1, seq_len, 20, 64])) reshape446: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape445, R.shape([1, seq_len, 1280])) permute_dims302: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_5_encoder_attn_out_proj_weight2, axes=None) matmul301: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape446, permute_dims302, out_dtype="void") add313: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul301, model_decoder_layers_5_encoder_attn_out_proj_bias2) add314: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add311, add313) layer_norm82: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add314, model_decoder_layers_5_final_layer_norm_weight2, model_decoder_layers_5_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims303: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_5_fc1_weight2, axes=None) matmul302: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm82, permute_dims303, out_dtype="void") add315: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul302, model_decoder_layers_5_fc1_bias2) gelu39: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add315) permute_dims304: R.Tensor((5120, 1280), 
dtype="float16") = R.permute_dims(model_decoder_layers_5_fc2_weight2, axes=None) matmul303: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu39, permute_dims304, out_dtype="void") add316: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul303, model_decoder_layers_5_fc2_bias2) add317: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add314, add316) layer_norm83: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add317, model_decoder_layers_6_self_attn_layer_norm_weight2, model_decoder_layers_6_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims305: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_6_self_attn_q_proj_weight2, axes=None) matmul304: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm83, permute_dims305, out_dtype="void") add318: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul304, model_decoder_layers_6_self_attn_q_proj_bias2) reshape447: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add318, R.shape([1, seq_len, 20, 64])) permute_dims306: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_6_self_attn_k_proj_weight2, axes=None) matmul305: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm83, permute_dims306, out_dtype="void") reshape448: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul305, R.shape([1, seq_len, 20, 64])) permute_dims307: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_6_self_attn_v_proj_weight2, axes=None) matmul306: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm83, permute_dims307, out_dtype="void") add319: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul306, model_decoder_layers_6_self_attn_v_proj_bias2) reshape449: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add319, R.shape([1, seq_len, 20, 64])) concat6: R.Tensor((1, seq_len, 60, 
64), dtype="float16") = R.concat((reshape447, reshape448, reshape449), axis=2) reshape450: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat6, R.shape([seq_len, 60, 64])) lv81 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(6), R.prim_value(T.float32(1)), reshape450), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape451: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv81, R.shape([1, seq_len, 20, 64])) reshape452: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape451, R.shape([1, seq_len, 1280])) permute_dims308: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_6_self_attn_out_proj_weight2, axes=None) matmul307: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape452, permute_dims308, out_dtype="void") add320: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul307, model_decoder_layers_6_self_attn_out_proj_bias2) add321: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add317, add320) layer_norm84: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add321, model_decoder_layers_6_encoder_attn_layer_norm_weight2, model_decoder_layers_6_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims309: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_6_encoder_attn_q_proj_weight2, axes=None) matmul308: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm84, permute_dims309, out_dtype="void") add322: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul308, model_decoder_layers_6_encoder_attn_q_proj_bias2) reshape453: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add322, R.shape([1, seq_len, 20, 64])) reshape454: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape453, R.shape([seq_len, 20, 64])) lv82 = 
R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(6), R.prim_value(T.float32(1)), reshape454), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape455: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv82, R.shape([1, seq_len, 20, 64])) reshape456: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape455, R.shape([1, seq_len, 1280])) permute_dims310: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_6_encoder_attn_out_proj_weight2, axes=None) matmul309: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape456, permute_dims310, out_dtype="void") add323: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul309, model_decoder_layers_6_encoder_attn_out_proj_bias2) add324: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add321, add323) layer_norm85: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add324, model_decoder_layers_6_final_layer_norm_weight2, model_decoder_layers_6_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims311: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_6_fc1_weight2, axes=None) matmul310: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm85, permute_dims311, out_dtype="void") add325: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul310, model_decoder_layers_6_fc1_bias2) gelu40: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add325) permute_dims312: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_6_fc2_weight2, axes=None) matmul311: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu40, permute_dims312, out_dtype="void") add326: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul311, model_decoder_layers_6_fc2_bias2) add327: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add324, add326) layer_norm86: R.Tensor((1, seq_len, 
1280), dtype="float16") = R.nn.layer_norm(add327, model_decoder_layers_7_self_attn_layer_norm_weight2, model_decoder_layers_7_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims313: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_7_self_attn_q_proj_weight2, axes=None) matmul312: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm86, permute_dims313, out_dtype="void") add328: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul312, model_decoder_layers_7_self_attn_q_proj_bias2) reshape457: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add328, R.shape([1, seq_len, 20, 64])) permute_dims314: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_7_self_attn_k_proj_weight2, axes=None) matmul313: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm86, permute_dims314, out_dtype="void") reshape458: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul313, R.shape([1, seq_len, 20, 64])) permute_dims315: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_7_self_attn_v_proj_weight2, axes=None) matmul314: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm86, permute_dims315, out_dtype="void") add329: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul314, model_decoder_layers_7_self_attn_v_proj_bias2) reshape459: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add329, R.shape([1, seq_len, 20, 64])) concat7: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape457, reshape458, reshape459), axis=2) reshape460: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat7, R.shape([seq_len, 60, 64])) lv83 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(7), R.prim_value(T.float32(1)), reshape460), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape461: 
R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv83, R.shape([1, seq_len, 20, 64])) reshape462: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape461, R.shape([1, seq_len, 1280])) permute_dims316: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_7_self_attn_out_proj_weight2, axes=None) matmul315: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape462, permute_dims316, out_dtype="void") add330: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul315, model_decoder_layers_7_self_attn_out_proj_bias2) add331: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add327, add330) layer_norm87: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add331, model_decoder_layers_7_encoder_attn_layer_norm_weight2, model_decoder_layers_7_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims317: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_7_encoder_attn_q_proj_weight2, axes=None) matmul316: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm87, permute_dims317, out_dtype="void") add332: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul316, model_decoder_layers_7_encoder_attn_q_proj_bias2) reshape463: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add332, R.shape([1, seq_len, 20, 64])) reshape464: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape463, R.shape([seq_len, 20, 64])) lv84 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(7), R.prim_value(T.float32(1)), reshape464), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape465: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv84, R.shape([1, seq_len, 20, 64])) reshape466: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape465, R.shape([1, seq_len, 1280])) permute_dims318: R.Tensor((1280, 1280), 
dtype="float16") = R.permute_dims(model_decoder_layers_7_encoder_attn_out_proj_weight2, axes=None) matmul317: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape466, permute_dims318, out_dtype="void") add333: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul317, model_decoder_layers_7_encoder_attn_out_proj_bias2) add334: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add331, add333) layer_norm88: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add334, model_decoder_layers_7_final_layer_norm_weight2, model_decoder_layers_7_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims319: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_7_fc1_weight2, axes=None) matmul318: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm88, permute_dims319, out_dtype="void") add335: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul318, model_decoder_layers_7_fc1_bias2) gelu41: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add335) permute_dims320: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_7_fc2_weight2, axes=None) matmul319: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu41, permute_dims320, out_dtype="void") add336: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul319, model_decoder_layers_7_fc2_bias2) add337: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add334, add336) layer_norm89: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add337, model_decoder_layers_8_self_attn_layer_norm_weight2, model_decoder_layers_8_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims321: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_8_self_attn_q_proj_weight2, axes=None) matmul320: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm89, permute_dims321, 
out_dtype="void") add338: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul320, model_decoder_layers_8_self_attn_q_proj_bias2) reshape467: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add338, R.shape([1, seq_len, 20, 64])) permute_dims322: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_8_self_attn_k_proj_weight2, axes=None) matmul321: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm89, permute_dims322, out_dtype="void") reshape468: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul321, R.shape([1, seq_len, 20, 64])) permute_dims323: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_8_self_attn_v_proj_weight2, axes=None) matmul322: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm89, permute_dims323, out_dtype="void") add339: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul322, model_decoder_layers_8_self_attn_v_proj_bias2) reshape469: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add339, R.shape([1, seq_len, 20, 64])) concat8: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape467, reshape468, reshape469), axis=2) reshape470: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat8, R.shape([seq_len, 60, 64])) lv85 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(8), R.prim_value(T.float32(1)), reshape470), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape471: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv85, R.shape([1, seq_len, 20, 64])) reshape472: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape471, R.shape([1, seq_len, 1280])) permute_dims324: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_8_self_attn_out_proj_weight2, axes=None) matmul323: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape472, permute_dims324, 
out_dtype="void") add340: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul323, model_decoder_layers_8_self_attn_out_proj_bias2) add341: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add337, add340) layer_norm90: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add341, model_decoder_layers_8_encoder_attn_layer_norm_weight2, model_decoder_layers_8_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims325: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_8_encoder_attn_q_proj_weight2, axes=None) matmul324: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm90, permute_dims325, out_dtype="void") add342: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul324, model_decoder_layers_8_encoder_attn_q_proj_bias2) reshape473: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add342, R.shape([1, seq_len, 20, 64])) reshape474: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape473, R.shape([seq_len, 20, 64])) lv86 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(8), R.prim_value(T.float32(1)), reshape474), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape475: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv86, R.shape([1, seq_len, 20, 64])) reshape476: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape475, R.shape([1, seq_len, 1280])) permute_dims326: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_8_encoder_attn_out_proj_weight2, axes=None) matmul325: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape476, permute_dims326, out_dtype="void") add343: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul325, model_decoder_layers_8_encoder_attn_out_proj_bias2) add344: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add341, add343) layer_norm91: R.Tensor((1, seq_len, 
1280), dtype="float16") = R.nn.layer_norm(add344, model_decoder_layers_8_final_layer_norm_weight2, model_decoder_layers_8_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims327: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_8_fc1_weight2, axes=None) matmul326: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm91, permute_dims327, out_dtype="void") add345: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul326, model_decoder_layers_8_fc1_bias2) gelu42: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add345) permute_dims328: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_8_fc2_weight2, axes=None) matmul327: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu42, permute_dims328, out_dtype="void") add346: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul327, model_decoder_layers_8_fc2_bias2) add347: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add344, add346) layer_norm92: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add347, model_decoder_layers_9_self_attn_layer_norm_weight2, model_decoder_layers_9_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims329: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_9_self_attn_q_proj_weight2, axes=None) matmul328: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm92, permute_dims329, out_dtype="void") add348: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul328, model_decoder_layers_9_self_attn_q_proj_bias2) reshape477: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add348, R.shape([1, seq_len, 20, 64])) permute_dims330: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_9_self_attn_k_proj_weight2, axes=None) matmul329: R.Tensor((1, seq_len, 1280), dtype="float16") = 
R.matmul(layer_norm92, permute_dims330, out_dtype="void") reshape478: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul329, R.shape([1, seq_len, 20, 64])) permute_dims331: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_9_self_attn_v_proj_weight2, axes=None) matmul330: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm92, permute_dims331, out_dtype="void") add349: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul330, model_decoder_layers_9_self_attn_v_proj_bias2) reshape479: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add349, R.shape([1, seq_len, 20, 64])) concat9: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape477, reshape478, reshape479), axis=2) reshape480: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat9, R.shape([seq_len, 60, 64])) lv87 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(9), R.prim_value(T.float32(1)), reshape480), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape481: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv87, R.shape([1, seq_len, 20, 64])) reshape482: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape481, R.shape([1, seq_len, 1280])) permute_dims332: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_9_self_attn_out_proj_weight2, axes=None) matmul331: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape482, permute_dims332, out_dtype="void") add350: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul331, model_decoder_layers_9_self_attn_out_proj_bias2) add351: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add347, add350) layer_norm93: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add351, model_decoder_layers_9_encoder_attn_layer_norm_weight2, model_decoder_layers_9_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, 
center=True, scale=True) permute_dims333: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_9_encoder_attn_q_proj_weight2, axes=None) matmul332: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm93, permute_dims333, out_dtype="void") add352: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul332, model_decoder_layers_9_encoder_attn_q_proj_bias2) reshape483: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add352, R.shape([1, seq_len, 20, 64])) reshape484: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape483, R.shape([seq_len, 20, 64])) lv88 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(9), R.prim_value(T.float32(1)), reshape484), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape485: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv88, R.shape([1, seq_len, 20, 64])) reshape486: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape485, R.shape([1, seq_len, 1280])) permute_dims334: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_9_encoder_attn_out_proj_weight2, axes=None) matmul333: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape486, permute_dims334, out_dtype="void") add353: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul333, model_decoder_layers_9_encoder_attn_out_proj_bias2) add354: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add351, add353) layer_norm94: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add354, model_decoder_layers_9_final_layer_norm_weight2, model_decoder_layers_9_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims335: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_9_fc1_weight2, axes=None) matmul334: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm94, permute_dims335, out_dtype="void") add355: 
R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul334, model_decoder_layers_9_fc1_bias2) gelu43: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add355) permute_dims336: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_9_fc2_weight2, axes=None) matmul335: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu43, permute_dims336, out_dtype="void") add356: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul335, model_decoder_layers_9_fc2_bias2) add357: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add354, add356) layer_norm95: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add357, model_decoder_layers_10_self_attn_layer_norm_weight2, model_decoder_layers_10_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims337: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_10_self_attn_q_proj_weight2, axes=None) matmul336: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm95, permute_dims337, out_dtype="void") add358: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul336, model_decoder_layers_10_self_attn_q_proj_bias2) reshape487: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add358, R.shape([1, seq_len, 20, 64])) permute_dims338: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_10_self_attn_k_proj_weight2, axes=None) matmul337: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm95, permute_dims338, out_dtype="void") reshape488: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul337, R.shape([1, seq_len, 20, 64])) permute_dims339: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_10_self_attn_v_proj_weight2, axes=None) matmul338: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm95, permute_dims339, out_dtype="void") add359: R.Tensor((1, seq_len, 1280), 
dtype="float16") = R.add(matmul338, model_decoder_layers_10_self_attn_v_proj_bias2) reshape489: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add359, R.shape([1, seq_len, 20, 64])) concat10: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape487, reshape488, reshape489), axis=2) reshape490: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat10, R.shape([seq_len, 60, 64])) lv89 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(10), R.prim_value(T.float32(1)), reshape490), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape491: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv89, R.shape([1, seq_len, 20, 64])) reshape492: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape491, R.shape([1, seq_len, 1280])) permute_dims340: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_10_self_attn_out_proj_weight2, axes=None) matmul339: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape492, permute_dims340, out_dtype="void") add360: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul339, model_decoder_layers_10_self_attn_out_proj_bias2) add361: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add357, add360) layer_norm96: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add361, model_decoder_layers_10_encoder_attn_layer_norm_weight2, model_decoder_layers_10_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims341: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_10_encoder_attn_q_proj_weight2, axes=None) matmul340: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm96, permute_dims341, out_dtype="void") add362: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul340, model_decoder_layers_10_encoder_attn_q_proj_bias2) reshape493: R.Tensor((1, seq_len, 20, 64), 
dtype="float16") = R.reshape(add362, R.shape([1, seq_len, 20, 64])) reshape494: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape493, R.shape([seq_len, 20, 64])) lv90 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(10), R.prim_value(T.float32(1)), reshape494), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape495: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv90, R.shape([1, seq_len, 20, 64])) reshape496: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape495, R.shape([1, seq_len, 1280])) permute_dims342: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_10_encoder_attn_out_proj_weight2, axes=None) matmul341: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape496, permute_dims342, out_dtype="void") add363: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul341, model_decoder_layers_10_encoder_attn_out_proj_bias2) add364: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add361, add363) layer_norm97: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add364, model_decoder_layers_10_final_layer_norm_weight2, model_decoder_layers_10_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims343: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_10_fc1_weight2, axes=None) matmul342: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm97, permute_dims343, out_dtype="void") add365: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul342, model_decoder_layers_10_fc1_bias2) gelu44: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add365) permute_dims344: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_10_fc2_weight2, axes=None) matmul343: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu44, permute_dims344, out_dtype="void") add366: R.Tensor((1, seq_len, 
1280), dtype="float16") = R.add(matmul343, model_decoder_layers_10_fc2_bias2) add367: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add364, add366) layer_norm98: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add367, model_decoder_layers_11_self_attn_layer_norm_weight2, model_decoder_layers_11_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims345: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_11_self_attn_q_proj_weight2, axes=None) matmul344: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm98, permute_dims345, out_dtype="void") add368: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul344, model_decoder_layers_11_self_attn_q_proj_bias2) reshape497: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add368, R.shape([1, seq_len, 20, 64])) permute_dims346: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_11_self_attn_k_proj_weight2, axes=None) matmul345: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm98, permute_dims346, out_dtype="void") reshape498: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul345, R.shape([1, seq_len, 20, 64])) permute_dims347: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_11_self_attn_v_proj_weight2, axes=None) matmul346: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm98, permute_dims347, out_dtype="void") add369: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul346, model_decoder_layers_11_self_attn_v_proj_bias2) reshape499: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add369, R.shape([1, seq_len, 20, 64])) concat11: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape497, reshape498, reshape499), axis=2) reshape500: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat11, R.shape([seq_len, 60, 64])) lv91 = 
R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(11), R.prim_value(T.float32(1)), reshape500), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape501: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv91, R.shape([1, seq_len, 20, 64])) reshape502: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape501, R.shape([1, seq_len, 1280])) permute_dims348: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_11_self_attn_out_proj_weight2, axes=None) matmul347: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape502, permute_dims348, out_dtype="void") add370: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul347, model_decoder_layers_11_self_attn_out_proj_bias2) add371: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add367, add370) layer_norm99: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add371, model_decoder_layers_11_encoder_attn_layer_norm_weight2, model_decoder_layers_11_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims349: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_11_encoder_attn_q_proj_weight2, axes=None) matmul348: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm99, permute_dims349, out_dtype="void") add372: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul348, model_decoder_layers_11_encoder_attn_q_proj_bias2) reshape503: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add372, R.shape([1, seq_len, 20, 64])) reshape504: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape503, R.shape([seq_len, 20, 64])) lv92 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(11), R.prim_value(T.float32(1)), reshape504), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape505: R.Tensor((1, seq_len, 20, 64), 
dtype="float16") = R.reshape(lv92, R.shape([1, seq_len, 20, 64])) reshape506: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape505, R.shape([1, seq_len, 1280])) permute_dims350: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_11_encoder_attn_out_proj_weight2, axes=None) matmul349: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape506, permute_dims350, out_dtype="void") add373: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul349, model_decoder_layers_11_encoder_attn_out_proj_bias2) add374: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add371, add373) layer_norm100: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add374, model_decoder_layers_11_final_layer_norm_weight2, model_decoder_layers_11_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims351: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_11_fc1_weight2, axes=None) matmul350: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm100, permute_dims351, out_dtype="void") add375: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul350, model_decoder_layers_11_fc1_bias2) gelu45: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add375) permute_dims352: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_11_fc2_weight2, axes=None) matmul351: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu45, permute_dims352, out_dtype="void") add376: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul351, model_decoder_layers_11_fc2_bias2) add377: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add374, add376) layer_norm101: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add377, model_decoder_layers_12_self_attn_layer_norm_weight2, model_decoder_layers_12_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) 
permute_dims353: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_12_self_attn_q_proj_weight2, axes=None) matmul352: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm101, permute_dims353, out_dtype="void") add378: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul352, model_decoder_layers_12_self_attn_q_proj_bias2) reshape507: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add378, R.shape([1, seq_len, 20, 64])) permute_dims354: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_12_self_attn_k_proj_weight2, axes=None) matmul353: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm101, permute_dims354, out_dtype="void") reshape508: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul353, R.shape([1, seq_len, 20, 64])) permute_dims355: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_12_self_attn_v_proj_weight2, axes=None) matmul354: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm101, permute_dims355, out_dtype="void") add379: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul354, model_decoder_layers_12_self_attn_v_proj_bias2) reshape509: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add379, R.shape([1, seq_len, 20, 64])) concat12: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape507, reshape508, reshape509), axis=2) reshape510: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat12, R.shape([seq_len, 60, 64])) lv93 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(12), R.prim_value(T.float32(1)), reshape510), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape511: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv93, R.shape([1, seq_len, 20, 64])) reshape512: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape511, R.shape([1, seq_len, 1280])) 
permute_dims356: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_12_self_attn_out_proj_weight2, axes=None) matmul355: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape512, permute_dims356, out_dtype="void") add380: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul355, model_decoder_layers_12_self_attn_out_proj_bias2) add381: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add377, add380) layer_norm102: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add381, model_decoder_layers_12_encoder_attn_layer_norm_weight2, model_decoder_layers_12_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims357: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_12_encoder_attn_q_proj_weight2, axes=None) matmul356: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm102, permute_dims357, out_dtype="void") add382: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul356, model_decoder_layers_12_encoder_attn_q_proj_bias2) reshape513: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add382, R.shape([1, seq_len, 20, 64])) reshape514: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape513, R.shape([seq_len, 20, 64])) lv94 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(12), R.prim_value(T.float32(1)), reshape514), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape515: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv94, R.shape([1, seq_len, 20, 64])) reshape516: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape515, R.shape([1, seq_len, 1280])) permute_dims358: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_12_encoder_attn_out_proj_weight2, axes=None) matmul357: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape516, permute_dims358, 
out_dtype="void") add383: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul357, model_decoder_layers_12_encoder_attn_out_proj_bias2) add384: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add381, add383) layer_norm103: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add384, model_decoder_layers_12_final_layer_norm_weight2, model_decoder_layers_12_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims359: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_12_fc1_weight2, axes=None) matmul358: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm103, permute_dims359, out_dtype="void") add385: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul358, model_decoder_layers_12_fc1_bias2) gelu46: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add385) permute_dims360: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_12_fc2_weight2, axes=None) matmul359: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu46, permute_dims360, out_dtype="void") add386: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul359, model_decoder_layers_12_fc2_bias2) add387: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add384, add386) layer_norm104: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add387, model_decoder_layers_13_self_attn_layer_norm_weight2, model_decoder_layers_13_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims361: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_13_self_attn_q_proj_weight2, axes=None) matmul360: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm104, permute_dims361, out_dtype="void") add388: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul360, model_decoder_layers_13_self_attn_q_proj_bias2) reshape517: R.Tensor((1, seq_len, 20, 64), 
dtype="float16") = R.reshape(add388, R.shape([1, seq_len, 20, 64])) permute_dims362: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_13_self_attn_k_proj_weight2, axes=None) matmul361: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm104, permute_dims362, out_dtype="void") reshape518: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul361, R.shape([1, seq_len, 20, 64])) permute_dims363: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_13_self_attn_v_proj_weight2, axes=None) matmul362: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm104, permute_dims363, out_dtype="void") add389: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul362, model_decoder_layers_13_self_attn_v_proj_bias2) reshape519: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add389, R.shape([1, seq_len, 20, 64])) concat13: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape517, reshape518, reshape519), axis=2) reshape520: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat13, R.shape([seq_len, 60, 64])) lv95 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(13), R.prim_value(T.float32(1)), reshape520), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape521: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv95, R.shape([1, seq_len, 20, 64])) reshape522: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape521, R.shape([1, seq_len, 1280])) permute_dims364: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_13_self_attn_out_proj_weight2, axes=None) matmul363: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape522, permute_dims364, out_dtype="void") add390: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul363, model_decoder_layers_13_self_attn_out_proj_bias2) add391: R.Tensor((1, seq_len, 1280), 
dtype="float16") = R.add(add387, add390) layer_norm105: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add391, model_decoder_layers_13_encoder_attn_layer_norm_weight2, model_decoder_layers_13_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims365: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_13_encoder_attn_q_proj_weight2, axes=None) matmul364: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm105, permute_dims365, out_dtype="void") add392: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul364, model_decoder_layers_13_encoder_attn_q_proj_bias2) reshape523: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add392, R.shape([1, seq_len, 20, 64])) reshape524: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape523, R.shape([seq_len, 20, 64])) lv96 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(13), R.prim_value(T.float32(1)), reshape524), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape525: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv96, R.shape([1, seq_len, 20, 64])) reshape526: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape525, R.shape([1, seq_len, 1280])) permute_dims366: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_13_encoder_attn_out_proj_weight2, axes=None) matmul365: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape526, permute_dims366, out_dtype="void") add393: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul365, model_decoder_layers_13_encoder_attn_out_proj_bias2) add394: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add391, add393) layer_norm106: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add394, model_decoder_layers_13_final_layer_norm_weight2, model_decoder_layers_13_final_layer_norm_bias2, axes=[-1], 
epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims367: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_13_fc1_weight2, axes=None) matmul366: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm106, permute_dims367, out_dtype="void") add395: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul366, model_decoder_layers_13_fc1_bias2) gelu47: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add395) permute_dims368: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_13_fc2_weight2, axes=None) matmul367: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu47, permute_dims368, out_dtype="void") add396: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul367, model_decoder_layers_13_fc2_bias2) add397: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add394, add396) layer_norm107: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add397, model_decoder_layers_14_self_attn_layer_norm_weight2, model_decoder_layers_14_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims369: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_14_self_attn_q_proj_weight2, axes=None) matmul368: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm107, permute_dims369, out_dtype="void") add398: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul368, model_decoder_layers_14_self_attn_q_proj_bias2) reshape527: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add398, R.shape([1, seq_len, 20, 64])) permute_dims370: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_14_self_attn_k_proj_weight2, axes=None) matmul369: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm107, permute_dims370, out_dtype="void") reshape528: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul369, R.shape([1, 
seq_len, 20, 64])) permute_dims371: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_14_self_attn_v_proj_weight2, axes=None) matmul370: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm107, permute_dims371, out_dtype="void") add399: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul370, model_decoder_layers_14_self_attn_v_proj_bias2) reshape529: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add399, R.shape([1, seq_len, 20, 64])) concat14: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape527, reshape528, reshape529), axis=2) reshape530: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat14, R.shape([seq_len, 60, 64])) lv97 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(14), R.prim_value(T.float32(1)), reshape530), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape531: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv97, R.shape([1, seq_len, 20, 64])) reshape532: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape531, R.shape([1, seq_len, 1280])) permute_dims372: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_14_self_attn_out_proj_weight2, axes=None) matmul371: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape532, permute_dims372, out_dtype="void") add400: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul371, model_decoder_layers_14_self_attn_out_proj_bias2) add401: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add397, add400) layer_norm108: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add401, model_decoder_layers_14_encoder_attn_layer_norm_weight2, model_decoder_layers_14_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims373: R.Tensor((1280, 1280), dtype="float16") = 
R.permute_dims(model_decoder_layers_14_encoder_attn_q_proj_weight2, axes=None) matmul372: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm108, permute_dims373, out_dtype="void") add402: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul372, model_decoder_layers_14_encoder_attn_q_proj_bias2) reshape533: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add402, R.shape([1, seq_len, 20, 64])) reshape534: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape533, R.shape([seq_len, 20, 64])) lv98 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(14), R.prim_value(T.float32(1)), reshape534), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape535: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv98, R.shape([1, seq_len, 20, 64])) reshape536: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape535, R.shape([1, seq_len, 1280])) permute_dims374: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_14_encoder_attn_out_proj_weight2, axes=None) matmul373: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape536, permute_dims374, out_dtype="void") add403: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul373, model_decoder_layers_14_encoder_attn_out_proj_bias2) add404: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add401, add403) layer_norm109: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add404, model_decoder_layers_14_final_layer_norm_weight2, model_decoder_layers_14_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims375: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_14_fc1_weight2, axes=None) matmul374: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm109, permute_dims375, out_dtype="void") add405: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul374, 
model_decoder_layers_14_fc1_bias2) gelu48: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add405) permute_dims376: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_14_fc2_weight2, axes=None) matmul375: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu48, permute_dims376, out_dtype="void") add406: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul375, model_decoder_layers_14_fc2_bias2) add407: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add404, add406) layer_norm110: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add407, model_decoder_layers_15_self_attn_layer_norm_weight2, model_decoder_layers_15_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims377: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_15_self_attn_q_proj_weight2, axes=None) matmul376: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm110, permute_dims377, out_dtype="void") add408: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul376, model_decoder_layers_15_self_attn_q_proj_bias2) reshape537: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add408, R.shape([1, seq_len, 20, 64])) permute_dims378: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_15_self_attn_k_proj_weight2, axes=None) matmul377: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm110, permute_dims378, out_dtype="void") reshape538: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul377, R.shape([1, seq_len, 20, 64])) permute_dims379: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_15_self_attn_v_proj_weight2, axes=None) matmul378: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm110, permute_dims379, out_dtype="void") add409: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul378, 
model_decoder_layers_15_self_attn_v_proj_bias2) reshape539: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add409, R.shape([1, seq_len, 20, 64])) concat15: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape537, reshape538, reshape539), axis=2) reshape540: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat15, R.shape([seq_len, 60, 64])) lv99 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(15), R.prim_value(T.float32(1)), reshape540), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape541: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv99, R.shape([1, seq_len, 20, 64])) reshape542: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape541, R.shape([1, seq_len, 1280])) permute_dims380: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_15_self_attn_out_proj_weight2, axes=None) matmul379: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape542, permute_dims380, out_dtype="void") add410: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul379, model_decoder_layers_15_self_attn_out_proj_bias2) add411: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add407, add410) layer_norm111: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add411, model_decoder_layers_15_encoder_attn_layer_norm_weight2, model_decoder_layers_15_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims381: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_15_encoder_attn_q_proj_weight2, axes=None) matmul380: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm111, permute_dims381, out_dtype="void") add412: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul380, model_decoder_layers_15_encoder_attn_q_proj_bias2) reshape543: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add412, 
R.shape([1, seq_len, 20, 64])) reshape544: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape543, R.shape([seq_len, 20, 64])) lv100 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(15), R.prim_value(T.float32(1)), reshape544), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape545: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv100, R.shape([1, seq_len, 20, 64])) reshape546: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape545, R.shape([1, seq_len, 1280])) permute_dims382: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_15_encoder_attn_out_proj_weight2, axes=None) matmul381: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape546, permute_dims382, out_dtype="void") add413: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul381, model_decoder_layers_15_encoder_attn_out_proj_bias2) add414: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add411, add413) layer_norm112: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add414, model_decoder_layers_15_final_layer_norm_weight2, model_decoder_layers_15_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims383: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_15_fc1_weight2, axes=None) matmul382: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm112, permute_dims383, out_dtype="void") add415: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul382, model_decoder_layers_15_fc1_bias2) gelu49: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add415) permute_dims384: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_15_fc2_weight2, axes=None) matmul383: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu49, permute_dims384, out_dtype="void") add416: R.Tensor((1, seq_len, 1280), dtype="float16") = 
R.add(matmul383, model_decoder_layers_15_fc2_bias2) add417: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add414, add416) layer_norm113: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add417, model_decoder_layers_16_self_attn_layer_norm_weight2, model_decoder_layers_16_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims385: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_16_self_attn_q_proj_weight2, axes=None) matmul384: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm113, permute_dims385, out_dtype="void") add418: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul384, model_decoder_layers_16_self_attn_q_proj_bias2) reshape547: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add418, R.shape([1, seq_len, 20, 64])) permute_dims386: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_16_self_attn_k_proj_weight2, axes=None) matmul385: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm113, permute_dims386, out_dtype="void") reshape548: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul385, R.shape([1, seq_len, 20, 64])) permute_dims387: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_16_self_attn_v_proj_weight2, axes=None) matmul386: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm113, permute_dims387, out_dtype="void") add419: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul386, model_decoder_layers_16_self_attn_v_proj_bias2) reshape549: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add419, R.shape([1, seq_len, 20, 64])) concat16: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape547, reshape548, reshape549), axis=2) reshape550: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat16, R.shape([seq_len, 60, 64])) lv101 = 
R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(16), R.prim_value(T.float32(1)), reshape550), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape551: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv101, R.shape([1, seq_len, 20, 64])) reshape552: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape551, R.shape([1, seq_len, 1280])) permute_dims388: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_16_self_attn_out_proj_weight2, axes=None) matmul387: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape552, permute_dims388, out_dtype="void") add420: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul387, model_decoder_layers_16_self_attn_out_proj_bias2) add421: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add417, add420) layer_norm114: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add421, model_decoder_layers_16_encoder_attn_layer_norm_weight2, model_decoder_layers_16_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims389: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_16_encoder_attn_q_proj_weight2, axes=None) matmul388: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm114, permute_dims389, out_dtype="void") add422: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul388, model_decoder_layers_16_encoder_attn_q_proj_bias2) reshape553: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add422, R.shape([1, seq_len, 20, 64])) reshape554: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape553, R.shape([seq_len, 20, 64])) lv102 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(16), R.prim_value(T.float32(1)), reshape554), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape555: R.Tensor((1, seq_len, 20, 64), 
dtype="float16") = R.reshape(lv102, R.shape([1, seq_len, 20, 64])) reshape556: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape555, R.shape([1, seq_len, 1280])) permute_dims390: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_16_encoder_attn_out_proj_weight2, axes=None) matmul389: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape556, permute_dims390, out_dtype="void") add423: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul389, model_decoder_layers_16_encoder_attn_out_proj_bias2) add424: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add421, add423) layer_norm115: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add424, model_decoder_layers_16_final_layer_norm_weight2, model_decoder_layers_16_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims391: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_16_fc1_weight2, axes=None) matmul390: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm115, permute_dims391, out_dtype="void") add425: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul390, model_decoder_layers_16_fc1_bias2) gelu50: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add425) permute_dims392: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_16_fc2_weight2, axes=None) matmul391: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu50, permute_dims392, out_dtype="void") add426: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul391, model_decoder_layers_16_fc2_bias2) add427: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add424, add426) layer_norm116: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add427, model_decoder_layers_17_self_attn_layer_norm_weight2, model_decoder_layers_17_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) 
permute_dims393: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_17_self_attn_q_proj_weight2, axes=None) matmul392: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm116, permute_dims393, out_dtype="void") add428: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul392, model_decoder_layers_17_self_attn_q_proj_bias2) reshape557: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add428, R.shape([1, seq_len, 20, 64])) permute_dims394: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_17_self_attn_k_proj_weight2, axes=None) matmul393: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm116, permute_dims394, out_dtype="void") reshape558: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul393, R.shape([1, seq_len, 20, 64])) permute_dims395: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_17_self_attn_v_proj_weight2, axes=None) matmul394: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm116, permute_dims395, out_dtype="void") add429: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul394, model_decoder_layers_17_self_attn_v_proj_bias2) reshape559: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add429, R.shape([1, seq_len, 20, 64])) concat17: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape557, reshape558, reshape559), axis=2) reshape560: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat17, R.shape([seq_len, 60, 64])) lv103 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(17), R.prim_value(T.float32(1)), reshape560), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape561: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv103, R.shape([1, seq_len, 20, 64])) reshape562: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape561, R.shape([1, seq_len, 1280])) 
permute_dims396: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_17_self_attn_out_proj_weight2, axes=None) matmul395: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape562, permute_dims396, out_dtype="void") add430: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul395, model_decoder_layers_17_self_attn_out_proj_bias2) add431: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add427, add430) layer_norm117: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add431, model_decoder_layers_17_encoder_attn_layer_norm_weight2, model_decoder_layers_17_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims397: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_17_encoder_attn_q_proj_weight2, axes=None) matmul396: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm117, permute_dims397, out_dtype="void") add432: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul396, model_decoder_layers_17_encoder_attn_q_proj_bias2) reshape563: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add432, R.shape([1, seq_len, 20, 64])) reshape564: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape563, R.shape([seq_len, 20, 64])) lv104 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(17), R.prim_value(T.float32(1)), reshape564), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape565: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv104, R.shape([1, seq_len, 20, 64])) reshape566: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape565, R.shape([1, seq_len, 1280])) permute_dims398: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_17_encoder_attn_out_proj_weight2, axes=None) matmul397: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape566, permute_dims398, 
out_dtype="void") add433: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul397, model_decoder_layers_17_encoder_attn_out_proj_bias2) add434: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add431, add433) layer_norm118: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add434, model_decoder_layers_17_final_layer_norm_weight2, model_decoder_layers_17_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims399: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_17_fc1_weight2, axes=None) matmul398: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm118, permute_dims399, out_dtype="void") add435: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul398, model_decoder_layers_17_fc1_bias2) gelu51: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add435) permute_dims400: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_17_fc2_weight2, axes=None) matmul399: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu51, permute_dims400, out_dtype="void") add436: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul399, model_decoder_layers_17_fc2_bias2) add437: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add434, add436) layer_norm119: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add437, model_decoder_layers_18_self_attn_layer_norm_weight2, model_decoder_layers_18_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims401: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_18_self_attn_q_proj_weight2, axes=None) matmul400: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm119, permute_dims401, out_dtype="void") add438: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul400, model_decoder_layers_18_self_attn_q_proj_bias2) reshape567: R.Tensor((1, seq_len, 20, 64), 
dtype="float16") = R.reshape(add438, R.shape([1, seq_len, 20, 64])) permute_dims402: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_18_self_attn_k_proj_weight2, axes=None) matmul401: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm119, permute_dims402, out_dtype="void") reshape568: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul401, R.shape([1, seq_len, 20, 64])) permute_dims403: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_18_self_attn_v_proj_weight2, axes=None) matmul402: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm119, permute_dims403, out_dtype="void") add439: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul402, model_decoder_layers_18_self_attn_v_proj_bias2) reshape569: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add439, R.shape([1, seq_len, 20, 64])) concat18: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape567, reshape568, reshape569), axis=2) reshape570: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat18, R.shape([seq_len, 60, 64])) lv105 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(18), R.prim_value(T.float32(1)), reshape570), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape571: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv105, R.shape([1, seq_len, 20, 64])) reshape572: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape571, R.shape([1, seq_len, 1280])) permute_dims404: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_18_self_attn_out_proj_weight2, axes=None) matmul403: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape572, permute_dims404, out_dtype="void") add440: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul403, model_decoder_layers_18_self_attn_out_proj_bias2) add441: R.Tensor((1, seq_len, 1280), 
dtype="float16") = R.add(add437, add440) layer_norm120: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add441, model_decoder_layers_18_encoder_attn_layer_norm_weight2, model_decoder_layers_18_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims405: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_18_encoder_attn_q_proj_weight2, axes=None) matmul404: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm120, permute_dims405, out_dtype="void") add442: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul404, model_decoder_layers_18_encoder_attn_q_proj_bias2) reshape573: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add442, R.shape([1, seq_len, 20, 64])) reshape574: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape573, R.shape([seq_len, 20, 64])) lv106 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(18), R.prim_value(T.float32(1)), reshape574), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape575: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv106, R.shape([1, seq_len, 20, 64])) reshape576: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape575, R.shape([1, seq_len, 1280])) permute_dims406: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_18_encoder_attn_out_proj_weight2, axes=None) matmul405: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape576, permute_dims406, out_dtype="void") add443: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul405, model_decoder_layers_18_encoder_attn_out_proj_bias2) add444: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add441, add443) layer_norm121: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add444, model_decoder_layers_18_final_layer_norm_weight2, model_decoder_layers_18_final_layer_norm_bias2, axes=[-1], 
epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims407: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_18_fc1_weight2, axes=None) matmul406: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm121, permute_dims407, out_dtype="void") add445: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul406, model_decoder_layers_18_fc1_bias2) gelu52: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add445) permute_dims408: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_18_fc2_weight2, axes=None) matmul407: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu52, permute_dims408, out_dtype="void") add446: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul407, model_decoder_layers_18_fc2_bias2) add447: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add444, add446) layer_norm122: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add447, model_decoder_layers_19_self_attn_layer_norm_weight2, model_decoder_layers_19_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims409: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_19_self_attn_q_proj_weight2, axes=None) matmul408: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm122, permute_dims409, out_dtype="void") add448: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul408, model_decoder_layers_19_self_attn_q_proj_bias2) reshape577: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add448, R.shape([1, seq_len, 20, 64])) permute_dims410: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_19_self_attn_k_proj_weight2, axes=None) matmul409: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm122, permute_dims410, out_dtype="void") reshape578: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul409, R.shape([1, 
seq_len, 20, 64])) permute_dims411: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_19_self_attn_v_proj_weight2, axes=None) matmul410: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm122, permute_dims411, out_dtype="void") add449: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul410, model_decoder_layers_19_self_attn_v_proj_bias2) reshape579: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add449, R.shape([1, seq_len, 20, 64])) concat19: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape577, reshape578, reshape579), axis=2) reshape580: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat19, R.shape([seq_len, 60, 64])) lv107 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(19), R.prim_value(T.float32(1)), reshape580), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape581: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv107, R.shape([1, seq_len, 20, 64])) reshape582: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape581, R.shape([1, seq_len, 1280])) permute_dims412: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_19_self_attn_out_proj_weight2, axes=None) matmul411: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape582, permute_dims412, out_dtype="void") add450: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul411, model_decoder_layers_19_self_attn_out_proj_bias2) add451: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add447, add450) layer_norm123: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add451, model_decoder_layers_19_encoder_attn_layer_norm_weight2, model_decoder_layers_19_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims413: R.Tensor((1280, 1280), dtype="float16") = 
R.permute_dims(model_decoder_layers_19_encoder_attn_q_proj_weight2, axes=None) matmul412: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm123, permute_dims413, out_dtype="void") add452: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul412, model_decoder_layers_19_encoder_attn_q_proj_bias2) reshape583: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add452, R.shape([1, seq_len, 20, 64])) reshape584: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape583, R.shape([seq_len, 20, 64])) lv108 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(19), R.prim_value(T.float32(1)), reshape584), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape585: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv108, R.shape([1, seq_len, 20, 64])) reshape586: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape585, R.shape([1, seq_len, 1280])) permute_dims414: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_19_encoder_attn_out_proj_weight2, axes=None) matmul413: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape586, permute_dims414, out_dtype="void") add453: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul413, model_decoder_layers_19_encoder_attn_out_proj_bias2) add454: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add451, add453) layer_norm124: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add454, model_decoder_layers_19_final_layer_norm_weight2, model_decoder_layers_19_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims415: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_19_fc1_weight2, axes=None) matmul414: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm124, permute_dims415, out_dtype="void") add455: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul414, 
model_decoder_layers_19_fc1_bias2) gelu53: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add455) permute_dims416: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_19_fc2_weight2, axes=None) matmul415: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu53, permute_dims416, out_dtype="void") add456: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul415, model_decoder_layers_19_fc2_bias2) add457: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add454, add456) layer_norm125: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add457, model_decoder_layers_20_self_attn_layer_norm_weight2, model_decoder_layers_20_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims417: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_20_self_attn_q_proj_weight2, axes=None) matmul416: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm125, permute_dims417, out_dtype="void") add458: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul416, model_decoder_layers_20_self_attn_q_proj_bias2) reshape587: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add458, R.shape([1, seq_len, 20, 64])) permute_dims418: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_20_self_attn_k_proj_weight2, axes=None) matmul417: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm125, permute_dims418, out_dtype="void") reshape588: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul417, R.shape([1, seq_len, 20, 64])) permute_dims419: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_20_self_attn_v_proj_weight2, axes=None) matmul418: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm125, permute_dims419, out_dtype="void") add459: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul418, 
model_decoder_layers_20_self_attn_v_proj_bias2) reshape589: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add459, R.shape([1, seq_len, 20, 64])) concat20: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape587, reshape588, reshape589), axis=2) reshape590: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat20, R.shape([seq_len, 60, 64])) lv109 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(20), R.prim_value(T.float32(1)), reshape590), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape591: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv109, R.shape([1, seq_len, 20, 64])) reshape592: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape591, R.shape([1, seq_len, 1280])) permute_dims420: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_20_self_attn_out_proj_weight2, axes=None) matmul419: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape592, permute_dims420, out_dtype="void") add460: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul419, model_decoder_layers_20_self_attn_out_proj_bias2) add461: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add457, add460) layer_norm126: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add461, model_decoder_layers_20_encoder_attn_layer_norm_weight2, model_decoder_layers_20_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims421: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_20_encoder_attn_q_proj_weight2, axes=None) matmul420: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm126, permute_dims421, out_dtype="void") add462: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul420, model_decoder_layers_20_encoder_attn_q_proj_bias2) reshape593: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add462, 
R.shape([1, seq_len, 20, 64])) reshape594: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape593, R.shape([seq_len, 20, 64])) lv110 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(20), R.prim_value(T.float32(1)), reshape594), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape595: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv110, R.shape([1, seq_len, 20, 64])) reshape596: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape595, R.shape([1, seq_len, 1280])) permute_dims422: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_20_encoder_attn_out_proj_weight2, axes=None) matmul421: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape596, permute_dims422, out_dtype="void") add463: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul421, model_decoder_layers_20_encoder_attn_out_proj_bias2) add464: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add461, add463) layer_norm127: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add464, model_decoder_layers_20_final_layer_norm_weight2, model_decoder_layers_20_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims423: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_20_fc1_weight2, axes=None) matmul422: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm127, permute_dims423, out_dtype="void") add465: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul422, model_decoder_layers_20_fc1_bias2) gelu54: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add465) permute_dims424: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_20_fc2_weight2, axes=None) matmul423: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu54, permute_dims424, out_dtype="void") add466: R.Tensor((1, seq_len, 1280), dtype="float16") = 
R.add(matmul423, model_decoder_layers_20_fc2_bias2) add467: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add464, add466) layer_norm128: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add467, model_decoder_layers_21_self_attn_layer_norm_weight2, model_decoder_layers_21_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims425: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_21_self_attn_q_proj_weight2, axes=None) matmul424: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm128, permute_dims425, out_dtype="void") add468: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul424, model_decoder_layers_21_self_attn_q_proj_bias2) reshape597: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add468, R.shape([1, seq_len, 20, 64])) permute_dims426: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_21_self_attn_k_proj_weight2, axes=None) matmul425: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm128, permute_dims426, out_dtype="void") reshape598: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul425, R.shape([1, seq_len, 20, 64])) permute_dims427: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_21_self_attn_v_proj_weight2, axes=None) matmul426: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm128, permute_dims427, out_dtype="void") add469: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul426, model_decoder_layers_21_self_attn_v_proj_bias2) reshape599: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add469, R.shape([1, seq_len, 20, 64])) concat21: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape597, reshape598, reshape599), axis=2) reshape600: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat21, R.shape([seq_len, 60, 64])) lv111 = 
R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(21), R.prim_value(T.float32(1)), reshape600), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape601: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv111, R.shape([1, seq_len, 20, 64])) reshape602: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape601, R.shape([1, seq_len, 1280])) permute_dims428: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_21_self_attn_out_proj_weight2, axes=None) matmul427: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape602, permute_dims428, out_dtype="void") add470: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul427, model_decoder_layers_21_self_attn_out_proj_bias2) add471: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add467, add470) layer_norm129: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add471, model_decoder_layers_21_encoder_attn_layer_norm_weight2, model_decoder_layers_21_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims429: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_21_encoder_attn_q_proj_weight2, axes=None) matmul428: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm129, permute_dims429, out_dtype="void") add472: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul428, model_decoder_layers_21_encoder_attn_q_proj_bias2) reshape603: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add472, R.shape([1, seq_len, 20, 64])) reshape604: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape603, R.shape([seq_len, 20, 64])) lv112 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(21), R.prim_value(T.float32(1)), reshape604), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape605: R.Tensor((1, seq_len, 20, 64), 
dtype="float16") = R.reshape(lv112, R.shape([1, seq_len, 20, 64])) reshape606: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape605, R.shape([1, seq_len, 1280])) permute_dims430: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_21_encoder_attn_out_proj_weight2, axes=None) matmul429: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape606, permute_dims430, out_dtype="void") add473: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul429, model_decoder_layers_21_encoder_attn_out_proj_bias2) add474: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add471, add473) layer_norm130: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add474, model_decoder_layers_21_final_layer_norm_weight2, model_decoder_layers_21_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims431: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_21_fc1_weight2, axes=None) matmul430: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm130, permute_dims431, out_dtype="void") add475: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul430, model_decoder_layers_21_fc1_bias2) gelu55: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add475) permute_dims432: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_21_fc2_weight2, axes=None) matmul431: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu55, permute_dims432, out_dtype="void") add476: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul431, model_decoder_layers_21_fc2_bias2) add477: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add474, add476) layer_norm131: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add477, model_decoder_layers_22_self_attn_layer_norm_weight2, model_decoder_layers_22_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) 
permute_dims433: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_22_self_attn_q_proj_weight2, axes=None) matmul432: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm131, permute_dims433, out_dtype="void") add478: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul432, model_decoder_layers_22_self_attn_q_proj_bias2) reshape607: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add478, R.shape([1, seq_len, 20, 64])) permute_dims434: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_22_self_attn_k_proj_weight2, axes=None) matmul433: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm131, permute_dims434, out_dtype="void") reshape608: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul433, R.shape([1, seq_len, 20, 64])) permute_dims435: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_22_self_attn_v_proj_weight2, axes=None) matmul434: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm131, permute_dims435, out_dtype="void") add479: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul434, model_decoder_layers_22_self_attn_v_proj_bias2) reshape609: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add479, R.shape([1, seq_len, 20, 64])) concat22: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape607, reshape608, reshape609), axis=2) reshape610: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat22, R.shape([seq_len, 60, 64])) lv113 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(22), R.prim_value(T.float32(1)), reshape610), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape611: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv113, R.shape([1, seq_len, 20, 64])) reshape612: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape611, R.shape([1, seq_len, 1280])) 
permute_dims436: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_22_self_attn_out_proj_weight2, axes=None) matmul435: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape612, permute_dims436, out_dtype="void") add480: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul435, model_decoder_layers_22_self_attn_out_proj_bias2) add481: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add477, add480) layer_norm132: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add481, model_decoder_layers_22_encoder_attn_layer_norm_weight2, model_decoder_layers_22_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims437: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_22_encoder_attn_q_proj_weight2, axes=None) matmul436: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm132, permute_dims437, out_dtype="void") add482: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul436, model_decoder_layers_22_encoder_attn_q_proj_bias2) reshape613: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add482, R.shape([1, seq_len, 20, 64])) reshape614: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape613, R.shape([seq_len, 20, 64])) lv114 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(22), R.prim_value(T.float32(1)), reshape614), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape615: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv114, R.shape([1, seq_len, 20, 64])) reshape616: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape615, R.shape([1, seq_len, 1280])) permute_dims438: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_22_encoder_attn_out_proj_weight2, axes=None) matmul437: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape616, permute_dims438, 
out_dtype="void") add483: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul437, model_decoder_layers_22_encoder_attn_out_proj_bias2) add484: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add481, add483) layer_norm133: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add484, model_decoder_layers_22_final_layer_norm_weight2, model_decoder_layers_22_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims439: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_22_fc1_weight2, axes=None) matmul438: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm133, permute_dims439, out_dtype="void") add485: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul438, model_decoder_layers_22_fc1_bias2) gelu56: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add485) permute_dims440: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_22_fc2_weight2, axes=None) matmul439: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu56, permute_dims440, out_dtype="void") add486: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul439, model_decoder_layers_22_fc2_bias2) add487: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add484, add486) layer_norm134: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add487, model_decoder_layers_23_self_attn_layer_norm_weight2, model_decoder_layers_23_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims441: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_23_self_attn_q_proj_weight2, axes=None) matmul440: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm134, permute_dims441, out_dtype="void") add488: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul440, model_decoder_layers_23_self_attn_q_proj_bias2) reshape617: R.Tensor((1, seq_len, 20, 64), 
dtype="float16") = R.reshape(add488, R.shape([1, seq_len, 20, 64])) permute_dims442: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_23_self_attn_k_proj_weight2, axes=None) matmul441: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm134, permute_dims442, out_dtype="void") reshape618: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul441, R.shape([1, seq_len, 20, 64])) permute_dims443: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_23_self_attn_v_proj_weight2, axes=None) matmul442: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm134, permute_dims443, out_dtype="void") add489: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul442, model_decoder_layers_23_self_attn_v_proj_bias2) reshape619: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add489, R.shape([1, seq_len, 20, 64])) concat23: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape617, reshape618, reshape619), axis=2) reshape620: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat23, R.shape([seq_len, 60, 64])) lv115 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(23), R.prim_value(T.float32(1)), reshape620), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape621: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv115, R.shape([1, seq_len, 20, 64])) reshape622: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape621, R.shape([1, seq_len, 1280])) permute_dims444: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_23_self_attn_out_proj_weight2, axes=None) matmul443: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape622, permute_dims444, out_dtype="void") add490: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul443, model_decoder_layers_23_self_attn_out_proj_bias2) add491: R.Tensor((1, seq_len, 1280), 
dtype="float16") = R.add(add487, add490) layer_norm135: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add491, model_decoder_layers_23_encoder_attn_layer_norm_weight2, model_decoder_layers_23_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims445: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_23_encoder_attn_q_proj_weight2, axes=None) matmul444: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm135, permute_dims445, out_dtype="void") add492: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul444, model_decoder_layers_23_encoder_attn_q_proj_bias2) reshape623: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add492, R.shape([1, seq_len, 20, 64])) reshape624: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape623, R.shape([seq_len, 20, 64])) lv116 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(23), R.prim_value(T.float32(1)), reshape624), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape625: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv116, R.shape([1, seq_len, 20, 64])) reshape626: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape625, R.shape([1, seq_len, 1280])) permute_dims446: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_23_encoder_attn_out_proj_weight2, axes=None) matmul445: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape626, permute_dims446, out_dtype="void") add493: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul445, model_decoder_layers_23_encoder_attn_out_proj_bias2) add494: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add491, add493) layer_norm136: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add494, model_decoder_layers_23_final_layer_norm_weight2, model_decoder_layers_23_final_layer_norm_bias2, axes=[-1], 
epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims447: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_23_fc1_weight2, axes=None) matmul446: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm136, permute_dims447, out_dtype="void") add495: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul446, model_decoder_layers_23_fc1_bias2) gelu57: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add495) permute_dims448: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_23_fc2_weight2, axes=None) matmul447: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu57, permute_dims448, out_dtype="void") add496: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul447, model_decoder_layers_23_fc2_bias2) add497: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add494, add496) layer_norm137: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add497, model_decoder_layers_24_self_attn_layer_norm_weight2, model_decoder_layers_24_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims449: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_24_self_attn_q_proj_weight2, axes=None) matmul448: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm137, permute_dims449, out_dtype="void") add498: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul448, model_decoder_layers_24_self_attn_q_proj_bias2) reshape627: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add498, R.shape([1, seq_len, 20, 64])) permute_dims450: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_24_self_attn_k_proj_weight2, axes=None) matmul449: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm137, permute_dims450, out_dtype="void") reshape628: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul449, R.shape([1, 
seq_len, 20, 64])) permute_dims451: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_24_self_attn_v_proj_weight2, axes=None) matmul450: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm137, permute_dims451, out_dtype="void") add499: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul450, model_decoder_layers_24_self_attn_v_proj_bias2) reshape629: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add499, R.shape([1, seq_len, 20, 64])) concat24: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape627, reshape628, reshape629), axis=2) reshape630: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat24, R.shape([seq_len, 60, 64])) lv117 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(24), R.prim_value(T.float32(1)), reshape630), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape631: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv117, R.shape([1, seq_len, 20, 64])) reshape632: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape631, R.shape([1, seq_len, 1280])) permute_dims452: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_24_self_attn_out_proj_weight2, axes=None) matmul451: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape632, permute_dims452, out_dtype="void") add500: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul451, model_decoder_layers_24_self_attn_out_proj_bias2) add501: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add497, add500) layer_norm138: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add501, model_decoder_layers_24_encoder_attn_layer_norm_weight2, model_decoder_layers_24_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims453: R.Tensor((1280, 1280), dtype="float16") = 
R.permute_dims(model_decoder_layers_24_encoder_attn_q_proj_weight2, axes=None) matmul452: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm138, permute_dims453, out_dtype="void") add502: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul452, model_decoder_layers_24_encoder_attn_q_proj_bias2) reshape633: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add502, R.shape([1, seq_len, 20, 64])) reshape634: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape633, R.shape([seq_len, 20, 64])) lv118 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(24), R.prim_value(T.float32(1)), reshape634), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape635: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv118, R.shape([1, seq_len, 20, 64])) reshape636: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape635, R.shape([1, seq_len, 1280])) permute_dims454: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_24_encoder_attn_out_proj_weight2, axes=None) matmul453: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape636, permute_dims454, out_dtype="void") add503: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul453, model_decoder_layers_24_encoder_attn_out_proj_bias2) add504: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add501, add503) layer_norm139: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add504, model_decoder_layers_24_final_layer_norm_weight2, model_decoder_layers_24_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims455: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_24_fc1_weight2, axes=None) matmul454: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm139, permute_dims455, out_dtype="void") add505: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul454, 
model_decoder_layers_24_fc1_bias2) gelu58: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add505) permute_dims456: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_24_fc2_weight2, axes=None) matmul455: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu58, permute_dims456, out_dtype="void") add506: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul455, model_decoder_layers_24_fc2_bias2) add507: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add504, add506) layer_norm140: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add507, model_decoder_layers_25_self_attn_layer_norm_weight2, model_decoder_layers_25_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims457: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_25_self_attn_q_proj_weight2, axes=None) matmul456: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm140, permute_dims457, out_dtype="void") add508: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul456, model_decoder_layers_25_self_attn_q_proj_bias2) reshape637: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add508, R.shape([1, seq_len, 20, 64])) permute_dims458: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_25_self_attn_k_proj_weight2, axes=None) matmul457: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm140, permute_dims458, out_dtype="void") reshape638: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul457, R.shape([1, seq_len, 20, 64])) permute_dims459: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_25_self_attn_v_proj_weight2, axes=None) matmul458: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm140, permute_dims459, out_dtype="void") add509: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul458, 
model_decoder_layers_25_self_attn_v_proj_bias2) reshape639: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add509, R.shape([1, seq_len, 20, 64])) concat25: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape637, reshape638, reshape639), axis=2) reshape640: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat25, R.shape([seq_len, 60, 64])) lv119 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(25), R.prim_value(T.float32(1)), reshape640), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape641: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv119, R.shape([1, seq_len, 20, 64])) reshape642: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape641, R.shape([1, seq_len, 1280])) permute_dims460: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_25_self_attn_out_proj_weight2, axes=None) matmul459: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape642, permute_dims460, out_dtype="void") add510: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul459, model_decoder_layers_25_self_attn_out_proj_bias2) add511: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add507, add510) layer_norm141: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add511, model_decoder_layers_25_encoder_attn_layer_norm_weight2, model_decoder_layers_25_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims461: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_25_encoder_attn_q_proj_weight2, axes=None) matmul460: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm141, permute_dims461, out_dtype="void") add512: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul460, model_decoder_layers_25_encoder_attn_q_proj_bias2) reshape643: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add512, 
R.shape([1, seq_len, 20, 64])) reshape644: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape643, R.shape([seq_len, 20, 64])) lv120 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(25), R.prim_value(T.float32(1)), reshape644), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape645: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv120, R.shape([1, seq_len, 20, 64])) reshape646: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape645, R.shape([1, seq_len, 1280])) permute_dims462: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_25_encoder_attn_out_proj_weight2, axes=None) matmul461: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape646, permute_dims462, out_dtype="void") add513: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul461, model_decoder_layers_25_encoder_attn_out_proj_bias2) add514: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add511, add513) layer_norm142: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add514, model_decoder_layers_25_final_layer_norm_weight2, model_decoder_layers_25_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims463: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_25_fc1_weight2, axes=None) matmul462: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm142, permute_dims463, out_dtype="void") add515: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul462, model_decoder_layers_25_fc1_bias2) gelu59: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add515) permute_dims464: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_25_fc2_weight2, axes=None) matmul463: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu59, permute_dims464, out_dtype="void") add516: R.Tensor((1, seq_len, 1280), dtype="float16") = 
R.add(matmul463, model_decoder_layers_25_fc2_bias2) add517: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add514, add516) layer_norm143: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add517, model_decoder_layers_26_self_attn_layer_norm_weight2, model_decoder_layers_26_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims465: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_26_self_attn_q_proj_weight2, axes=None) matmul464: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm143, permute_dims465, out_dtype="void") add518: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul464, model_decoder_layers_26_self_attn_q_proj_bias2) reshape647: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add518, R.shape([1, seq_len, 20, 64])) permute_dims466: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_26_self_attn_k_proj_weight2, axes=None) matmul465: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm143, permute_dims466, out_dtype="void") reshape648: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul465, R.shape([1, seq_len, 20, 64])) permute_dims467: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_26_self_attn_v_proj_weight2, axes=None) matmul466: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm143, permute_dims467, out_dtype="void") add519: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul466, model_decoder_layers_26_self_attn_v_proj_bias2) reshape649: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add519, R.shape([1, seq_len, 20, 64])) concat26: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape647, reshape648, reshape649), axis=2) reshape650: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat26, R.shape([seq_len, 60, 64])) lv121 = 
R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(26), R.prim_value(T.float32(1)), reshape650), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape651: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv121, R.shape([1, seq_len, 20, 64])) reshape652: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape651, R.shape([1, seq_len, 1280])) permute_dims468: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_26_self_attn_out_proj_weight2, axes=None) matmul467: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape652, permute_dims468, out_dtype="void") add520: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul467, model_decoder_layers_26_self_attn_out_proj_bias2) add521: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add517, add520) layer_norm144: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add521, model_decoder_layers_26_encoder_attn_layer_norm_weight2, model_decoder_layers_26_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims469: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_26_encoder_attn_q_proj_weight2, axes=None) matmul468: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm144, permute_dims469, out_dtype="void") add522: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul468, model_decoder_layers_26_encoder_attn_q_proj_bias2) reshape653: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add522, R.shape([1, seq_len, 20, 64])) reshape654: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape653, R.shape([seq_len, 20, 64])) lv122 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(26), R.prim_value(T.float32(1)), reshape654), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape655: R.Tensor((1, seq_len, 20, 64), 
dtype="float16") = R.reshape(lv122, R.shape([1, seq_len, 20, 64])) reshape656: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape655, R.shape([1, seq_len, 1280])) permute_dims470: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_26_encoder_attn_out_proj_weight2, axes=None) matmul469: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape656, permute_dims470, out_dtype="void") add523: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul469, model_decoder_layers_26_encoder_attn_out_proj_bias2) add524: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add521, add523) layer_norm145: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add524, model_decoder_layers_26_final_layer_norm_weight2, model_decoder_layers_26_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims471: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_26_fc1_weight2, axes=None) matmul470: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm145, permute_dims471, out_dtype="void") add525: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul470, model_decoder_layers_26_fc1_bias2) gelu60: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add525) permute_dims472: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_26_fc2_weight2, axes=None) matmul471: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu60, permute_dims472, out_dtype="void") add526: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul471, model_decoder_layers_26_fc2_bias2) add527: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add524, add526) layer_norm146: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add527, model_decoder_layers_27_self_attn_layer_norm_weight2, model_decoder_layers_27_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) 
permute_dims473: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_27_self_attn_q_proj_weight2, axes=None) matmul472: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm146, permute_dims473, out_dtype="void") add528: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul472, model_decoder_layers_27_self_attn_q_proj_bias2) reshape657: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add528, R.shape([1, seq_len, 20, 64])) permute_dims474: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_27_self_attn_k_proj_weight2, axes=None) matmul473: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm146, permute_dims474, out_dtype="void") reshape658: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul473, R.shape([1, seq_len, 20, 64])) permute_dims475: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_27_self_attn_v_proj_weight2, axes=None) matmul474: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm146, permute_dims475, out_dtype="void") add529: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul474, model_decoder_layers_27_self_attn_v_proj_bias2) reshape659: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add529, R.shape([1, seq_len, 20, 64])) concat27: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape657, reshape658, reshape659), axis=2) reshape660: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat27, R.shape([seq_len, 60, 64])) lv123 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(27), R.prim_value(T.float32(1)), reshape660), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape661: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv123, R.shape([1, seq_len, 20, 64])) reshape662: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape661, R.shape([1, seq_len, 1280])) 
permute_dims476: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_27_self_attn_out_proj_weight2, axes=None) matmul475: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape662, permute_dims476, out_dtype="void") add530: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul475, model_decoder_layers_27_self_attn_out_proj_bias2) add531: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add527, add530) layer_norm147: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add531, model_decoder_layers_27_encoder_attn_layer_norm_weight2, model_decoder_layers_27_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims477: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_27_encoder_attn_q_proj_weight2, axes=None) matmul476: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm147, permute_dims477, out_dtype="void") add532: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul476, model_decoder_layers_27_encoder_attn_q_proj_bias2) reshape663: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add532, R.shape([1, seq_len, 20, 64])) reshape664: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape663, R.shape([seq_len, 20, 64])) lv124 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(27), R.prim_value(T.float32(1)), reshape664), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape665: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv124, R.shape([1, seq_len, 20, 64])) reshape666: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape665, R.shape([1, seq_len, 1280])) permute_dims478: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_27_encoder_attn_out_proj_weight2, axes=None) matmul477: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape666, permute_dims478, 
out_dtype="void") add533: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul477, model_decoder_layers_27_encoder_attn_out_proj_bias2) add534: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add531, add533) layer_norm148: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add534, model_decoder_layers_27_final_layer_norm_weight2, model_decoder_layers_27_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims479: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_27_fc1_weight2, axes=None) matmul478: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm148, permute_dims479, out_dtype="void") add535: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul478, model_decoder_layers_27_fc1_bias2) gelu61: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add535) permute_dims480: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_27_fc2_weight2, axes=None) matmul479: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu61, permute_dims480, out_dtype="void") add536: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul479, model_decoder_layers_27_fc2_bias2) add537: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add534, add536) layer_norm149: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add537, model_decoder_layers_28_self_attn_layer_norm_weight2, model_decoder_layers_28_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims481: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_28_self_attn_q_proj_weight2, axes=None) matmul480: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm149, permute_dims481, out_dtype="void") add538: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul480, model_decoder_layers_28_self_attn_q_proj_bias2) reshape667: R.Tensor((1, seq_len, 20, 64), 
dtype="float16") = R.reshape(add538, R.shape([1, seq_len, 20, 64])) permute_dims482: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_28_self_attn_k_proj_weight2, axes=None) matmul481: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm149, permute_dims482, out_dtype="void") reshape668: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul481, R.shape([1, seq_len, 20, 64])) permute_dims483: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_28_self_attn_v_proj_weight2, axes=None) matmul482: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm149, permute_dims483, out_dtype="void") add539: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul482, model_decoder_layers_28_self_attn_v_proj_bias2) reshape669: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add539, R.shape([1, seq_len, 20, 64])) concat28: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape667, reshape668, reshape669), axis=2) reshape670: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat28, R.shape([seq_len, 60, 64])) lv125 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(28), R.prim_value(T.float32(1)), reshape670), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape671: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv125, R.shape([1, seq_len, 20, 64])) reshape672: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape671, R.shape([1, seq_len, 1280])) permute_dims484: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_28_self_attn_out_proj_weight2, axes=None) matmul483: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape672, permute_dims484, out_dtype="void") add540: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul483, model_decoder_layers_28_self_attn_out_proj_bias2) add541: R.Tensor((1, seq_len, 1280), 
dtype="float16") = R.add(add537, add540) layer_norm150: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add541, model_decoder_layers_28_encoder_attn_layer_norm_weight2, model_decoder_layers_28_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims485: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_28_encoder_attn_q_proj_weight2, axes=None) matmul484: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm150, permute_dims485, out_dtype="void") add542: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul484, model_decoder_layers_28_encoder_attn_q_proj_bias2) reshape673: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add542, R.shape([1, seq_len, 20, 64])) reshape674: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape673, R.shape([seq_len, 20, 64])) lv126 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(28), R.prim_value(T.float32(1)), reshape674), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape675: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv126, R.shape([1, seq_len, 20, 64])) reshape676: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape675, R.shape([1, seq_len, 1280])) permute_dims486: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_28_encoder_attn_out_proj_weight2, axes=None) matmul485: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape676, permute_dims486, out_dtype="void") add543: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul485, model_decoder_layers_28_encoder_attn_out_proj_bias2) add544: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add541, add543) layer_norm151: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add544, model_decoder_layers_28_final_layer_norm_weight2, model_decoder_layers_28_final_layer_norm_bias2, axes=[-1], 
epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims487: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_28_fc1_weight2, axes=None) matmul486: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm151, permute_dims487, out_dtype="void") add545: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul486, model_decoder_layers_28_fc1_bias2) gelu62: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add545) permute_dims488: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_28_fc2_weight2, axes=None) matmul487: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu62, permute_dims488, out_dtype="void") add546: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul487, model_decoder_layers_28_fc2_bias2) add547: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add544, add546) layer_norm152: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add547, model_decoder_layers_29_self_attn_layer_norm_weight2, model_decoder_layers_29_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims489: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_29_self_attn_q_proj_weight2, axes=None) matmul488: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm152, permute_dims489, out_dtype="void") add548: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul488, model_decoder_layers_29_self_attn_q_proj_bias2) reshape677: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add548, R.shape([1, seq_len, 20, 64])) permute_dims490: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_29_self_attn_k_proj_weight2, axes=None) matmul489: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm152, permute_dims490, out_dtype="void") reshape678: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul489, R.shape([1, 
seq_len, 20, 64])) permute_dims491: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_29_self_attn_v_proj_weight2, axes=None) matmul490: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm152, permute_dims491, out_dtype="void") add549: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul490, model_decoder_layers_29_self_attn_v_proj_bias2) reshape679: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add549, R.shape([1, seq_len, 20, 64])) concat29: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape677, reshape678, reshape679), axis=2) reshape680: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat29, R.shape([seq_len, 60, 64])) lv127 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(29), R.prim_value(T.float32(1)), reshape680), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape681: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv127, R.shape([1, seq_len, 20, 64])) reshape682: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape681, R.shape([1, seq_len, 1280])) permute_dims492: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_29_self_attn_out_proj_weight2, axes=None) matmul491: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape682, permute_dims492, out_dtype="void") add550: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul491, model_decoder_layers_29_self_attn_out_proj_bias2) add551: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add547, add550) layer_norm153: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add551, model_decoder_layers_29_encoder_attn_layer_norm_weight2, model_decoder_layers_29_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims493: R.Tensor((1280, 1280), dtype="float16") = 
R.permute_dims(model_decoder_layers_29_encoder_attn_q_proj_weight2, axes=None) matmul492: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm153, permute_dims493, out_dtype="void") add552: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul492, model_decoder_layers_29_encoder_attn_q_proj_bias2) reshape683: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add552, R.shape([1, seq_len, 20, 64])) reshape684: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape683, R.shape([seq_len, 20, 64])) lv128 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(29), R.prim_value(T.float32(1)), reshape684), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape685: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv128, R.shape([1, seq_len, 20, 64])) reshape686: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape685, R.shape([1, seq_len, 1280])) permute_dims494: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_29_encoder_attn_out_proj_weight2, axes=None) matmul493: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape686, permute_dims494, out_dtype="void") add553: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul493, model_decoder_layers_29_encoder_attn_out_proj_bias2) add554: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add551, add553) layer_norm154: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add554, model_decoder_layers_29_final_layer_norm_weight2, model_decoder_layers_29_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims495: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_29_fc1_weight2, axes=None) matmul494: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm154, permute_dims495, out_dtype="void") add555: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul494, 
model_decoder_layers_29_fc1_bias2) gelu63: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add555) permute_dims496: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_29_fc2_weight2, axes=None) matmul495: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu63, permute_dims496, out_dtype="void") add556: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul495, model_decoder_layers_29_fc2_bias2) add557: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add554, add556) layer_norm155: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add557, model_decoder_layers_30_self_attn_layer_norm_weight2, model_decoder_layers_30_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims497: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_30_self_attn_q_proj_weight2, axes=None) matmul496: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm155, permute_dims497, out_dtype="void") add558: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul496, model_decoder_layers_30_self_attn_q_proj_bias2) reshape687: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add558, R.shape([1, seq_len, 20, 64])) permute_dims498: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_30_self_attn_k_proj_weight2, axes=None) matmul497: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm155, permute_dims498, out_dtype="void") reshape688: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul497, R.shape([1, seq_len, 20, 64])) permute_dims499: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_30_self_attn_v_proj_weight2, axes=None) matmul498: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm155, permute_dims499, out_dtype="void") add559: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul498, 
model_decoder_layers_30_self_attn_v_proj_bias2) reshape689: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add559, R.shape([1, seq_len, 20, 64])) concat30: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape687, reshape688, reshape689), axis=2) reshape690: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat30, R.shape([seq_len, 60, 64])) lv129 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(30), R.prim_value(T.float32(1)), reshape690), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape691: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv129, R.shape([1, seq_len, 20, 64])) reshape692: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape691, R.shape([1, seq_len, 1280])) permute_dims500: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_30_self_attn_out_proj_weight2, axes=None) matmul499: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape692, permute_dims500, out_dtype="void") add560: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul499, model_decoder_layers_30_self_attn_out_proj_bias2) add561: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add557, add560) layer_norm156: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add561, model_decoder_layers_30_encoder_attn_layer_norm_weight2, model_decoder_layers_30_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims501: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_30_encoder_attn_q_proj_weight2, axes=None) matmul500: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm156, permute_dims501, out_dtype="void") add562: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul500, model_decoder_layers_30_encoder_attn_q_proj_bias2) reshape693: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add562, 
R.shape([1, seq_len, 20, 64])) reshape694: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape693, R.shape([seq_len, 20, 64])) lv130 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(30), R.prim_value(T.float32(1)), reshape694), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape695: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv130, R.shape([1, seq_len, 20, 64])) reshape696: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape695, R.shape([1, seq_len, 1280])) permute_dims502: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_30_encoder_attn_out_proj_weight2, axes=None) matmul501: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape696, permute_dims502, out_dtype="void") add563: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul501, model_decoder_layers_30_encoder_attn_out_proj_bias2) add564: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add561, add563) layer_norm157: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add564, model_decoder_layers_30_final_layer_norm_weight2, model_decoder_layers_30_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims503: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_30_fc1_weight2, axes=None) matmul502: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm157, permute_dims503, out_dtype="void") add565: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul502, model_decoder_layers_30_fc1_bias2) gelu64: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add565) permute_dims504: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_30_fc2_weight2, axes=None) matmul503: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu64, permute_dims504, out_dtype="void") add566: R.Tensor((1, seq_len, 1280), dtype="float16") = 
R.add(matmul503, model_decoder_layers_30_fc2_bias2) add567: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add564, add566) layer_norm158: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add567, model_decoder_layers_31_self_attn_layer_norm_weight2, model_decoder_layers_31_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims505: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_31_self_attn_q_proj_weight2, axes=None) matmul504: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm158, permute_dims505, out_dtype="void") add568: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul504, model_decoder_layers_31_self_attn_q_proj_bias2) reshape697: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add568, R.shape([1, seq_len, 20, 64])) permute_dims506: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_31_self_attn_k_proj_weight2, axes=None) matmul505: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm158, permute_dims506, out_dtype="void") reshape698: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul505, R.shape([1, seq_len, 20, 64])) permute_dims507: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_31_self_attn_v_proj_weight2, axes=None) matmul506: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm158, permute_dims507, out_dtype="void") add569: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul506, model_decoder_layers_31_self_attn_v_proj_bias2) reshape699: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add569, R.shape([1, seq_len, 20, 64])) concat31: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape697, reshape698, reshape699), axis=2) reshape700: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat31, R.shape([seq_len, 60, 64])) lv131 = 
R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(31), R.prim_value(T.float32(1)), reshape700), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape701: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv131, R.shape([1, seq_len, 20, 64])) reshape702: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape701, R.shape([1, seq_len, 1280])) permute_dims508: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_31_self_attn_out_proj_weight2, axes=None) matmul507: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape702, permute_dims508, out_dtype="void") add570: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul507, model_decoder_layers_31_self_attn_out_proj_bias2) add571: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add567, add570) layer_norm159: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add571, model_decoder_layers_31_encoder_attn_layer_norm_weight2, model_decoder_layers_31_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims509: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_31_encoder_attn_q_proj_weight2, axes=None) matmul508: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm159, permute_dims509, out_dtype="void") add572: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul508, model_decoder_layers_31_encoder_attn_q_proj_bias2) reshape703: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add572, R.shape([1, seq_len, 20, 64])) reshape704: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape703, R.shape([seq_len, 20, 64])) lv132 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(31), R.prim_value(T.float32(1)), reshape704), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape705: R.Tensor((1, seq_len, 20, 64), 
dtype="float16") = R.reshape(lv132, R.shape([1, seq_len, 20, 64])) reshape706: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape705, R.shape([1, seq_len, 1280])) permute_dims510: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_31_encoder_attn_out_proj_weight2, axes=None) matmul509: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape706, permute_dims510, out_dtype="void") add573: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul509, model_decoder_layers_31_encoder_attn_out_proj_bias2) add574: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add571, add573) layer_norm160: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add574, model_decoder_layers_31_final_layer_norm_weight2, model_decoder_layers_31_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims511: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_31_fc1_weight2, axes=None) matmul510: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm160, permute_dims511, out_dtype="void") add575: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul510, model_decoder_layers_31_fc1_bias2) gelu65: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add575) permute_dims512: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_31_fc2_weight2, axes=None) matmul511: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu65, permute_dims512, out_dtype="void") add576: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul511, model_decoder_layers_31_fc2_bias2) add577: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add574, add576) layer_norm161: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add577, model_decoder_layer_norm_weight2, model_decoder_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) take2: R.Tensor((1, batch_size, 1280), 
dtype="float16") = R.take(layer_norm161, logit_positions, axis=1) permute_dims513: R.Tensor((1280, 51866), dtype="float16") = R.permute_dims(model_decoder_embed_tokens_weight2, axes=None) matmul512: R.Tensor((1, batch_size, 51866), dtype="float32") = R.matmul(take2, permute_dims513, out_dtype="float32") gv2: R.Tensor((1, batch_size, 51866), dtype="float32") = matmul512 R.output(gv2) return gv2 @R.function def create_tir_paged_kv_cache(max_batch_size_: R.Shape(["max_batch_size"]), max_total_seq_len_: R.Shape(["max_total_seq_len"]), prefill_chunk_size_: R.Shape(["prefill_chunk_size"]), page_size_: R.Shape(["page_size"]), support_sliding_window_: R.Shape(["support_sliding_window"])) -> R.Object: max_batch_size = T.int64() max_total_seq_len = T.int64() prefill_chunk_size = T.int64() page_size = T.int64() support_sliding_window = T.int64() R.func_attr({"relax.memory_plan_dynamic_func_output": 1, "tir_non_negative_var": ["vocab_size"], "tir_var_upper_bound": {"batch_size": 8, "seq_len": 15000, "total_seq_len": 1500}}) cls = Module gv: R.Tensor((), dtype="float16") = R.zeros(R.shape([]), dtype="float16") paged_kv_cache: R.Object = R.call_pure_packed("vm.builtin.paged_attention_kv_cache_create_reduced", R.shape([max_batch_size, max_total_seq_len, prefill_chunk_size, page_size, support_sliding_window]), R.prim_value(32), R.prim_value(20), R.prim_value(20), R.prim_value(64), R.prim_value(0), R.prim_value(1), R.prim_value(1), gv, cls.tir_kv_cache_transpose_append, cls.batch_prefill_paged_kv, cls.batch_decode_paged_kv, cls.batch_prefill_paged_kv_sliding_window, cls.batch_decode_paged_kv_sliding_window, cls.batch_prefill_ragged_kv, cls.merge_state_inplace, cls.fused_rope, cls.copy_single_page, cls.tir_kv_cache_debug_get_kv, cls.compact_kv_copy, cls.batch_tree_attn, sinfo_args=(R.Object,)) return paged_kv_cache @R.function def decode(input_ids: R.Tensor((1, 1), dtype="int32"), paged_kv_cache: R.Object, packed_params: R.Tuple(R.Tensor((1280, 128, 3), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280, 3), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1500, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), 
dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), 
dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), 
dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), 
dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((51866, 1280), dtype="float16"), R.Tensor((448, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 
1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), 
dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"))) -> 
R.Tensor((1, 1, 51866), dtype="float32"): R.func_attr({"num_input": 2, "relax.memory_plan_dynamic_func_output": 1, "tir_non_negative_var": ["vocab_size"], "tir_var_upper_bound": {"batch_size": 8, "seq_len": 15000, "total_seq_len": 1500}}) with R.dataflow(): model_encoder_conv1_weight5: R.Tensor((1280, 128, 3), dtype="float16") = packed_params[0] model_encoder_conv1_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1] model_encoder_conv2_weight5: R.Tensor((1280, 1280, 3), dtype="float16") = packed_params[2] model_encoder_conv2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[3] model_encoder_embed_positions_weight5: R.Tensor((1500, 1280), dtype="float16") = packed_params[4] model_encoder_layers_0_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[5] model_encoder_layers_0_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[6] model_encoder_layers_0_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[7] model_encoder_layers_0_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[8] model_encoder_layers_0_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[9] model_encoder_layers_0_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[10] model_encoder_layers_0_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[11] model_encoder_layers_0_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[12] model_encoder_layers_0_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[13] model_encoder_layers_0_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[14] model_encoder_layers_0_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[15] model_encoder_layers_0_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[16] model_encoder_layers_0_fc2_bias5: R.Tensor((1280,), 
dtype="float16") = packed_params[17] model_encoder_layers_0_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[18] model_encoder_layers_0_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[19] model_encoder_layers_1_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[20] model_encoder_layers_1_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[21] model_encoder_layers_1_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[22] model_encoder_layers_1_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[23] model_encoder_layers_1_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[24] model_encoder_layers_1_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[25] model_encoder_layers_1_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[26] model_encoder_layers_1_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[27] model_encoder_layers_1_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[28] model_encoder_layers_1_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[29] model_encoder_layers_1_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[30] model_encoder_layers_1_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[31] model_encoder_layers_1_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[32] model_encoder_layers_1_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[33] model_encoder_layers_1_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[34] model_encoder_layers_2_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[35] model_encoder_layers_2_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[36] 
model_encoder_layers_2_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[37] model_encoder_layers_2_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[38] model_encoder_layers_2_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[39] model_encoder_layers_2_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[40] model_encoder_layers_2_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[41] model_encoder_layers_2_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[42] model_encoder_layers_2_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[43] model_encoder_layers_2_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[44] model_encoder_layers_2_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[45] model_encoder_layers_2_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[46] model_encoder_layers_2_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[47] model_encoder_layers_2_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[48] model_encoder_layers_2_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[49] model_encoder_layers_3_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[50] model_encoder_layers_3_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[51] model_encoder_layers_3_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[52] model_encoder_layers_3_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[53] model_encoder_layers_3_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[54] model_encoder_layers_3_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[55] 
model_encoder_layers_3_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[56] model_encoder_layers_3_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[57] model_encoder_layers_3_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[58] model_encoder_layers_3_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[59] model_encoder_layers_3_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[60] model_encoder_layers_3_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[61] model_encoder_layers_3_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[62] model_encoder_layers_3_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[63] model_encoder_layers_3_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[64] model_encoder_layers_4_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[65] model_encoder_layers_4_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[66] model_encoder_layers_4_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[67] model_encoder_layers_4_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[68] model_encoder_layers_4_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[69] model_encoder_layers_4_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[70] model_encoder_layers_4_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[71] model_encoder_layers_4_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[72] model_encoder_layers_4_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[73] model_encoder_layers_4_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[74] model_encoder_layers_4_fc1_bias5: 
R.Tensor((5120,), dtype="float16") = packed_params[75] model_encoder_layers_4_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[76] model_encoder_layers_4_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[77] model_encoder_layers_4_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[78] model_encoder_layers_4_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[79] model_encoder_layers_5_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[80] model_encoder_layers_5_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[81] model_encoder_layers_5_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[82] model_encoder_layers_5_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[83] model_encoder_layers_5_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[84] model_encoder_layers_5_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[85] model_encoder_layers_5_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[86] model_encoder_layers_5_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[87] model_encoder_layers_5_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[88] model_encoder_layers_5_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[89] model_encoder_layers_5_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[90] model_encoder_layers_5_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[91] model_encoder_layers_5_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[92] model_encoder_layers_5_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[93] model_encoder_layers_5_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[94] 
model_encoder_layers_6_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[95] model_encoder_layers_6_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[96] model_encoder_layers_6_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[97] model_encoder_layers_6_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[98] model_encoder_layers_6_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[99] model_encoder_layers_6_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[100] model_encoder_layers_6_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[101] model_encoder_layers_6_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[102] model_encoder_layers_6_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[103] model_encoder_layers_6_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[104] model_encoder_layers_6_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[105] model_encoder_layers_6_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[106] model_encoder_layers_6_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[107] model_encoder_layers_6_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[108] model_encoder_layers_6_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[109] model_encoder_layers_7_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[110] model_encoder_layers_7_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[111] model_encoder_layers_7_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[112] model_encoder_layers_7_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[113] 
model_encoder_layers_7_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[114] model_encoder_layers_7_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[115] model_encoder_layers_7_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[116] model_encoder_layers_7_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[117] model_encoder_layers_7_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[118] model_encoder_layers_7_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[119] model_encoder_layers_7_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[120] model_encoder_layers_7_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[121] model_encoder_layers_7_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[122] model_encoder_layers_7_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[123] model_encoder_layers_7_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[124] model_encoder_layers_8_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[125] model_encoder_layers_8_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[126] model_encoder_layers_8_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[127] model_encoder_layers_8_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[128] model_encoder_layers_8_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[129] model_encoder_layers_8_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[130] model_encoder_layers_8_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[131] model_encoder_layers_8_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[132] 
model_encoder_layers_8_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[133] model_encoder_layers_8_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[134] model_encoder_layers_8_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[135] model_encoder_layers_8_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[136] model_encoder_layers_8_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[137] model_encoder_layers_8_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[138] model_encoder_layers_8_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[139] model_encoder_layers_9_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[140] model_encoder_layers_9_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[141] model_encoder_layers_9_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[142] model_encoder_layers_9_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[143] model_encoder_layers_9_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[144] model_encoder_layers_9_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[145] model_encoder_layers_9_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[146] model_encoder_layers_9_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[147] model_encoder_layers_9_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[148] model_encoder_layers_9_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[149] model_encoder_layers_9_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[150] model_encoder_layers_9_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[151] model_encoder_layers_9_fc2_bias5: R.Tensor((1280,), 
dtype="float16") = packed_params[152] model_encoder_layers_9_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[153] model_encoder_layers_9_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[154] model_encoder_layers_10_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[155] model_encoder_layers_10_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[156] model_encoder_layers_10_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[157] model_encoder_layers_10_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[158] model_encoder_layers_10_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[159] model_encoder_layers_10_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[160] model_encoder_layers_10_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[161] model_encoder_layers_10_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[162] model_encoder_layers_10_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[163] model_encoder_layers_10_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[164] model_encoder_layers_10_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[165] model_encoder_layers_10_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[166] model_encoder_layers_10_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[167] model_encoder_layers_10_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[168] model_encoder_layers_10_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[169] model_encoder_layers_11_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[170] model_encoder_layers_11_self_attn_v_proj_weight5: R.Tensor((1280, 1280), 
dtype="float16") = packed_params[171] model_encoder_layers_11_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[172] model_encoder_layers_11_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[173] model_encoder_layers_11_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[174] model_encoder_layers_11_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[175] model_encoder_layers_11_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[176] model_encoder_layers_11_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[177] model_encoder_layers_11_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[178] model_encoder_layers_11_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[179] model_encoder_layers_11_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[180] model_encoder_layers_11_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[181] model_encoder_layers_11_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[182] model_encoder_layers_11_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[183] model_encoder_layers_11_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[184] model_encoder_layers_12_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[185] model_encoder_layers_12_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[186] model_encoder_layers_12_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[187] model_encoder_layers_12_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[188] model_encoder_layers_12_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[189] model_encoder_layers_12_self_attn_out_proj_weight5: R.Tensor((1280, 1280), 
dtype="float16") = packed_params[190] model_encoder_layers_12_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[191] model_encoder_layers_12_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[192] model_encoder_layers_12_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[193] model_encoder_layers_12_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[194] model_encoder_layers_12_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[195] model_encoder_layers_12_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[196] model_encoder_layers_12_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[197] model_encoder_layers_12_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[198] model_encoder_layers_12_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[199] model_encoder_layers_13_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[200] model_encoder_layers_13_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[201] model_encoder_layers_13_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[202] model_encoder_layers_13_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[203] model_encoder_layers_13_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[204] model_encoder_layers_13_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[205] model_encoder_layers_13_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[206] model_encoder_layers_13_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[207] model_encoder_layers_13_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[208] model_encoder_layers_13_fc1_weight5: R.Tensor((5120, 1280), 
dtype="float16") = packed_params[209] model_encoder_layers_13_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[210] model_encoder_layers_13_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[211] model_encoder_layers_13_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[212] model_encoder_layers_13_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[213] model_encoder_layers_13_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[214] model_encoder_layers_14_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[215] model_encoder_layers_14_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[216] model_encoder_layers_14_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[217] model_encoder_layers_14_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[218] model_encoder_layers_14_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[219] model_encoder_layers_14_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[220] model_encoder_layers_14_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[221] model_encoder_layers_14_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[222] model_encoder_layers_14_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[223] model_encoder_layers_14_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[224] model_encoder_layers_14_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[225] model_encoder_layers_14_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[226] model_encoder_layers_14_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[227] model_encoder_layers_14_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[228] 
model_encoder_layers_14_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[229] model_encoder_layers_15_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[230] model_encoder_layers_15_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[231] model_encoder_layers_15_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[232] model_encoder_layers_15_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[233] model_encoder_layers_15_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[234] model_encoder_layers_15_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[235] model_encoder_layers_15_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[236] model_encoder_layers_15_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[237] model_encoder_layers_15_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[238] model_encoder_layers_15_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[239] model_encoder_layers_15_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[240] model_encoder_layers_15_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[241] model_encoder_layers_15_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[242] model_encoder_layers_15_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[243] model_encoder_layers_15_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[244] model_encoder_layers_16_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[245] model_encoder_layers_16_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[246] model_encoder_layers_16_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[247] 
model_encoder_layers_16_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[248] model_encoder_layers_16_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[249] model_encoder_layers_16_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[250] model_encoder_layers_16_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[251] model_encoder_layers_16_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[252] model_encoder_layers_16_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[253] model_encoder_layers_16_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[254] model_encoder_layers_16_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[255] model_encoder_layers_16_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[256] model_encoder_layers_16_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[257] model_encoder_layers_16_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[258] model_encoder_layers_16_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[259] model_encoder_layers_17_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[260] model_encoder_layers_17_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[261] model_encoder_layers_17_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[262] model_encoder_layers_17_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[263] model_encoder_layers_17_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[264] model_encoder_layers_17_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[265] model_encoder_layers_17_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[266] 
model_encoder_layers_17_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[267] model_encoder_layers_17_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[268] model_encoder_layers_17_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[269] model_encoder_layers_17_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[270] model_encoder_layers_17_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[271] model_encoder_layers_17_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[272] model_encoder_layers_17_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[273] model_encoder_layers_17_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[274] model_encoder_layers_18_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[275] model_encoder_layers_18_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[276] model_encoder_layers_18_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[277] model_encoder_layers_18_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[278] model_encoder_layers_18_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[279] model_encoder_layers_18_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[280] model_encoder_layers_18_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[281] model_encoder_layers_18_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[282] model_encoder_layers_18_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[283] model_encoder_layers_18_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[284] model_encoder_layers_18_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[285] 
model_encoder_layers_18_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[286] model_encoder_layers_18_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[287] model_encoder_layers_18_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[288] model_encoder_layers_18_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[289] model_encoder_layers_19_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[290] model_encoder_layers_19_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[291] model_encoder_layers_19_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[292] model_encoder_layers_19_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[293] model_encoder_layers_19_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[294] model_encoder_layers_19_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[295] model_encoder_layers_19_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[296] model_encoder_layers_19_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[297] model_encoder_layers_19_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[298] model_encoder_layers_19_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[299] model_encoder_layers_19_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[300] model_encoder_layers_19_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[301] model_encoder_layers_19_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[302] model_encoder_layers_19_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[303] model_encoder_layers_19_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[304] 
model_encoder_layers_20_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[305] model_encoder_layers_20_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[306] model_encoder_layers_20_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[307] model_encoder_layers_20_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[308] model_encoder_layers_20_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[309] model_encoder_layers_20_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[310] model_encoder_layers_20_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[311] model_encoder_layers_20_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[312] model_encoder_layers_20_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[313] model_encoder_layers_20_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[314] model_encoder_layers_20_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[315] model_encoder_layers_20_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[316] model_encoder_layers_20_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[317] model_encoder_layers_20_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[318] model_encoder_layers_20_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[319] model_encoder_layers_21_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[320] model_encoder_layers_21_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[321] model_encoder_layers_21_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[322] model_encoder_layers_21_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = 
packed_params[323] model_encoder_layers_21_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[324] model_encoder_layers_21_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[325] model_encoder_layers_21_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[326] model_encoder_layers_21_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[327] model_encoder_layers_21_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[328] model_encoder_layers_21_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[329] model_encoder_layers_21_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[330] model_encoder_layers_21_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[331] model_encoder_layers_21_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[332] model_encoder_layers_21_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[333] model_encoder_layers_21_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[334] model_encoder_layers_22_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[335] model_encoder_layers_22_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[336] model_encoder_layers_22_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[337] model_encoder_layers_22_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[338] model_encoder_layers_22_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[339] model_encoder_layers_22_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[340] model_encoder_layers_22_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[341] model_encoder_layers_22_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = 
packed_params[342] model_encoder_layers_22_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[343] model_encoder_layers_22_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[344] model_encoder_layers_22_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[345] model_encoder_layers_22_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[346] model_encoder_layers_22_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[347] model_encoder_layers_22_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[348] model_encoder_layers_22_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[349] model_encoder_layers_23_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[350] model_encoder_layers_23_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[351] model_encoder_layers_23_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[352] model_encoder_layers_23_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[353] model_encoder_layers_23_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[354] model_encoder_layers_23_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[355] model_encoder_layers_23_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[356] model_encoder_layers_23_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[357] model_encoder_layers_23_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[358] model_encoder_layers_23_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[359] model_encoder_layers_23_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[360] model_encoder_layers_23_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[361] 
model_encoder_layers_23_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[362] model_encoder_layers_23_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[363] model_encoder_layers_23_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[364] model_encoder_layers_24_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[365] model_encoder_layers_24_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[366] model_encoder_layers_24_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[367] model_encoder_layers_24_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[368] model_encoder_layers_24_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[369] model_encoder_layers_24_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[370] model_encoder_layers_24_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[371] model_encoder_layers_24_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[372] model_encoder_layers_24_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[373] model_encoder_layers_24_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[374] model_encoder_layers_24_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[375] model_encoder_layers_24_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[376] model_encoder_layers_24_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[377] model_encoder_layers_24_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[378] model_encoder_layers_24_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[379] model_encoder_layers_25_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[380] 
model_encoder_layers_25_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[381] model_encoder_layers_25_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[382] model_encoder_layers_25_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[383] model_encoder_layers_25_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[384] model_encoder_layers_25_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[385] model_encoder_layers_25_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[386] model_encoder_layers_25_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[387] model_encoder_layers_25_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[388] model_encoder_layers_25_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[389] model_encoder_layers_25_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[390] model_encoder_layers_25_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[391] model_encoder_layers_25_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[392] model_encoder_layers_25_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[393] model_encoder_layers_25_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[394] model_encoder_layers_26_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[395] model_encoder_layers_26_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[396] model_encoder_layers_26_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[397] model_encoder_layers_26_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[398] model_encoder_layers_26_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[399] 
model_encoder_layers_26_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[400] model_encoder_layers_26_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[401] model_encoder_layers_26_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[402] model_encoder_layers_26_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[403] model_encoder_layers_26_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[404] model_encoder_layers_26_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[405] model_encoder_layers_26_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[406] model_encoder_layers_26_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[407] model_encoder_layers_26_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[408] model_encoder_layers_26_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[409] model_encoder_layers_27_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[410] model_encoder_layers_27_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[411] model_encoder_layers_27_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[412] model_encoder_layers_27_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[413] model_encoder_layers_27_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[414] model_encoder_layers_27_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[415] model_encoder_layers_27_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[416] model_encoder_layers_27_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[417] model_encoder_layers_27_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = 
packed_params[418] model_encoder_layers_27_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[419] model_encoder_layers_27_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[420] model_encoder_layers_27_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[421] model_encoder_layers_27_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[422] model_encoder_layers_27_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[423] model_encoder_layers_27_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[424] model_encoder_layers_28_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[425] model_encoder_layers_28_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[426] model_encoder_layers_28_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[427] model_encoder_layers_28_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[428] model_encoder_layers_28_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[429] model_encoder_layers_28_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[430] model_encoder_layers_28_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[431] model_encoder_layers_28_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[432] model_encoder_layers_28_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[433] model_encoder_layers_28_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[434] model_encoder_layers_28_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[435] model_encoder_layers_28_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[436] model_encoder_layers_28_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[437] 
model_encoder_layers_28_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[438] model_encoder_layers_28_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[439] model_encoder_layers_29_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[440] model_encoder_layers_29_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[441] model_encoder_layers_29_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[442] model_encoder_layers_29_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[443] model_encoder_layers_29_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[444] model_encoder_layers_29_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[445] model_encoder_layers_29_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[446] model_encoder_layers_29_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[447] model_encoder_layers_29_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[448] model_encoder_layers_29_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[449] model_encoder_layers_29_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[450] model_encoder_layers_29_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[451] model_encoder_layers_29_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[452] model_encoder_layers_29_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[453] model_encoder_layers_29_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[454] model_encoder_layers_30_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[455] model_encoder_layers_30_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[456] 
model_encoder_layers_30_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[457] model_encoder_layers_30_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[458] model_encoder_layers_30_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[459] model_encoder_layers_30_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[460] model_encoder_layers_30_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[461] model_encoder_layers_30_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[462] model_encoder_layers_30_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[463] model_encoder_layers_30_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[464] model_encoder_layers_30_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[465] model_encoder_layers_30_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[466] model_encoder_layers_30_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[467] model_encoder_layers_30_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[468] model_encoder_layers_30_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[469] model_encoder_layers_31_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[470] model_encoder_layers_31_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[471] model_encoder_layers_31_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[472] model_encoder_layers_31_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[473] model_encoder_layers_31_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[474] model_encoder_layers_31_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[475] 
model_encoder_layers_31_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[476] model_encoder_layers_31_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[477] model_encoder_layers_31_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[478] model_encoder_layers_31_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[479] model_encoder_layers_31_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[480] model_encoder_layers_31_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[481] model_encoder_layers_31_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[482] model_encoder_layers_31_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[483] model_encoder_layers_31_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[484] model_encoder_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[485] model_encoder_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[486] model_decoder_embed_tokens_weight5: R.Tensor((51866, 1280), dtype="float16") = packed_params[487] model_decoder_embed_positions_weight5: R.Tensor((448, 1280), dtype="float16") = packed_params[488] model_decoder_layers_0_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[489] model_decoder_layers_0_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[490] model_decoder_layers_0_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[491] model_decoder_layers_0_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[492] model_decoder_layers_0_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[493] model_decoder_layers_0_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[494] model_decoder_layers_0_self_attn_out_proj_bias5: 
R.Tensor((1280,), dtype="float16") = packed_params[495] model_decoder_layers_0_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[496] model_decoder_layers_0_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[497] model_decoder_layers_0_encoder_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[498] model_decoder_layers_0_encoder_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[499] model_decoder_layers_0_encoder_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[500] model_decoder_layers_0_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[501] model_decoder_layers_0_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[502] model_decoder_layers_0_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[503] model_decoder_layers_0_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[504] model_decoder_layers_0_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[505] model_decoder_layers_0_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[506] model_decoder_layers_0_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[507] model_decoder_layers_0_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[508] model_decoder_layers_0_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[509] model_decoder_layers_0_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[510] model_decoder_layers_0_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[511] model_decoder_layers_0_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[512] model_decoder_layers_1_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[513] 
model_decoder_layers_1_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[514] model_decoder_layers_1_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[515] model_decoder_layers_1_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[516] model_decoder_layers_1_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[517] model_decoder_layers_1_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[518] model_decoder_layers_1_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[519] model_decoder_layers_1_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[520] model_decoder_layers_1_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[521] model_decoder_layers_1_encoder_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[522] model_decoder_layers_1_encoder_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[523] model_decoder_layers_1_encoder_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[524] model_decoder_layers_1_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[525] model_decoder_layers_1_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[526] model_decoder_layers_1_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[527] model_decoder_layers_1_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[528] model_decoder_layers_1_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[529] model_decoder_layers_1_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[530] model_decoder_layers_1_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[531] model_decoder_layers_1_fc1_bias5: 
R.Tensor((5120,), dtype="float16") = packed_params[532] model_decoder_layers_1_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[533] model_decoder_layers_1_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[534] model_decoder_layers_1_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[535] model_decoder_layers_1_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[536] model_decoder_layers_2_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[537] model_decoder_layers_2_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[538] model_decoder_layers_2_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[539] model_decoder_layers_2_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[540] model_decoder_layers_2_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[541] model_decoder_layers_2_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[542] model_decoder_layers_2_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[543] model_decoder_layers_2_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[544] model_decoder_layers_2_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[545] model_decoder_layers_2_encoder_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[546] model_decoder_layers_2_encoder_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[547] model_decoder_layers_2_encoder_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[548] model_decoder_layers_2_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[549] model_decoder_layers_2_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[550] 
model_decoder_layers_2_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[551] model_decoder_layers_2_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[552] model_decoder_layers_2_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[553] model_decoder_layers_2_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[554] model_decoder_layers_2_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[555] model_decoder_layers_2_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[556] model_decoder_layers_2_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[557] model_decoder_layers_2_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[558] model_decoder_layers_2_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[559] model_decoder_layers_2_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[560] model_decoder_layers_3_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[561] model_decoder_layers_3_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[562] model_decoder_layers_3_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[563] model_decoder_layers_3_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[564] model_decoder_layers_3_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[565] model_decoder_layers_3_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[566] model_decoder_layers_3_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[567] model_decoder_layers_3_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[568] model_decoder_layers_3_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[569] 
model_decoder_layers_3_encoder_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[570] model_decoder_layers_3_encoder_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[571] model_decoder_layers_3_encoder_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[572] model_decoder_layers_3_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[573] model_decoder_layers_3_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[574] model_decoder_layers_3_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[575] model_decoder_layers_3_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[576] model_decoder_layers_3_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[577] model_decoder_layers_3_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[578] model_decoder_layers_3_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[579] model_decoder_layers_3_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[580] model_decoder_layers_3_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[581] model_decoder_layers_3_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[582] model_decoder_layers_3_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[583] model_decoder_layers_3_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[584] model_decoder_layers_4_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[585] model_decoder_layers_4_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[586] model_decoder_layers_4_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[587] model_decoder_layers_4_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = 
packed_params[588] model_decoder_layers_4_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[589] model_decoder_layers_4_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[590] model_decoder_layers_4_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[591] model_decoder_layers_4_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[592] model_decoder_layers_4_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[593] model_decoder_layers_4_encoder_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[594] model_decoder_layers_4_encoder_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[595] model_decoder_layers_4_encoder_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[596] model_decoder_layers_4_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[597] model_decoder_layers_4_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[598] model_decoder_layers_4_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[599] model_decoder_layers_4_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[600] model_decoder_layers_4_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[601] model_decoder_layers_4_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[602] model_decoder_layers_4_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[603] model_decoder_layers_4_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[604] model_decoder_layers_4_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[605] model_decoder_layers_4_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[606] model_decoder_layers_4_final_layer_norm_weight5: R.Tensor((1280,), 
dtype="float16") = packed_params[607] model_decoder_layers_4_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[608] model_decoder_layers_5_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[609] model_decoder_layers_5_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[610] model_decoder_layers_5_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[611] model_decoder_layers_5_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[612] model_decoder_layers_5_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[613] model_decoder_layers_5_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[614] model_decoder_layers_5_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[615] model_decoder_layers_5_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[616] model_decoder_layers_5_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[617] model_decoder_layers_5_encoder_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[618] model_decoder_layers_5_encoder_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[619] model_decoder_layers_5_encoder_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[620] model_decoder_layers_5_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[621] model_decoder_layers_5_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[622] model_decoder_layers_5_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[623] model_decoder_layers_5_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[624] model_decoder_layers_5_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[625] 
model_decoder_layers_5_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[626] model_decoder_layers_5_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[627] model_decoder_layers_5_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[628] model_decoder_layers_5_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[629] model_decoder_layers_5_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[630] model_decoder_layers_5_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[631] model_decoder_layers_5_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[632] model_decoder_layers_6_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[633] model_decoder_layers_6_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[634] model_decoder_layers_6_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[635] model_decoder_layers_6_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[636] model_decoder_layers_6_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[637] model_decoder_layers_6_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[638] model_decoder_layers_6_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[639] model_decoder_layers_6_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[640] model_decoder_layers_6_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[641] model_decoder_layers_6_encoder_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[642] model_decoder_layers_6_encoder_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[643] model_decoder_layers_6_encoder_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[644] 
model_decoder_layers_6_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[645] model_decoder_layers_6_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[646] model_decoder_layers_6_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[647] model_decoder_layers_6_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[648] model_decoder_layers_6_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[649] model_decoder_layers_6_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[650] model_decoder_layers_6_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[651] model_decoder_layers_6_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[652] model_decoder_layers_6_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[653] model_decoder_layers_6_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[654] model_decoder_layers_6_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[655] model_decoder_layers_6_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[656] model_decoder_layers_7_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[657] model_decoder_layers_7_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[658] model_decoder_layers_7_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[659] model_decoder_layers_7_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[660] model_decoder_layers_7_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[661] model_decoder_layers_7_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[662] model_decoder_layers_7_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[663] 
model_decoder_layers_7_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[664] model_decoder_layers_7_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[665] model_decoder_layers_7_encoder_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[666] model_decoder_layers_7_encoder_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[667] model_decoder_layers_7_encoder_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[668] model_decoder_layers_7_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[669] model_decoder_layers_7_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[670] model_decoder_layers_7_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[671] model_decoder_layers_7_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[672] model_decoder_layers_7_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[673] model_decoder_layers_7_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[674] model_decoder_layers_7_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[675] model_decoder_layers_7_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[676] model_decoder_layers_7_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[677] model_decoder_layers_7_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[678] model_decoder_layers_7_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[679] model_decoder_layers_7_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[680] model_decoder_layers_8_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[681] model_decoder_layers_8_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = 
packed_params[682] model_decoder_layers_8_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[683] model_decoder_layers_8_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[684] model_decoder_layers_8_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[685] model_decoder_layers_8_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[686] model_decoder_layers_8_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[687] model_decoder_layers_8_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[688] model_decoder_layers_8_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[689] model_decoder_layers_8_encoder_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[690] model_decoder_layers_8_encoder_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[691] model_decoder_layers_8_encoder_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[692] model_decoder_layers_8_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[693] model_decoder_layers_8_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[694] model_decoder_layers_8_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[695] model_decoder_layers_8_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[696] model_decoder_layers_8_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[697] model_decoder_layers_8_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[698] model_decoder_layers_8_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[699] model_decoder_layers_8_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[700] model_decoder_layers_8_fc2_weight5: 
R.Tensor((1280, 5120), dtype="float16") = packed_params[701] model_decoder_layers_8_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[702] model_decoder_layers_8_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[703] model_decoder_layers_8_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[704] model_decoder_layers_9_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[705] model_decoder_layers_9_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[706] model_decoder_layers_9_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[707] model_decoder_layers_9_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[708] model_decoder_layers_9_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[709] model_decoder_layers_9_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[710] model_decoder_layers_9_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[711] model_decoder_layers_9_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[712] model_decoder_layers_9_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[713] model_decoder_layers_9_encoder_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[714] model_decoder_layers_9_encoder_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[715] model_decoder_layers_9_encoder_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[716] model_decoder_layers_9_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[717] model_decoder_layers_9_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[718] model_decoder_layers_9_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[719] 
model_decoder_layers_9_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[720] model_decoder_layers_9_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[721] model_decoder_layers_9_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[722] model_decoder_layers_9_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[723] model_decoder_layers_9_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[724] model_decoder_layers_9_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[725] model_decoder_layers_9_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[726] model_decoder_layers_9_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[727] model_decoder_layers_9_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[728] model_decoder_layers_10_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[729] model_decoder_layers_10_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[730] model_decoder_layers_10_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[731] model_decoder_layers_10_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[732] model_decoder_layers_10_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[733] model_decoder_layers_10_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[734] model_decoder_layers_10_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[735] model_decoder_layers_10_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[736] model_decoder_layers_10_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[737] model_decoder_layers_10_encoder_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = 
packed_params[738] model_decoder_layers_10_encoder_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[739] model_decoder_layers_10_encoder_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[740] model_decoder_layers_10_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[741] model_decoder_layers_10_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[742] model_decoder_layers_10_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[743] model_decoder_layers_10_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[744] model_decoder_layers_10_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[745] model_decoder_layers_10_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[746] model_decoder_layers_10_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[747] model_decoder_layers_10_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[748] model_decoder_layers_10_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[749] model_decoder_layers_10_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[750] model_decoder_layers_10_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[751] model_decoder_layers_10_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[752] model_decoder_layers_11_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[753] model_decoder_layers_11_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[754] model_decoder_layers_11_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[755] model_decoder_layers_11_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[756] model_decoder_layers_11_self_attn_q_proj_bias5: 
R.Tensor((1280,), dtype="float16") = packed_params[757] model_decoder_layers_11_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[758] model_decoder_layers_11_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[759] model_decoder_layers_11_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[760] model_decoder_layers_11_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[761] model_decoder_layers_11_encoder_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[762] model_decoder_layers_11_encoder_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[763] model_decoder_layers_11_encoder_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[764] model_decoder_layers_11_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[765] model_decoder_layers_11_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[766] model_decoder_layers_11_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[767] model_decoder_layers_11_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[768] model_decoder_layers_11_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[769] model_decoder_layers_11_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[770] model_decoder_layers_11_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[771] model_decoder_layers_11_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[772] model_decoder_layers_11_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[773] model_decoder_layers_11_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[774] model_decoder_layers_11_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[775] 
model_decoder_layers_11_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[776] model_decoder_layers_12_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[777] model_decoder_layers_12_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[778] model_decoder_layers_12_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[779] model_decoder_layers_12_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[780] model_decoder_layers_12_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[781] model_decoder_layers_12_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[782] model_decoder_layers_12_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[783] model_decoder_layers_12_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[784] model_decoder_layers_12_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[785] model_decoder_layers_12_encoder_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[786] model_decoder_layers_12_encoder_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[787] model_decoder_layers_12_encoder_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[788] model_decoder_layers_12_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[789] model_decoder_layers_12_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[790] model_decoder_layers_12_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[791] model_decoder_layers_12_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[792] model_decoder_layers_12_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[793] 
model_decoder_layers_12_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[794] model_decoder_layers_12_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[795] model_decoder_layers_12_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[796] model_decoder_layers_12_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[797] model_decoder_layers_12_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[798] model_decoder_layers_12_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[799] model_decoder_layers_12_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[800] model_decoder_layers_13_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[801] model_decoder_layers_13_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[802] model_decoder_layers_13_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[803] model_decoder_layers_13_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[804] model_decoder_layers_13_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[805] model_decoder_layers_13_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[806] model_decoder_layers_13_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[807] model_decoder_layers_13_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[808] model_decoder_layers_13_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[809] model_decoder_layers_13_encoder_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[810] model_decoder_layers_13_encoder_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[811] model_decoder_layers_13_encoder_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = 
packed_params[812] model_decoder_layers_13_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[813] model_decoder_layers_13_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[814] model_decoder_layers_13_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[815] model_decoder_layers_13_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[816] model_decoder_layers_13_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[817] model_decoder_layers_13_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[818] model_decoder_layers_13_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[819] model_decoder_layers_13_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[820] model_decoder_layers_13_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[821] model_decoder_layers_13_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[822] model_decoder_layers_13_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[823] model_decoder_layers_13_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[824] model_decoder_layers_14_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[825] model_decoder_layers_14_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[826] model_decoder_layers_14_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[827] model_decoder_layers_14_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[828] model_decoder_layers_14_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[829] model_decoder_layers_14_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[830] model_decoder_layers_14_self_attn_out_proj_bias5: R.Tensor((1280,), 
dtype="float16") = packed_params[831] model_decoder_layers_14_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[832] model_decoder_layers_14_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[833] model_decoder_layers_14_encoder_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[834] model_decoder_layers_14_encoder_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[835] model_decoder_layers_14_encoder_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[836] model_decoder_layers_14_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[837] model_decoder_layers_14_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[838] model_decoder_layers_14_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[839] model_decoder_layers_14_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[840] model_decoder_layers_14_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[841] model_decoder_layers_14_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[842] model_decoder_layers_14_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[843] model_decoder_layers_14_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[844] model_decoder_layers_14_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[845] model_decoder_layers_14_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[846] model_decoder_layers_14_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[847] model_decoder_layers_14_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[848] model_decoder_layers_15_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[849] 
model_decoder_layers_15_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[850] model_decoder_layers_15_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[851] model_decoder_layers_15_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[852] model_decoder_layers_15_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[853] model_decoder_layers_15_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[854] model_decoder_layers_15_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[855] model_decoder_layers_15_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[856] model_decoder_layers_15_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[857] model_decoder_layers_15_encoder_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[858] model_decoder_layers_15_encoder_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[859] model_decoder_layers_15_encoder_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[860] model_decoder_layers_15_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[861] model_decoder_layers_15_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[862] model_decoder_layers_15_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[863] model_decoder_layers_15_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[864] model_decoder_layers_15_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[865] model_decoder_layers_15_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[866] model_decoder_layers_15_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[867] 
model_decoder_layers_15_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[868] model_decoder_layers_15_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[869] model_decoder_layers_15_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[870] model_decoder_layers_15_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[871] model_decoder_layers_15_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[872] model_decoder_layers_16_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[873] model_decoder_layers_16_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[874] model_decoder_layers_16_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[875] model_decoder_layers_16_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[876] model_decoder_layers_16_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[877] model_decoder_layers_16_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[878] model_decoder_layers_16_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[879] model_decoder_layers_16_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[880] model_decoder_layers_16_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[881] model_decoder_layers_16_encoder_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[882] model_decoder_layers_16_encoder_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[883] model_decoder_layers_16_encoder_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[884] model_decoder_layers_16_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[885] model_decoder_layers_16_encoder_attn_q_proj_bias5: R.Tensor((1280,), 
dtype="float16") = packed_params[886] model_decoder_layers_16_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[887] model_decoder_layers_16_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[888] model_decoder_layers_16_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[889] model_decoder_layers_16_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[890] model_decoder_layers_16_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[891] model_decoder_layers_16_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[892] model_decoder_layers_16_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[893] model_decoder_layers_16_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[894] model_decoder_layers_16_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[895] model_decoder_layers_16_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[896] model_decoder_layers_17_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[897] model_decoder_layers_17_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[898] model_decoder_layers_17_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[899] model_decoder_layers_17_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[900] model_decoder_layers_17_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[901] model_decoder_layers_17_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[902] model_decoder_layers_17_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[903] model_decoder_layers_17_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[904] model_decoder_layers_17_self_attn_layer_norm_bias5: 
R.Tensor((1280,), dtype="float16") = packed_params[905] model_decoder_layers_17_encoder_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[906] model_decoder_layers_17_encoder_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[907] model_decoder_layers_17_encoder_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[908] model_decoder_layers_17_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[909] model_decoder_layers_17_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[910] model_decoder_layers_17_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[911] model_decoder_layers_17_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[912] model_decoder_layers_17_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[913] model_decoder_layers_17_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[914] model_decoder_layers_17_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[915] model_decoder_layers_17_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[916] model_decoder_layers_17_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[917] model_decoder_layers_17_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[918] model_decoder_layers_17_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[919] model_decoder_layers_17_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[920] model_decoder_layers_18_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[921] model_decoder_layers_18_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[922] model_decoder_layers_18_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[923] 
model_decoder_layers_18_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[924] model_decoder_layers_18_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[925] model_decoder_layers_18_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[926] model_decoder_layers_18_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[927] model_decoder_layers_18_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[928] model_decoder_layers_18_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[929] model_decoder_layers_18_encoder_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[930] model_decoder_layers_18_encoder_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[931] model_decoder_layers_18_encoder_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[932] model_decoder_layers_18_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[933] model_decoder_layers_18_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[934] model_decoder_layers_18_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[935] model_decoder_layers_18_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[936] model_decoder_layers_18_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[937] model_decoder_layers_18_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[938] model_decoder_layers_18_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[939] model_decoder_layers_18_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[940] model_decoder_layers_18_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[941] model_decoder_layers_18_fc2_bias5: R.Tensor((1280,), 
dtype="float16") = packed_params[942] model_decoder_layers_18_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[943] model_decoder_layers_18_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[944] model_decoder_layers_19_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[945] model_decoder_layers_19_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[946] model_decoder_layers_19_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[947] model_decoder_layers_19_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[948] model_decoder_layers_19_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[949] model_decoder_layers_19_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[950] model_decoder_layers_19_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[951] model_decoder_layers_19_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[952] model_decoder_layers_19_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[953] model_decoder_layers_19_encoder_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[954] model_decoder_layers_19_encoder_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[955] model_decoder_layers_19_encoder_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[956] model_decoder_layers_19_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[957] model_decoder_layers_19_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[958] model_decoder_layers_19_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[959] model_decoder_layers_19_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = 
packed_params[960] model_decoder_layers_19_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[961] model_decoder_layers_19_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[962] model_decoder_layers_19_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[963] model_decoder_layers_19_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[964] model_decoder_layers_19_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[965] model_decoder_layers_19_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[966] model_decoder_layers_19_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[967] model_decoder_layers_19_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[968] model_decoder_layers_20_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[969] model_decoder_layers_20_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[970] model_decoder_layers_20_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[971] model_decoder_layers_20_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[972] model_decoder_layers_20_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[973] model_decoder_layers_20_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[974] model_decoder_layers_20_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[975] model_decoder_layers_20_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[976] model_decoder_layers_20_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[977] model_decoder_layers_20_encoder_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[978] model_decoder_layers_20_encoder_attn_v_proj_weight5: R.Tensor((1280, 1280), 
dtype="float16") = packed_params[979] model_decoder_layers_20_encoder_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[980] model_decoder_layers_20_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[981] model_decoder_layers_20_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[982] model_decoder_layers_20_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[983] model_decoder_layers_20_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[984] model_decoder_layers_20_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[985] model_decoder_layers_20_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[986] model_decoder_layers_20_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[987] model_decoder_layers_20_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[988] model_decoder_layers_20_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[989] model_decoder_layers_20_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[990] model_decoder_layers_20_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[991] model_decoder_layers_20_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[992] model_decoder_layers_21_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[993] model_decoder_layers_21_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[994] model_decoder_layers_21_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[995] model_decoder_layers_21_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[996] model_decoder_layers_21_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[997] model_decoder_layers_21_self_attn_out_proj_weight5: 
R.Tensor((1280, 1280), dtype="float16") = packed_params[998] model_decoder_layers_21_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[999] model_decoder_layers_21_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1000] model_decoder_layers_21_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1001] model_decoder_layers_21_encoder_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1002] model_decoder_layers_21_encoder_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1003] model_decoder_layers_21_encoder_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1004] model_decoder_layers_21_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1005] model_decoder_layers_21_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1006] model_decoder_layers_21_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1007] model_decoder_layers_21_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1008] model_decoder_layers_21_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1009] model_decoder_layers_21_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1010] model_decoder_layers_21_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[1011] model_decoder_layers_21_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[1012] model_decoder_layers_21_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[1013] model_decoder_layers_21_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1014] model_decoder_layers_21_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1015] model_decoder_layers_21_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1016] 
model_decoder_layers_22_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1017] model_decoder_layers_22_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1018] model_decoder_layers_22_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1019] model_decoder_layers_22_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1020] model_decoder_layers_22_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1021] model_decoder_layers_22_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1022] model_decoder_layers_22_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1023] model_decoder_layers_22_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1024] model_decoder_layers_22_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1025] model_decoder_layers_22_encoder_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1026] model_decoder_layers_22_encoder_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1027] model_decoder_layers_22_encoder_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1028] model_decoder_layers_22_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1029] model_decoder_layers_22_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1030] model_decoder_layers_22_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1031] model_decoder_layers_22_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1032] model_decoder_layers_22_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1033] model_decoder_layers_22_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = 
packed_params[1034] model_decoder_layers_22_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[1035] model_decoder_layers_22_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[1036] model_decoder_layers_22_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[1037] model_decoder_layers_22_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1038] model_decoder_layers_22_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1039] model_decoder_layers_22_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1040] model_decoder_layers_23_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1041] model_decoder_layers_23_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1042] model_decoder_layers_23_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1043] model_decoder_layers_23_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1044] model_decoder_layers_23_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1045] model_decoder_layers_23_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1046] model_decoder_layers_23_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1047] model_decoder_layers_23_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1048] model_decoder_layers_23_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1049] model_decoder_layers_23_encoder_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1050] model_decoder_layers_23_encoder_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1051] model_decoder_layers_23_encoder_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1052] model_decoder_layers_23_encoder_attn_q_proj_weight5: 
R.Tensor((1280, 1280), dtype="float16") = packed_params[1053] model_decoder_layers_23_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1054] model_decoder_layers_23_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1055] model_decoder_layers_23_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1056] model_decoder_layers_23_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1057] model_decoder_layers_23_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1058] model_decoder_layers_23_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[1059] model_decoder_layers_23_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[1060] model_decoder_layers_23_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[1061] model_decoder_layers_23_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1062] model_decoder_layers_23_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1063] model_decoder_layers_23_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1064] model_decoder_layers_24_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1065] model_decoder_layers_24_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1066] model_decoder_layers_24_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1067] model_decoder_layers_24_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1068] model_decoder_layers_24_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1069] model_decoder_layers_24_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1070] model_decoder_layers_24_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1071] 
model_decoder_layers_24_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1072] model_decoder_layers_24_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1073] model_decoder_layers_24_encoder_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1074] model_decoder_layers_24_encoder_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1075] model_decoder_layers_24_encoder_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1076] model_decoder_layers_24_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1077] model_decoder_layers_24_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1078] model_decoder_layers_24_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1079] model_decoder_layers_24_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1080] model_decoder_layers_24_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1081] model_decoder_layers_24_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1082] model_decoder_layers_24_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[1083] model_decoder_layers_24_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[1084] model_decoder_layers_24_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[1085] model_decoder_layers_24_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1086] model_decoder_layers_24_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1087] model_decoder_layers_24_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1088] model_decoder_layers_25_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1089] model_decoder_layers_25_self_attn_v_proj_weight5: 
R.Tensor((1280, 1280), dtype="float16") = packed_params[1090] model_decoder_layers_25_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1091] model_decoder_layers_25_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1092] model_decoder_layers_25_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1093] model_decoder_layers_25_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1094] model_decoder_layers_25_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1095] model_decoder_layers_25_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1096] model_decoder_layers_25_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1097] model_decoder_layers_25_encoder_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1098] model_decoder_layers_25_encoder_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1099] model_decoder_layers_25_encoder_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1100] model_decoder_layers_25_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1101] model_decoder_layers_25_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1102] model_decoder_layers_25_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1103] model_decoder_layers_25_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1104] model_decoder_layers_25_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1105] model_decoder_layers_25_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1106] model_decoder_layers_25_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[1107] model_decoder_layers_25_fc1_bias5: R.Tensor((5120,), 
dtype="float16") = packed_params[1108] model_decoder_layers_25_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[1109] model_decoder_layers_25_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1110] model_decoder_layers_25_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1111] model_decoder_layers_25_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1112] model_decoder_layers_26_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1113] model_decoder_layers_26_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1114] model_decoder_layers_26_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1115] model_decoder_layers_26_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1116] model_decoder_layers_26_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1117] model_decoder_layers_26_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1118] model_decoder_layers_26_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1119] model_decoder_layers_26_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1120] model_decoder_layers_26_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1121] model_decoder_layers_26_encoder_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1122] model_decoder_layers_26_encoder_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1123] model_decoder_layers_26_encoder_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1124] model_decoder_layers_26_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1125] model_decoder_layers_26_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1126] 
model_decoder_layers_26_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1127] model_decoder_layers_26_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1128] model_decoder_layers_26_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1129] model_decoder_layers_26_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1130] model_decoder_layers_26_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[1131] model_decoder_layers_26_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[1132] model_decoder_layers_26_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[1133] model_decoder_layers_26_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1134] model_decoder_layers_26_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1135] model_decoder_layers_26_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1136] model_decoder_layers_27_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1137] model_decoder_layers_27_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1138] model_decoder_layers_27_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1139] model_decoder_layers_27_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1140] model_decoder_layers_27_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1141] model_decoder_layers_27_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1142] model_decoder_layers_27_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1143] model_decoder_layers_27_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1144] model_decoder_layers_27_self_attn_layer_norm_bias5: R.Tensor((1280,), 
dtype="float16") = packed_params[1145] model_decoder_layers_27_encoder_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1146] model_decoder_layers_27_encoder_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1147] model_decoder_layers_27_encoder_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1148] model_decoder_layers_27_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1149] model_decoder_layers_27_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1150] model_decoder_layers_27_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1151] model_decoder_layers_27_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1152] model_decoder_layers_27_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1153] model_decoder_layers_27_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1154] model_decoder_layers_27_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[1155] model_decoder_layers_27_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[1156] model_decoder_layers_27_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[1157] model_decoder_layers_27_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1158] model_decoder_layers_27_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1159] model_decoder_layers_27_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1160] model_decoder_layers_28_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1161] model_decoder_layers_28_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1162] model_decoder_layers_28_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1163] 
model_decoder_layers_28_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1164] model_decoder_layers_28_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1165] model_decoder_layers_28_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1166] model_decoder_layers_28_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1167] model_decoder_layers_28_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1168] model_decoder_layers_28_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1169] model_decoder_layers_28_encoder_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1170] model_decoder_layers_28_encoder_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1171] model_decoder_layers_28_encoder_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1172] model_decoder_layers_28_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1173] model_decoder_layers_28_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1174] model_decoder_layers_28_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1175] model_decoder_layers_28_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1176] model_decoder_layers_28_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1177] model_decoder_layers_28_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1178] model_decoder_layers_28_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[1179] model_decoder_layers_28_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[1180] model_decoder_layers_28_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[1181] model_decoder_layers_28_fc2_bias5: 
R.Tensor((1280,), dtype="float16") = packed_params[1182] model_decoder_layers_28_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1183] model_decoder_layers_28_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1184] model_decoder_layers_29_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1185] model_decoder_layers_29_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1186] model_decoder_layers_29_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1187] model_decoder_layers_29_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1188] model_decoder_layers_29_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1189] model_decoder_layers_29_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1190] model_decoder_layers_29_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1191] model_decoder_layers_29_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1192] model_decoder_layers_29_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1193] model_decoder_layers_29_encoder_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1194] model_decoder_layers_29_encoder_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1195] model_decoder_layers_29_encoder_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1196] model_decoder_layers_29_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1197] model_decoder_layers_29_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1198] model_decoder_layers_29_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1199] model_decoder_layers_29_encoder_attn_out_proj_bias5: 
R.Tensor((1280,), dtype="float16") = packed_params[1200] model_decoder_layers_29_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1201] model_decoder_layers_29_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1202] model_decoder_layers_29_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[1203] model_decoder_layers_29_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[1204] model_decoder_layers_29_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[1205] model_decoder_layers_29_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1206] model_decoder_layers_29_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1207] model_decoder_layers_29_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1208] model_decoder_layers_30_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1209] model_decoder_layers_30_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1210] model_decoder_layers_30_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1211] model_decoder_layers_30_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1212] model_decoder_layers_30_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1213] model_decoder_layers_30_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1214] model_decoder_layers_30_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1215] model_decoder_layers_30_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1216] model_decoder_layers_30_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1217] model_decoder_layers_30_encoder_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1218] 
model_decoder_layers_30_encoder_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1219] model_decoder_layers_30_encoder_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1220] model_decoder_layers_30_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1221] model_decoder_layers_30_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1222] model_decoder_layers_30_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1223] model_decoder_layers_30_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1224] model_decoder_layers_30_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1225] model_decoder_layers_30_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1226] model_decoder_layers_30_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[1227] model_decoder_layers_30_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[1228] model_decoder_layers_30_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[1229] model_decoder_layers_30_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1230] model_decoder_layers_30_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1231] model_decoder_layers_30_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1232] model_decoder_layers_31_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1233] model_decoder_layers_31_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1234] model_decoder_layers_31_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1235] model_decoder_layers_31_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1236] model_decoder_layers_31_self_attn_q_proj_bias5: R.Tensor((1280,), 
dtype="float16") = packed_params[1237] model_decoder_layers_31_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1238] model_decoder_layers_31_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1239] model_decoder_layers_31_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1240] model_decoder_layers_31_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1241] model_decoder_layers_31_encoder_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1242] model_decoder_layers_31_encoder_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1243] model_decoder_layers_31_encoder_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1244] model_decoder_layers_31_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1245] model_decoder_layers_31_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1246] model_decoder_layers_31_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1247] model_decoder_layers_31_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1248] model_decoder_layers_31_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1249] model_decoder_layers_31_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1250] model_decoder_layers_31_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[1251] model_decoder_layers_31_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[1252] model_decoder_layers_31_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[1253] model_decoder_layers_31_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1254] model_decoder_layers_31_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1255] 
model_decoder_layers_31_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1256] model_decoder_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1257] model_decoder_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1258] reshape1353: R.Tensor((1,), dtype="int32") = R.reshape(input_ids, R.shape([1])) take7: R.Tensor((1, 1280), dtype="float16") = R.take(model_decoder_embed_tokens_weight5, reshape1353, axis=0) reshape1354: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(take7, R.shape([1, 1, 1280])) lv264: R.Tensor((1,), dtype="int32") = R.call_pure_packed("vm.builtin.attention_kv_cache_get_query_positions", paged_kv_cache, sinfo_args=(R.Tensor((1,), dtype="int32"),)) take8: R.Tensor((1, 1280), dtype="float16") = R.take(model_decoder_embed_positions_weight5, lv264, axis=0) reshape1355: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(take8, R.shape([1, 1, 1280])) add1220: R.Tensor((1, 1, 1280), dtype="float16") = R.add(reshape1354, reshape1355) layer_norm356: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1220, model_decoder_layers_0_self_attn_layer_norm_weight5, model_decoder_layers_0_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1028: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_0_self_attn_q_proj_weight5, axes=None) matmul1027: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm356, permute_dims1028, out_dtype="void") add1221: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1027, model_decoder_layers_0_self_attn_q_proj_bias5) reshape1356: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1221, R.shape([1, 1, 20, 64])) permute_dims1029: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_0_self_attn_k_proj_weight5, axes=None) matmul1028: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm356, permute_dims1029, out_dtype="void") 
reshape1357: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(matmul1028, R.shape([1, 1, 20, 64])) permute_dims1030: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_0_self_attn_v_proj_weight5, axes=None) matmul1029: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm356, permute_dims1030, out_dtype="void") add1222: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1029, model_decoder_layers_0_self_attn_v_proj_bias5) reshape1358: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1222, R.shape([1, 1, 20, 64])) concat96: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1356, reshape1357, reshape1358), axis=2) reshape1359: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat96, R.shape([1, 60, 64])) lv265 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(0), R.prim_value(T.float32(1)), reshape1359), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1360: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv265, R.shape([1, 1, 20, 64])) reshape1361: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1360, R.shape([1, 1, 1280])) permute_dims1031: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_0_self_attn_out_proj_weight5, axes=None) matmul1030: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1361, permute_dims1031, out_dtype="void") add1223: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1030, model_decoder_layers_0_self_attn_out_proj_bias5) add1224: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1220, add1223) layer_norm357: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1224, model_decoder_layers_0_encoder_attn_layer_norm_weight5, model_decoder_layers_0_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1032: R.Tensor((1280, 1280), dtype="float16") = 
R.permute_dims(model_decoder_layers_0_encoder_attn_q_proj_weight5, axes=None) matmul1031: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm357, permute_dims1032, out_dtype="void") add1225: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1031, model_decoder_layers_0_encoder_attn_q_proj_bias5) reshape1362: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1225, R.shape([1, 1, 20, 64])) reshape1363: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1362, R.shape([1, 20, 64])) lv266 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(0), R.prim_value(T.float32(1)), reshape1363), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1364: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv266, R.shape([1, 1, 20, 64])) reshape1365: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1364, R.shape([1, 1, 1280])) permute_dims1033: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_0_encoder_attn_out_proj_weight5, axes=None) matmul1032: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1365, permute_dims1033, out_dtype="void") add1226: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1032, model_decoder_layers_0_encoder_attn_out_proj_bias5) add1227: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1224, add1226) layer_norm358: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1227, model_decoder_layers_0_final_layer_norm_weight5, model_decoder_layers_0_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1034: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_0_fc1_weight5, axes=None) matmul1033: R.Tensor((1, 1, 5120), dtype="float16") = R.matmul(layer_norm358, permute_dims1034, out_dtype="void") add1228: R.Tensor((1, 1, 5120), dtype="float16") = R.add(matmul1033, model_decoder_layers_0_fc1_bias5) gelu130: R.Tensor((1, 1, 5120), dtype="float16") = 
R.nn.gelu(add1228) permute_dims1035: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_0_fc2_weight5, axes=None) matmul1034: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(gelu130, permute_dims1035, out_dtype="void") add1229: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1034, model_decoder_layers_0_fc2_bias5) add1230: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1227, add1229) layer_norm359: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1230, model_decoder_layers_1_self_attn_layer_norm_weight5, model_decoder_layers_1_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1036: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_1_self_attn_q_proj_weight5, axes=None) matmul1035: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm359, permute_dims1036, out_dtype="void") add1231: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1035, model_decoder_layers_1_self_attn_q_proj_bias5) reshape1366: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1231, R.shape([1, 1, 20, 64])) permute_dims1037: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_1_self_attn_k_proj_weight5, axes=None) matmul1036: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm359, permute_dims1037, out_dtype="void") reshape1367: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(matmul1036, R.shape([1, 1, 20, 64])) permute_dims1038: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_1_self_attn_v_proj_weight5, axes=None) matmul1037: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm359, permute_dims1038, out_dtype="void") add1232: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1037, model_decoder_layers_1_self_attn_v_proj_bias5) reshape1368: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1232, R.shape([1, 1, 20, 64])) concat97: R.Tensor((1, 1, 60, 
64), dtype="float16") = R.concat((reshape1366, reshape1367, reshape1368), axis=2) reshape1369: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat97, R.shape([1, 60, 64])) lv267 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(1), R.prim_value(T.float32(1)), reshape1369), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1370: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv267, R.shape([1, 1, 20, 64])) reshape1371: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1370, R.shape([1, 1, 1280])) permute_dims1039: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_1_self_attn_out_proj_weight5, axes=None) matmul1038: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1371, permute_dims1039, out_dtype="void") add1233: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1038, model_decoder_layers_1_self_attn_out_proj_bias5) add1234: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1230, add1233) layer_norm360: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1234, model_decoder_layers_1_encoder_attn_layer_norm_weight5, model_decoder_layers_1_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1040: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_1_encoder_attn_q_proj_weight5, axes=None) matmul1039: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm360, permute_dims1040, out_dtype="void") add1235: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1039, model_decoder_layers_1_encoder_attn_q_proj_bias5) reshape1372: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1235, R.shape([1, 1, 20, 64])) reshape1373: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1372, R.shape([1, 20, 64])) lv268 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(1), R.prim_value(T.float32(1)), 
reshape1373), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1374: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv268, R.shape([1, 1, 20, 64])) reshape1375: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1374, R.shape([1, 1, 1280])) permute_dims1041: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_1_encoder_attn_out_proj_weight5, axes=None) matmul1040: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1375, permute_dims1041, out_dtype="void") add1236: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1040, model_decoder_layers_1_encoder_attn_out_proj_bias5) add1237: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1234, add1236) layer_norm361: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1237, model_decoder_layers_1_final_layer_norm_weight5, model_decoder_layers_1_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1042: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_1_fc1_weight5, axes=None) matmul1041: R.Tensor((1, 1, 5120), dtype="float16") = R.matmul(layer_norm361, permute_dims1042, out_dtype="void") add1238: R.Tensor((1, 1, 5120), dtype="float16") = R.add(matmul1041, model_decoder_layers_1_fc1_bias5) gelu131: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1238) permute_dims1043: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_1_fc2_weight5, axes=None) matmul1042: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(gelu131, permute_dims1043, out_dtype="void") add1239: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1042, model_decoder_layers_1_fc2_bias5) add1240: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1237, add1239) layer_norm362: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1240, model_decoder_layers_2_self_attn_layer_norm_weight5, model_decoder_layers_2_self_attn_layer_norm_bias5, axes=[-1], 
epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1044: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_2_self_attn_q_proj_weight5, axes=None) matmul1043: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm362, permute_dims1044, out_dtype="void") add1241: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1043, model_decoder_layers_2_self_attn_q_proj_bias5) reshape1376: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1241, R.shape([1, 1, 20, 64])) permute_dims1045: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_2_self_attn_k_proj_weight5, axes=None) matmul1044: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm362, permute_dims1045, out_dtype="void") reshape1377: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(matmul1044, R.shape([1, 1, 20, 64])) permute_dims1046: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_2_self_attn_v_proj_weight5, axes=None) matmul1045: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm362, permute_dims1046, out_dtype="void") add1242: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1045, model_decoder_layers_2_self_attn_v_proj_bias5) reshape1378: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1242, R.shape([1, 1, 20, 64])) concat98: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1376, reshape1377, reshape1378), axis=2) reshape1379: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat98, R.shape([1, 60, 64])) lv269 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(2), R.prim_value(T.float32(1)), reshape1379), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1380: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv269, R.shape([1, 1, 20, 64])) reshape1381: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1380, R.shape([1, 1, 1280])) permute_dims1047: R.Tensor((1280, 
1280), dtype="float16") = R.permute_dims(model_decoder_layers_2_self_attn_out_proj_weight5, axes=None) matmul1046: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1381, permute_dims1047, out_dtype="void") add1243: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1046, model_decoder_layers_2_self_attn_out_proj_bias5) add1244: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1240, add1243) layer_norm363: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1244, model_decoder_layers_2_encoder_attn_layer_norm_weight5, model_decoder_layers_2_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1048: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_2_encoder_attn_q_proj_weight5, axes=None) matmul1047: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm363, permute_dims1048, out_dtype="void") add1245: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1047, model_decoder_layers_2_encoder_attn_q_proj_bias5) reshape1382: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1245, R.shape([1, 1, 20, 64])) reshape1383: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1382, R.shape([1, 20, 64])) lv270 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(2), R.prim_value(T.float32(1)), reshape1383), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1384: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv270, R.shape([1, 1, 20, 64])) reshape1385: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1384, R.shape([1, 1, 1280])) permute_dims1049: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_2_encoder_attn_out_proj_weight5, axes=None) matmul1048: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1385, permute_dims1049, out_dtype="void") add1246: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1048, 
model_decoder_layers_2_encoder_attn_out_proj_bias5) add1247: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1244, add1246) layer_norm364: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1247, model_decoder_layers_2_final_layer_norm_weight5, model_decoder_layers_2_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1050: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_2_fc1_weight5, axes=None) matmul1049: R.Tensor((1, 1, 5120), dtype="float16") = R.matmul(layer_norm364, permute_dims1050, out_dtype="void") add1248: R.Tensor((1, 1, 5120), dtype="float16") = R.add(matmul1049, model_decoder_layers_2_fc1_bias5) gelu132: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1248) permute_dims1051: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_2_fc2_weight5, axes=None) matmul1050: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(gelu132, permute_dims1051, out_dtype="void") add1249: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1050, model_decoder_layers_2_fc2_bias5) add1250: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1247, add1249) layer_norm365: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1250, model_decoder_layers_3_self_attn_layer_norm_weight5, model_decoder_layers_3_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1052: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_3_self_attn_q_proj_weight5, axes=None) matmul1051: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm365, permute_dims1052, out_dtype="void") add1251: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1051, model_decoder_layers_3_self_attn_q_proj_bias5) reshape1386: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1251, R.shape([1, 1, 20, 64])) permute_dims1053: R.Tensor((1280, 1280), dtype="float16") = 
R.permute_dims(model_decoder_layers_3_self_attn_k_proj_weight5, axes=None) matmul1052: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm365, permute_dims1053, out_dtype="void") reshape1387: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(matmul1052, R.shape([1, 1, 20, 64])) permute_dims1054: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_3_self_attn_v_proj_weight5, axes=None) matmul1053: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm365, permute_dims1054, out_dtype="void") add1252: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1053, model_decoder_layers_3_self_attn_v_proj_bias5) reshape1388: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1252, R.shape([1, 1, 20, 64])) concat99: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1386, reshape1387, reshape1388), axis=2) reshape1389: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat99, R.shape([1, 60, 64])) lv271 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(3), R.prim_value(T.float32(1)), reshape1389), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1390: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv271, R.shape([1, 1, 20, 64])) reshape1391: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1390, R.shape([1, 1, 1280])) permute_dims1055: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_3_self_attn_out_proj_weight5, axes=None) matmul1054: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1391, permute_dims1055, out_dtype="void") add1253: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1054, model_decoder_layers_3_self_attn_out_proj_bias5) add1254: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1250, add1253) layer_norm366: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1254, model_decoder_layers_3_encoder_attn_layer_norm_weight5, 
model_decoder_layers_3_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1056: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_3_encoder_attn_q_proj_weight5, axes=None) matmul1055: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm366, permute_dims1056, out_dtype="void") add1255: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1055, model_decoder_layers_3_encoder_attn_q_proj_bias5) reshape1392: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1255, R.shape([1, 1, 20, 64])) reshape1393: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1392, R.shape([1, 20, 64])) lv272 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(3), R.prim_value(T.float32(1)), reshape1393), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1394: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv272, R.shape([1, 1, 20, 64])) reshape1395: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1394, R.shape([1, 1, 1280])) permute_dims1057: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_3_encoder_attn_out_proj_weight5, axes=None) matmul1056: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1395, permute_dims1057, out_dtype="void") add1256: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1056, model_decoder_layers_3_encoder_attn_out_proj_bias5) add1257: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1254, add1256) layer_norm367: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1257, model_decoder_layers_3_final_layer_norm_weight5, model_decoder_layers_3_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1058: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_3_fc1_weight5, axes=None) matmul1057: R.Tensor((1, 1, 5120), dtype="float16") = R.matmul(layer_norm367, 
permute_dims1058, out_dtype="void") add1258: R.Tensor((1, 1, 5120), dtype="float16") = R.add(matmul1057, model_decoder_layers_3_fc1_bias5) gelu133: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1258) permute_dims1059: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_3_fc2_weight5, axes=None) matmul1058: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(gelu133, permute_dims1059, out_dtype="void") add1259: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1058, model_decoder_layers_3_fc2_bias5) add1260: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1257, add1259) layer_norm368: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1260, model_decoder_layers_4_self_attn_layer_norm_weight5, model_decoder_layers_4_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1060: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_4_self_attn_q_proj_weight5, axes=None) matmul1059: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm368, permute_dims1060, out_dtype="void") add1261: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1059, model_decoder_layers_4_self_attn_q_proj_bias5) reshape1396: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1261, R.shape([1, 1, 20, 64])) permute_dims1061: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_4_self_attn_k_proj_weight5, axes=None) matmul1060: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm368, permute_dims1061, out_dtype="void") reshape1397: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(matmul1060, R.shape([1, 1, 20, 64])) permute_dims1062: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_4_self_attn_v_proj_weight5, axes=None) matmul1061: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm368, permute_dims1062, out_dtype="void") add1262: R.Tensor((1, 1, 1280), dtype="float16") = 
R.add(matmul1061, model_decoder_layers_4_self_attn_v_proj_bias5) reshape1398: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1262, R.shape([1, 1, 20, 64])) concat100: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1396, reshape1397, reshape1398), axis=2) reshape1399: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat100, R.shape([1, 60, 64])) lv273 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(4), R.prim_value(T.float32(1)), reshape1399), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1400: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv273, R.shape([1, 1, 20, 64])) reshape1401: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1400, R.shape([1, 1, 1280])) permute_dims1063: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_4_self_attn_out_proj_weight5, axes=None) matmul1062: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1401, permute_dims1063, out_dtype="void") add1263: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1062, model_decoder_layers_4_self_attn_out_proj_bias5) add1264: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1260, add1263) layer_norm369: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1264, model_decoder_layers_4_encoder_attn_layer_norm_weight5, model_decoder_layers_4_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1064: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_4_encoder_attn_q_proj_weight5, axes=None) matmul1063: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm369, permute_dims1064, out_dtype="void") add1265: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1063, model_decoder_layers_4_encoder_attn_q_proj_bias5) reshape1402: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1265, R.shape([1, 1, 20, 64])) reshape1403: R.Tensor((1, 20, 64), 
dtype="float16") = R.reshape(reshape1402, R.shape([1, 20, 64])) lv274 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(4), R.prim_value(T.float32(1)), reshape1403), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1404: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv274, R.shape([1, 1, 20, 64])) reshape1405: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1404, R.shape([1, 1, 1280])) permute_dims1065: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_4_encoder_attn_out_proj_weight5, axes=None) matmul1064: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1405, permute_dims1065, out_dtype="void") add1266: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1064, model_decoder_layers_4_encoder_attn_out_proj_bias5) add1267: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1264, add1266) layer_norm370: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1267, model_decoder_layers_4_final_layer_norm_weight5, model_decoder_layers_4_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1066: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_4_fc1_weight5, axes=None) matmul1065: R.Tensor((1, 1, 5120), dtype="float16") = R.matmul(layer_norm370, permute_dims1066, out_dtype="void") add1268: R.Tensor((1, 1, 5120), dtype="float16") = R.add(matmul1065, model_decoder_layers_4_fc1_bias5) gelu134: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1268) permute_dims1067: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_4_fc2_weight5, axes=None) matmul1066: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(gelu134, permute_dims1067, out_dtype="void") add1269: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1066, model_decoder_layers_4_fc2_bias5) add1270: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1267, add1269) layer_norm371: 
R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1270, model_decoder_layers_5_self_attn_layer_norm_weight5, model_decoder_layers_5_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1068: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_5_self_attn_q_proj_weight5, axes=None) matmul1067: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm371, permute_dims1068, out_dtype="void") add1271: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1067, model_decoder_layers_5_self_attn_q_proj_bias5) reshape1406: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1271, R.shape([1, 1, 20, 64])) permute_dims1069: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_5_self_attn_k_proj_weight5, axes=None) matmul1068: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm371, permute_dims1069, out_dtype="void") reshape1407: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(matmul1068, R.shape([1, 1, 20, 64])) permute_dims1070: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_5_self_attn_v_proj_weight5, axes=None) matmul1069: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm371, permute_dims1070, out_dtype="void") add1272: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1069, model_decoder_layers_5_self_attn_v_proj_bias5) reshape1408: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1272, R.shape([1, 1, 20, 64])) concat101: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1406, reshape1407, reshape1408), axis=2) reshape1409: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat101, R.shape([1, 60, 64])) lv275 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(5), R.prim_value(T.float32(1)), reshape1409), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1410: R.Tensor((1, 1, 20, 64), dtype="float16") 
= R.reshape(lv275, R.shape([1, 1, 20, 64])) reshape1411: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1410, R.shape([1, 1, 1280])) permute_dims1071: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_5_self_attn_out_proj_weight5, axes=None) matmul1070: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1411, permute_dims1071, out_dtype="void") add1273: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1070, model_decoder_layers_5_self_attn_out_proj_bias5) add1274: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1270, add1273) layer_norm372: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1274, model_decoder_layers_5_encoder_attn_layer_norm_weight5, model_decoder_layers_5_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1072: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_5_encoder_attn_q_proj_weight5, axes=None) matmul1071: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm372, permute_dims1072, out_dtype="void") add1275: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1071, model_decoder_layers_5_encoder_attn_q_proj_bias5) reshape1412: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1275, R.shape([1, 1, 20, 64])) reshape1413: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1412, R.shape([1, 20, 64])) lv276 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(5), R.prim_value(T.float32(1)), reshape1413), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1414: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv276, R.shape([1, 1, 20, 64])) reshape1415: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1414, R.shape([1, 1, 1280])) permute_dims1073: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_5_encoder_attn_out_proj_weight5, axes=None) matmul1072: R.Tensor((1, 1, 1280), 
dtype="float16") = R.matmul(reshape1415, permute_dims1073, out_dtype="void") add1276: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1072, model_decoder_layers_5_encoder_attn_out_proj_bias5) add1277: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1274, add1276) layer_norm373: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1277, model_decoder_layers_5_final_layer_norm_weight5, model_decoder_layers_5_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1074: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_5_fc1_weight5, axes=None) matmul1073: R.Tensor((1, 1, 5120), dtype="float16") = R.matmul(layer_norm373, permute_dims1074, out_dtype="void") add1278: R.Tensor((1, 1, 5120), dtype="float16") = R.add(matmul1073, model_decoder_layers_5_fc1_bias5) gelu135: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1278) permute_dims1075: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_5_fc2_weight5, axes=None) matmul1074: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(gelu135, permute_dims1075, out_dtype="void") add1279: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1074, model_decoder_layers_5_fc2_bias5) add1280: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1277, add1279) layer_norm374: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1280, model_decoder_layers_6_self_attn_layer_norm_weight5, model_decoder_layers_6_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1076: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_6_self_attn_q_proj_weight5, axes=None) matmul1075: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm374, permute_dims1076, out_dtype="void") add1281: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1075, model_decoder_layers_6_self_attn_q_proj_bias5) reshape1416: R.Tensor((1, 1, 20, 64), 
dtype="float16") = R.reshape(add1281, R.shape([1, 1, 20, 64])) permute_dims1077: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_6_self_attn_k_proj_weight5, axes=None) matmul1076: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm374, permute_dims1077, out_dtype="void") reshape1417: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(matmul1076, R.shape([1, 1, 20, 64])) permute_dims1078: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_6_self_attn_v_proj_weight5, axes=None) matmul1077: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm374, permute_dims1078, out_dtype="void") add1282: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1077, model_decoder_layers_6_self_attn_v_proj_bias5) reshape1418: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1282, R.shape([1, 1, 20, 64])) concat102: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1416, reshape1417, reshape1418), axis=2) reshape1419: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat102, R.shape([1, 60, 64])) lv277 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(6), R.prim_value(T.float32(1)), reshape1419), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1420: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv277, R.shape([1, 1, 20, 64])) reshape1421: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1420, R.shape([1, 1, 1280])) permute_dims1079: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_6_self_attn_out_proj_weight5, axes=None) matmul1078: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1421, permute_dims1079, out_dtype="void") add1283: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1078, model_decoder_layers_6_self_attn_out_proj_bias5) add1284: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1280, add1283) layer_norm375: R.Tensor((1, 1, 1280), dtype="float16") 
= R.nn.layer_norm(add1284, model_decoder_layers_6_encoder_attn_layer_norm_weight5, model_decoder_layers_6_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1080: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_6_encoder_attn_q_proj_weight5, axes=None) matmul1079: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm375, permute_dims1080, out_dtype="void") add1285: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1079, model_decoder_layers_6_encoder_attn_q_proj_bias5) reshape1422: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1285, R.shape([1, 1, 20, 64])) reshape1423: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1422, R.shape([1, 20, 64])) lv278 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(6), R.prim_value(T.float32(1)), reshape1423), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1424: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv278, R.shape([1, 1, 20, 64])) reshape1425: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1424, R.shape([1, 1, 1280])) permute_dims1081: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_6_encoder_attn_out_proj_weight5, axes=None) matmul1080: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1425, permute_dims1081, out_dtype="void") add1286: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1080, model_decoder_layers_6_encoder_attn_out_proj_bias5) add1287: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1284, add1286) layer_norm376: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1287, model_decoder_layers_6_final_layer_norm_weight5, model_decoder_layers_6_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1082: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_6_fc1_weight5, axes=None) 
matmul1081: R.Tensor((1, 1, 5120), dtype="float16") = R.matmul(layer_norm376, permute_dims1082, out_dtype="void") add1288: R.Tensor((1, 1, 5120), dtype="float16") = R.add(matmul1081, model_decoder_layers_6_fc1_bias5) gelu136: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1288) permute_dims1083: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_6_fc2_weight5, axes=None) matmul1082: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(gelu136, permute_dims1083, out_dtype="void") add1289: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1082, model_decoder_layers_6_fc2_bias5) add1290: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1287, add1289) layer_norm377: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1290, model_decoder_layers_7_self_attn_layer_norm_weight5, model_decoder_layers_7_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1084: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_7_self_attn_q_proj_weight5, axes=None) matmul1083: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm377, permute_dims1084, out_dtype="void") add1291: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1083, model_decoder_layers_7_self_attn_q_proj_bias5) reshape1426: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1291, R.shape([1, 1, 20, 64])) permute_dims1085: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_7_self_attn_k_proj_weight5, axes=None) matmul1084: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm377, permute_dims1085, out_dtype="void") reshape1427: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(matmul1084, R.shape([1, 1, 20, 64])) permute_dims1086: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_7_self_attn_v_proj_weight5, axes=None) matmul1085: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm377, permute_dims1086, 
out_dtype="void") add1292: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1085, model_decoder_layers_7_self_attn_v_proj_bias5) reshape1428: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1292, R.shape([1, 1, 20, 64])) concat103: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1426, reshape1427, reshape1428), axis=2) reshape1429: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat103, R.shape([1, 60, 64])) lv279 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(7), R.prim_value(T.float32(1)), reshape1429), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1430: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv279, R.shape([1, 1, 20, 64])) reshape1431: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1430, R.shape([1, 1, 1280])) permute_dims1087: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_7_self_attn_out_proj_weight5, axes=None) matmul1086: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1431, permute_dims1087, out_dtype="void") add1293: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1086, model_decoder_layers_7_self_attn_out_proj_bias5) add1294: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1290, add1293) layer_norm378: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1294, model_decoder_layers_7_encoder_attn_layer_norm_weight5, model_decoder_layers_7_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1088: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_7_encoder_attn_q_proj_weight5, axes=None) matmul1087: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm378, permute_dims1088, out_dtype="void") add1295: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1087, model_decoder_layers_7_encoder_attn_q_proj_bias5) reshape1432: R.Tensor((1, 1, 20, 64), dtype="float16") = 
R.reshape(add1295, R.shape([1, 1, 20, 64])) reshape1433: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1432, R.shape([1, 20, 64])) lv280 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(7), R.prim_value(T.float32(1)), reshape1433), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1434: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv280, R.shape([1, 1, 20, 64])) reshape1435: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1434, R.shape([1, 1, 1280])) permute_dims1089: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_7_encoder_attn_out_proj_weight5, axes=None) matmul1088: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1435, permute_dims1089, out_dtype="void") add1296: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1088, model_decoder_layers_7_encoder_attn_out_proj_bias5) add1297: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1294, add1296) layer_norm379: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1297, model_decoder_layers_7_final_layer_norm_weight5, model_decoder_layers_7_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1090: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_7_fc1_weight5, axes=None) matmul1089: R.Tensor((1, 1, 5120), dtype="float16") = R.matmul(layer_norm379, permute_dims1090, out_dtype="void") add1298: R.Tensor((1, 1, 5120), dtype="float16") = R.add(matmul1089, model_decoder_layers_7_fc1_bias5) gelu137: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1298) permute_dims1091: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_7_fc2_weight5, axes=None) matmul1090: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(gelu137, permute_dims1091, out_dtype="void") add1299: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1090, model_decoder_layers_7_fc2_bias5) add1300: 
R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1297, add1299) layer_norm380: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1300, model_decoder_layers_8_self_attn_layer_norm_weight5, model_decoder_layers_8_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1092: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_8_self_attn_q_proj_weight5, axes=None) matmul1091: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm380, permute_dims1092, out_dtype="void") add1301: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1091, model_decoder_layers_8_self_attn_q_proj_bias5) reshape1436: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1301, R.shape([1, 1, 20, 64])) permute_dims1093: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_8_self_attn_k_proj_weight5, axes=None) matmul1092: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm380, permute_dims1093, out_dtype="void") reshape1437: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(matmul1092, R.shape([1, 1, 20, 64])) permute_dims1094: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_8_self_attn_v_proj_weight5, axes=None) matmul1093: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm380, permute_dims1094, out_dtype="void") add1302: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1093, model_decoder_layers_8_self_attn_v_proj_bias5) reshape1438: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1302, R.shape([1, 1, 20, 64])) concat104: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1436, reshape1437, reshape1438), axis=2) reshape1439: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat104, R.shape([1, 60, 64])) lv281 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(8), R.prim_value(T.float32(1)), reshape1439), out_sinfo=R.Tensor((1, 
20, 64), dtype="float16")) reshape1440: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv281, R.shape([1, 1, 20, 64])) reshape1441: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1440, R.shape([1, 1, 1280])) permute_dims1095: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_8_self_attn_out_proj_weight5, axes=None) matmul1094: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1441, permute_dims1095, out_dtype="void") add1303: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1094, model_decoder_layers_8_self_attn_out_proj_bias5) add1304: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1300, add1303) layer_norm381: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1304, model_decoder_layers_8_encoder_attn_layer_norm_weight5, model_decoder_layers_8_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1096: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_8_encoder_attn_q_proj_weight5, axes=None) matmul1095: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm381, permute_dims1096, out_dtype="void") add1305: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1095, model_decoder_layers_8_encoder_attn_q_proj_bias5) reshape1442: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1305, R.shape([1, 1, 20, 64])) reshape1443: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1442, R.shape([1, 20, 64])) lv282 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(8), R.prim_value(T.float32(1)), reshape1443), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1444: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv282, R.shape([1, 1, 20, 64])) reshape1445: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1444, R.shape([1, 1, 1280])) permute_dims1097: R.Tensor((1280, 1280), dtype="float16") = 
R.permute_dims(model_decoder_layers_8_encoder_attn_out_proj_weight5, axes=None) matmul1096: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1445, permute_dims1097, out_dtype="void") add1306: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1096, model_decoder_layers_8_encoder_attn_out_proj_bias5) add1307: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1304, add1306) layer_norm382: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1307, model_decoder_layers_8_final_layer_norm_weight5, model_decoder_layers_8_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1098: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_8_fc1_weight5, axes=None) matmul1097: R.Tensor((1, 1, 5120), dtype="float16") = R.matmul(layer_norm382, permute_dims1098, out_dtype="void") add1308: R.Tensor((1, 1, 5120), dtype="float16") = R.add(matmul1097, model_decoder_layers_8_fc1_bias5) gelu138: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1308) permute_dims1099: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_8_fc2_weight5, axes=None) matmul1098: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(gelu138, permute_dims1099, out_dtype="void") add1309: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1098, model_decoder_layers_8_fc2_bias5) add1310: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1307, add1309) layer_norm383: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1310, model_decoder_layers_9_self_attn_layer_norm_weight5, model_decoder_layers_9_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1100: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_9_self_attn_q_proj_weight5, axes=None) matmul1099: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm383, permute_dims1100, out_dtype="void") add1311: R.Tensor((1, 1, 1280), 
dtype="float16") = R.add(matmul1099, model_decoder_layers_9_self_attn_q_proj_bias5) reshape1446: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1311, R.shape([1, 1, 20, 64])) permute_dims1101: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_9_self_attn_k_proj_weight5, axes=None) matmul1100: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm383, permute_dims1101, out_dtype="void") reshape1447: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(matmul1100, R.shape([1, 1, 20, 64])) permute_dims1102: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_9_self_attn_v_proj_weight5, axes=None) matmul1101: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm383, permute_dims1102, out_dtype="void") add1312: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1101, model_decoder_layers_9_self_attn_v_proj_bias5) reshape1448: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1312, R.shape([1, 1, 20, 64])) concat105: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1446, reshape1447, reshape1448), axis=2) reshape1449: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat105, R.shape([1, 60, 64])) lv283 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(9), R.prim_value(T.float32(1)), reshape1449), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1450: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv283, R.shape([1, 1, 20, 64])) reshape1451: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1450, R.shape([1, 1, 1280])) permute_dims1103: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_9_self_attn_out_proj_weight5, axes=None) matmul1102: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1451, permute_dims1103, out_dtype="void") add1313: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1102, model_decoder_layers_9_self_attn_out_proj_bias5) add1314: 
R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1310, add1313) layer_norm384: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1314, model_decoder_layers_9_encoder_attn_layer_norm_weight5, model_decoder_layers_9_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1104: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_9_encoder_attn_q_proj_weight5, axes=None) matmul1103: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm384, permute_dims1104, out_dtype="void") add1315: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1103, model_decoder_layers_9_encoder_attn_q_proj_bias5) reshape1452: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1315, R.shape([1, 1, 20, 64])) reshape1453: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1452, R.shape([1, 20, 64])) lv284 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(9), R.prim_value(T.float32(1)), reshape1453), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1454: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv284, R.shape([1, 1, 20, 64])) reshape1455: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1454, R.shape([1, 1, 1280])) permute_dims1105: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_9_encoder_attn_out_proj_weight5, axes=None) matmul1104: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1455, permute_dims1105, out_dtype="void") add1316: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1104, model_decoder_layers_9_encoder_attn_out_proj_bias5) add1317: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1314, add1316) layer_norm385: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1317, model_decoder_layers_9_final_layer_norm_weight5, model_decoder_layers_9_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) 
permute_dims1106: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_9_fc1_weight5, axes=None) matmul1105: R.Tensor((1, 1, 5120), dtype="float16") = R.matmul(layer_norm385, permute_dims1106, out_dtype="void") add1318: R.Tensor((1, 1, 5120), dtype="float16") = R.add(matmul1105, model_decoder_layers_9_fc1_bias5) gelu139: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1318) permute_dims1107: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_9_fc2_weight5, axes=None) matmul1106: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(gelu139, permute_dims1107, out_dtype="void") add1319: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1106, model_decoder_layers_9_fc2_bias5) add1320: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1317, add1319) layer_norm386: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1320, model_decoder_layers_10_self_attn_layer_norm_weight5, model_decoder_layers_10_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1108: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_10_self_attn_q_proj_weight5, axes=None) matmul1107: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm386, permute_dims1108, out_dtype="void") add1321: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1107, model_decoder_layers_10_self_attn_q_proj_bias5) reshape1456: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1321, R.shape([1, 1, 20, 64])) permute_dims1109: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_10_self_attn_k_proj_weight5, axes=None) matmul1108: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm386, permute_dims1109, out_dtype="void") reshape1457: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(matmul1108, R.shape([1, 1, 20, 64])) permute_dims1110: R.Tensor((1280, 1280), dtype="float16") = 
R.permute_dims(model_decoder_layers_10_self_attn_v_proj_weight5, axes=None) matmul1109: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm386, permute_dims1110, out_dtype="void") add1322: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1109, model_decoder_layers_10_self_attn_v_proj_bias5) reshape1458: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1322, R.shape([1, 1, 20, 64])) concat106: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1456, reshape1457, reshape1458), axis=2) reshape1459: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat106, R.shape([1, 60, 64])) lv285 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(10), R.prim_value(T.float32(1)), reshape1459), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1460: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv285, R.shape([1, 1, 20, 64])) reshape1461: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1460, R.shape([1, 1, 1280])) permute_dims1111: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_10_self_attn_out_proj_weight5, axes=None) matmul1110: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1461, permute_dims1111, out_dtype="void") add1323: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1110, model_decoder_layers_10_self_attn_out_proj_bias5) add1324: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1320, add1323) layer_norm387: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1324, model_decoder_layers_10_encoder_attn_layer_norm_weight5, model_decoder_layers_10_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1112: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_10_encoder_attn_q_proj_weight5, axes=None) matmul1111: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm387, permute_dims1112, out_dtype="void") add1325: 
R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1111, model_decoder_layers_10_encoder_attn_q_proj_bias5) reshape1462: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1325, R.shape([1, 1, 20, 64])) reshape1463: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1462, R.shape([1, 20, 64])) lv286 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(10), R.prim_value(T.float32(1)), reshape1463), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1464: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv286, R.shape([1, 1, 20, 64])) reshape1465: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1464, R.shape([1, 1, 1280])) permute_dims1113: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_10_encoder_attn_out_proj_weight5, axes=None) matmul1112: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1465, permute_dims1113, out_dtype="void") add1326: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1112, model_decoder_layers_10_encoder_attn_out_proj_bias5) add1327: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1324, add1326) layer_norm388: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1327, model_decoder_layers_10_final_layer_norm_weight5, model_decoder_layers_10_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1114: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_10_fc1_weight5, axes=None) matmul1113: R.Tensor((1, 1, 5120), dtype="float16") = R.matmul(layer_norm388, permute_dims1114, out_dtype="void") add1328: R.Tensor((1, 1, 5120), dtype="float16") = R.add(matmul1113, model_decoder_layers_10_fc1_bias5) gelu140: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1328) permute_dims1115: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_10_fc2_weight5, axes=None) matmul1114: R.Tensor((1, 1, 1280), dtype="float16") = 
R.matmul(gelu140, permute_dims1115, out_dtype="void") add1329: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1114, model_decoder_layers_10_fc2_bias5) add1330: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1327, add1329) layer_norm389: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1330, model_decoder_layers_11_self_attn_layer_norm_weight5, model_decoder_layers_11_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1116: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_11_self_attn_q_proj_weight5, axes=None) matmul1115: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm389, permute_dims1116, out_dtype="void") add1331: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1115, model_decoder_layers_11_self_attn_q_proj_bias5) reshape1466: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1331, R.shape([1, 1, 20, 64])) permute_dims1117: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_11_self_attn_k_proj_weight5, axes=None) matmul1116: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm389, permute_dims1117, out_dtype="void") reshape1467: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(matmul1116, R.shape([1, 1, 20, 64])) permute_dims1118: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_11_self_attn_v_proj_weight5, axes=None) matmul1117: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm389, permute_dims1118, out_dtype="void") add1332: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1117, model_decoder_layers_11_self_attn_v_proj_bias5) reshape1468: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1332, R.shape([1, 1, 20, 64])) concat107: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1466, reshape1467, reshape1468), axis=2) reshape1469: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat107, R.shape([1, 60, 64])) lv287 = 
R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(11), R.prim_value(T.float32(1)), reshape1469), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1470: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv287, R.shape([1, 1, 20, 64])) reshape1471: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1470, R.shape([1, 1, 1280])) permute_dims1119: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_11_self_attn_out_proj_weight5, axes=None) matmul1118: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1471, permute_dims1119, out_dtype="void") add1333: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1118, model_decoder_layers_11_self_attn_out_proj_bias5) add1334: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1330, add1333) layer_norm390: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1334, model_decoder_layers_11_encoder_attn_layer_norm_weight5, model_decoder_layers_11_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1120: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_11_encoder_attn_q_proj_weight5, axes=None) matmul1119: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm390, permute_dims1120, out_dtype="void") add1335: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1119, model_decoder_layers_11_encoder_attn_q_proj_bias5) reshape1472: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1335, R.shape([1, 1, 20, 64])) reshape1473: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1472, R.shape([1, 20, 64])) lv288 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(11), R.prim_value(T.float32(1)), reshape1473), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1474: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv288, R.shape([1, 1, 20, 64])) reshape1475: 
R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1474, R.shape([1, 1, 1280])) permute_dims1121: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_11_encoder_attn_out_proj_weight5, axes=None) matmul1120: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1475, permute_dims1121, out_dtype="void") add1336: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1120, model_decoder_layers_11_encoder_attn_out_proj_bias5) add1337: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1334, add1336) layer_norm391: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1337, model_decoder_layers_11_final_layer_norm_weight5, model_decoder_layers_11_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1122: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_11_fc1_weight5, axes=None) matmul1121: R.Tensor((1, 1, 5120), dtype="float16") = R.matmul(layer_norm391, permute_dims1122, out_dtype="void") add1338: R.Tensor((1, 1, 5120), dtype="float16") = R.add(matmul1121, model_decoder_layers_11_fc1_bias5) gelu141: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1338) permute_dims1123: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_11_fc2_weight5, axes=None) matmul1122: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(gelu141, permute_dims1123, out_dtype="void") add1339: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1122, model_decoder_layers_11_fc2_bias5) add1340: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1337, add1339) layer_norm392: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1340, model_decoder_layers_12_self_attn_layer_norm_weight5, model_decoder_layers_12_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1124: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_12_self_attn_q_proj_weight5, axes=None) 
matmul1123: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm392, permute_dims1124, out_dtype="void") add1341: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1123, model_decoder_layers_12_self_attn_q_proj_bias5) reshape1476: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1341, R.shape([1, 1, 20, 64])) permute_dims1125: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_12_self_attn_k_proj_weight5, axes=None) matmul1124: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm392, permute_dims1125, out_dtype="void") reshape1477: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(matmul1124, R.shape([1, 1, 20, 64])) permute_dims1126: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_12_self_attn_v_proj_weight5, axes=None) matmul1125: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm392, permute_dims1126, out_dtype="void") add1342: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1125, model_decoder_layers_12_self_attn_v_proj_bias5) reshape1478: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1342, R.shape([1, 1, 20, 64])) concat108: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1476, reshape1477, reshape1478), axis=2) reshape1479: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat108, R.shape([1, 60, 64])) lv289 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(12), R.prim_value(T.float32(1)), reshape1479), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1480: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv289, R.shape([1, 1, 20, 64])) reshape1481: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1480, R.shape([1, 1, 1280])) permute_dims1127: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_12_self_attn_out_proj_weight5, axes=None) matmul1126: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1481, 
permute_dims1127, out_dtype="void") add1343: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1126, model_decoder_layers_12_self_attn_out_proj_bias5) add1344: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1340, add1343) layer_norm393: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1344, model_decoder_layers_12_encoder_attn_layer_norm_weight5, model_decoder_layers_12_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1128: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_12_encoder_attn_q_proj_weight5, axes=None) matmul1127: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm393, permute_dims1128, out_dtype="void") add1345: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1127, model_decoder_layers_12_encoder_attn_q_proj_bias5) reshape1482: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1345, R.shape([1, 1, 20, 64])) reshape1483: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1482, R.shape([1, 20, 64])) lv290 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(12), R.prim_value(T.float32(1)), reshape1483), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1484: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv290, R.shape([1, 1, 20, 64])) reshape1485: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1484, R.shape([1, 1, 1280])) permute_dims1129: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_12_encoder_attn_out_proj_weight5, axes=None) matmul1128: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1485, permute_dims1129, out_dtype="void") add1346: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1128, model_decoder_layers_12_encoder_attn_out_proj_bias5) add1347: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1344, add1346) layer_norm394: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1347, 
model_decoder_layers_12_final_layer_norm_weight5, model_decoder_layers_12_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1130: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_12_fc1_weight5, axes=None) matmul1129: R.Tensor((1, 1, 5120), dtype="float16") = R.matmul(layer_norm394, permute_dims1130, out_dtype="void") add1348: R.Tensor((1, 1, 5120), dtype="float16") = R.add(matmul1129, model_decoder_layers_12_fc1_bias5) gelu142: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1348) permute_dims1131: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_12_fc2_weight5, axes=None) matmul1130: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(gelu142, permute_dims1131, out_dtype="void") add1349: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1130, model_decoder_layers_12_fc2_bias5) add1350: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1347, add1349) layer_norm395: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1350, model_decoder_layers_13_self_attn_layer_norm_weight5, model_decoder_layers_13_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1132: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_13_self_attn_q_proj_weight5, axes=None) matmul1131: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm395, permute_dims1132, out_dtype="void") add1351: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1131, model_decoder_layers_13_self_attn_q_proj_bias5) reshape1486: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1351, R.shape([1, 1, 20, 64])) permute_dims1133: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_13_self_attn_k_proj_weight5, axes=None) matmul1132: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm395, permute_dims1133, out_dtype="void") reshape1487: R.Tensor((1, 1, 20, 64), 
dtype="float16") = R.reshape(matmul1132, R.shape([1, 1, 20, 64])) permute_dims1134: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_13_self_attn_v_proj_weight5, axes=None) matmul1133: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm395, permute_dims1134, out_dtype="void") add1352: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1133, model_decoder_layers_13_self_attn_v_proj_bias5) reshape1488: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1352, R.shape([1, 1, 20, 64])) concat109: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1486, reshape1487, reshape1488), axis=2) reshape1489: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat109, R.shape([1, 60, 64])) lv291 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(13), R.prim_value(T.float32(1)), reshape1489), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1490: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv291, R.shape([1, 1, 20, 64])) reshape1491: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1490, R.shape([1, 1, 1280])) permute_dims1135: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_13_self_attn_out_proj_weight5, axes=None) matmul1134: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1491, permute_dims1135, out_dtype="void") add1353: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1134, model_decoder_layers_13_self_attn_out_proj_bias5) add1354: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1350, add1353) layer_norm396: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1354, model_decoder_layers_13_encoder_attn_layer_norm_weight5, model_decoder_layers_13_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1136: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_13_encoder_attn_q_proj_weight5, axes=None) 
matmul1135: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm396, permute_dims1136, out_dtype="void") add1355: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1135, model_decoder_layers_13_encoder_attn_q_proj_bias5) reshape1492: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1355, R.shape([1, 1, 20, 64])) reshape1493: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1492, R.shape([1, 20, 64])) lv292 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(13), R.prim_value(T.float32(1)), reshape1493), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1494: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv292, R.shape([1, 1, 20, 64])) reshape1495: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1494, R.shape([1, 1, 1280])) permute_dims1137: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_13_encoder_attn_out_proj_weight5, axes=None) matmul1136: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1495, permute_dims1137, out_dtype="void") add1356: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1136, model_decoder_layers_13_encoder_attn_out_proj_bias5) add1357: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1354, add1356) layer_norm397: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1357, model_decoder_layers_13_final_layer_norm_weight5, model_decoder_layers_13_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1138: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_13_fc1_weight5, axes=None) matmul1137: R.Tensor((1, 1, 5120), dtype="float16") = R.matmul(layer_norm397, permute_dims1138, out_dtype="void") add1358: R.Tensor((1, 1, 5120), dtype="float16") = R.add(matmul1137, model_decoder_layers_13_fc1_bias5) gelu143: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1358) permute_dims1139: R.Tensor((5120, 1280), 
dtype="float16") = R.permute_dims(model_decoder_layers_13_fc2_weight5, axes=None) matmul1138: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(gelu143, permute_dims1139, out_dtype="void") add1359: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1138, model_decoder_layers_13_fc2_bias5) add1360: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1357, add1359) layer_norm398: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1360, model_decoder_layers_14_self_attn_layer_norm_weight5, model_decoder_layers_14_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1140: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_14_self_attn_q_proj_weight5, axes=None) matmul1139: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm398, permute_dims1140, out_dtype="void") add1361: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1139, model_decoder_layers_14_self_attn_q_proj_bias5) reshape1496: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1361, R.shape([1, 1, 20, 64])) permute_dims1141: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_14_self_attn_k_proj_weight5, axes=None) matmul1140: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm398, permute_dims1141, out_dtype="void") reshape1497: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(matmul1140, R.shape([1, 1, 20, 64])) permute_dims1142: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_14_self_attn_v_proj_weight5, axes=None) matmul1141: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm398, permute_dims1142, out_dtype="void") add1362: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1141, model_decoder_layers_14_self_attn_v_proj_bias5) reshape1498: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1362, R.shape([1, 1, 20, 64])) concat110: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1496, 
reshape1497, reshape1498), axis=2) reshape1499: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat110, R.shape([1, 60, 64])) lv293 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(14), R.prim_value(T.float32(1)), reshape1499), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1500: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv293, R.shape([1, 1, 20, 64])) reshape1501: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1500, R.shape([1, 1, 1280])) permute_dims1143: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_14_self_attn_out_proj_weight5, axes=None) matmul1142: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1501, permute_dims1143, out_dtype="void") add1363: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1142, model_decoder_layers_14_self_attn_out_proj_bias5) add1364: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1360, add1363) layer_norm399: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1364, model_decoder_layers_14_encoder_attn_layer_norm_weight5, model_decoder_layers_14_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1144: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_14_encoder_attn_q_proj_weight5, axes=None) matmul1143: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm399, permute_dims1144, out_dtype="void") add1365: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1143, model_decoder_layers_14_encoder_attn_q_proj_bias5) reshape1502: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1365, R.shape([1, 1, 20, 64])) reshape1503: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1502, R.shape([1, 20, 64])) lv294 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(14), R.prim_value(T.float32(1)), reshape1503), out_sinfo=R.Tensor((1, 
20, 64), dtype="float16")) reshape1504: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv294, R.shape([1, 1, 20, 64])) reshape1505: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1504, R.shape([1, 1, 1280])) permute_dims1145: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_14_encoder_attn_out_proj_weight5, axes=None) matmul1144: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1505, permute_dims1145, out_dtype="void") add1366: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1144, model_decoder_layers_14_encoder_attn_out_proj_bias5) add1367: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1364, add1366) layer_norm400: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1367, model_decoder_layers_14_final_layer_norm_weight5, model_decoder_layers_14_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1146: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_14_fc1_weight5, axes=None) matmul1145: R.Tensor((1, 1, 5120), dtype="float16") = R.matmul(layer_norm400, permute_dims1146, out_dtype="void") add1368: R.Tensor((1, 1, 5120), dtype="float16") = R.add(matmul1145, model_decoder_layers_14_fc1_bias5) gelu144: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1368) permute_dims1147: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_14_fc2_weight5, axes=None) matmul1146: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(gelu144, permute_dims1147, out_dtype="void") add1369: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1146, model_decoder_layers_14_fc2_bias5) add1370: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1367, add1369) layer_norm401: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1370, model_decoder_layers_15_self_attn_layer_norm_weight5, model_decoder_layers_15_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, 
scale=True) permute_dims1148: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_15_self_attn_q_proj_weight5, axes=None) matmul1147: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm401, permute_dims1148, out_dtype="void") add1371: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1147, model_decoder_layers_15_self_attn_q_proj_bias5) reshape1506: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1371, R.shape([1, 1, 20, 64])) permute_dims1149: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_15_self_attn_k_proj_weight5, axes=None) matmul1148: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm401, permute_dims1149, out_dtype="void") reshape1507: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(matmul1148, R.shape([1, 1, 20, 64])) permute_dims1150: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_15_self_attn_v_proj_weight5, axes=None) matmul1149: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm401, permute_dims1150, out_dtype="void") add1372: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1149, model_decoder_layers_15_self_attn_v_proj_bias5) reshape1508: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1372, R.shape([1, 1, 20, 64])) concat111: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1506, reshape1507, reshape1508), axis=2) reshape1509: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat111, R.shape([1, 60, 64])) lv295 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(15), R.prim_value(T.float32(1)), reshape1509), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1510: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv295, R.shape([1, 1, 20, 64])) reshape1511: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1510, R.shape([1, 1, 1280])) permute_dims1151: R.Tensor((1280, 1280), dtype="float16") = 
R.permute_dims(model_decoder_layers_15_self_attn_out_proj_weight5, axes=None) matmul1150: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1511, permute_dims1151, out_dtype="void") add1373: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1150, model_decoder_layers_15_self_attn_out_proj_bias5) add1374: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1370, add1373) layer_norm402: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1374, model_decoder_layers_15_encoder_attn_layer_norm_weight5, model_decoder_layers_15_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1152: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_15_encoder_attn_q_proj_weight5, axes=None) matmul1151: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm402, permute_dims1152, out_dtype="void") add1375: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1151, model_decoder_layers_15_encoder_attn_q_proj_bias5) reshape1512: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1375, R.shape([1, 1, 20, 64])) reshape1513: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1512, R.shape([1, 20, 64])) lv296 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(15), R.prim_value(T.float32(1)), reshape1513), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1514: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv296, R.shape([1, 1, 20, 64])) reshape1515: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1514, R.shape([1, 1, 1280])) permute_dims1153: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_15_encoder_attn_out_proj_weight5, axes=None) matmul1152: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1515, permute_dims1153, out_dtype="void") add1376: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1152, model_decoder_layers_15_encoder_attn_out_proj_bias5) 
add1377: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1374, add1376) layer_norm403: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1377, model_decoder_layers_15_final_layer_norm_weight5, model_decoder_layers_15_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1154: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_15_fc1_weight5, axes=None) matmul1153: R.Tensor((1, 1, 5120), dtype="float16") = R.matmul(layer_norm403, permute_dims1154, out_dtype="void") add1378: R.Tensor((1, 1, 5120), dtype="float16") = R.add(matmul1153, model_decoder_layers_15_fc1_bias5) gelu145: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1378) permute_dims1155: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_15_fc2_weight5, axes=None) matmul1154: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(gelu145, permute_dims1155, out_dtype="void") add1379: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1154, model_decoder_layers_15_fc2_bias5) add1380: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1377, add1379) layer_norm404: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1380, model_decoder_layers_16_self_attn_layer_norm_weight5, model_decoder_layers_16_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1156: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_16_self_attn_q_proj_weight5, axes=None) matmul1155: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm404, permute_dims1156, out_dtype="void") add1381: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1155, model_decoder_layers_16_self_attn_q_proj_bias5) reshape1516: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1381, R.shape([1, 1, 20, 64])) permute_dims1157: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_16_self_attn_k_proj_weight5, 
axes=None) matmul1156: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm404, permute_dims1157, out_dtype="void") reshape1517: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(matmul1156, R.shape([1, 1, 20, 64])) permute_dims1158: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_16_self_attn_v_proj_weight5, axes=None) matmul1157: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm404, permute_dims1158, out_dtype="void") add1382: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1157, model_decoder_layers_16_self_attn_v_proj_bias5) reshape1518: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1382, R.shape([1, 1, 20, 64])) concat112: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1516, reshape1517, reshape1518), axis=2) reshape1519: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat112, R.shape([1, 60, 64])) lv297 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(16), R.prim_value(T.float32(1)), reshape1519), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1520: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv297, R.shape([1, 1, 20, 64])) reshape1521: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1520, R.shape([1, 1, 1280])) permute_dims1159: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_16_self_attn_out_proj_weight5, axes=None) matmul1158: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1521, permute_dims1159, out_dtype="void") add1383: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1158, model_decoder_layers_16_self_attn_out_proj_bias5) add1384: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1380, add1383) layer_norm405: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1384, model_decoder_layers_16_encoder_attn_layer_norm_weight5, model_decoder_layers_16_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, 
center=True, scale=True) permute_dims1160: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_16_encoder_attn_q_proj_weight5, axes=None) matmul1159: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm405, permute_dims1160, out_dtype="void") add1385: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1159, model_decoder_layers_16_encoder_attn_q_proj_bias5) reshape1522: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1385, R.shape([1, 1, 20, 64])) reshape1523: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1522, R.shape([1, 20, 64])) lv298 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(16), R.prim_value(T.float32(1)), reshape1523), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1524: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv298, R.shape([1, 1, 20, 64])) reshape1525: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1524, R.shape([1, 1, 1280])) permute_dims1161: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_16_encoder_attn_out_proj_weight5, axes=None) matmul1160: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1525, permute_dims1161, out_dtype="void") add1386: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1160, model_decoder_layers_16_encoder_attn_out_proj_bias5) add1387: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1384, add1386) layer_norm406: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1387, model_decoder_layers_16_final_layer_norm_weight5, model_decoder_layers_16_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1162: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_16_fc1_weight5, axes=None) matmul1161: R.Tensor((1, 1, 5120), dtype="float16") = R.matmul(layer_norm406, permute_dims1162, out_dtype="void") add1388: R.Tensor((1, 1, 5120), dtype="float16") = 
R.add(matmul1161, model_decoder_layers_16_fc1_bias5) gelu146: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1388) permute_dims1163: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_16_fc2_weight5, axes=None) matmul1162: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(gelu146, permute_dims1163, out_dtype="void") add1389: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1162, model_decoder_layers_16_fc2_bias5) add1390: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1387, add1389) layer_norm407: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1390, model_decoder_layers_17_self_attn_layer_norm_weight5, model_decoder_layers_17_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1164: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_17_self_attn_q_proj_weight5, axes=None) matmul1163: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm407, permute_dims1164, out_dtype="void") add1391: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1163, model_decoder_layers_17_self_attn_q_proj_bias5) reshape1526: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1391, R.shape([1, 1, 20, 64])) permute_dims1165: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_17_self_attn_k_proj_weight5, axes=None) matmul1164: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm407, permute_dims1165, out_dtype="void") reshape1527: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(matmul1164, R.shape([1, 1, 20, 64])) permute_dims1166: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_17_self_attn_v_proj_weight5, axes=None) matmul1165: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm407, permute_dims1166, out_dtype="void") add1392: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1165, model_decoder_layers_17_self_attn_v_proj_bias5) reshape1528: 
R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1392, R.shape([1, 1, 20, 64])) concat113: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1526, reshape1527, reshape1528), axis=2) reshape1529: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat113, R.shape([1, 60, 64])) lv299 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(17), R.prim_value(T.float32(1)), reshape1529), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1530: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv299, R.shape([1, 1, 20, 64])) reshape1531: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1530, R.shape([1, 1, 1280])) permute_dims1167: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_17_self_attn_out_proj_weight5, axes=None) matmul1166: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1531, permute_dims1167, out_dtype="void") add1393: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1166, model_decoder_layers_17_self_attn_out_proj_bias5) add1394: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1390, add1393) layer_norm408: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1394, model_decoder_layers_17_encoder_attn_layer_norm_weight5, model_decoder_layers_17_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1168: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_17_encoder_attn_q_proj_weight5, axes=None) matmul1167: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm408, permute_dims1168, out_dtype="void") add1395: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1167, model_decoder_layers_17_encoder_attn_q_proj_bias5) reshape1532: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1395, R.shape([1, 1, 20, 64])) reshape1533: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1532, R.shape([1, 20, 64])) lv300 = 
R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(17), R.prim_value(T.float32(1)), reshape1533), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1534: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv300, R.shape([1, 1, 20, 64])) reshape1535: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1534, R.shape([1, 1, 1280])) permute_dims1169: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_17_encoder_attn_out_proj_weight5, axes=None) matmul1168: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1535, permute_dims1169, out_dtype="void") add1396: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1168, model_decoder_layers_17_encoder_attn_out_proj_bias5) add1397: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1394, add1396) layer_norm409: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1397, model_decoder_layers_17_final_layer_norm_weight5, model_decoder_layers_17_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1170: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_17_fc1_weight5, axes=None) matmul1169: R.Tensor((1, 1, 5120), dtype="float16") = R.matmul(layer_norm409, permute_dims1170, out_dtype="void") add1398: R.Tensor((1, 1, 5120), dtype="float16") = R.add(matmul1169, model_decoder_layers_17_fc1_bias5) gelu147: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1398) permute_dims1171: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_17_fc2_weight5, axes=None) matmul1170: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(gelu147, permute_dims1171, out_dtype="void") add1399: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1170, model_decoder_layers_17_fc2_bias5) add1400: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1397, add1399) layer_norm410: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1400, 
model_decoder_layers_18_self_attn_layer_norm_weight5, model_decoder_layers_18_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1172: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_18_self_attn_q_proj_weight5, axes=None) matmul1171: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm410, permute_dims1172, out_dtype="void") add1401: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1171, model_decoder_layers_18_self_attn_q_proj_bias5) reshape1536: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1401, R.shape([1, 1, 20, 64])) permute_dims1173: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_18_self_attn_k_proj_weight5, axes=None) matmul1172: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm410, permute_dims1173, out_dtype="void") reshape1537: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(matmul1172, R.shape([1, 1, 20, 64])) permute_dims1174: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_18_self_attn_v_proj_weight5, axes=None) matmul1173: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm410, permute_dims1174, out_dtype="void") add1402: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1173, model_decoder_layers_18_self_attn_v_proj_bias5) reshape1538: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1402, R.shape([1, 1, 20, 64])) concat114: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1536, reshape1537, reshape1538), axis=2) reshape1539: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat114, R.shape([1, 60, 64])) lv301 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(18), R.prim_value(T.float32(1)), reshape1539), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1540: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv301, R.shape([1, 1, 20, 64])) reshape1541: 
R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1540, R.shape([1, 1, 1280])) permute_dims1175: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_18_self_attn_out_proj_weight5, axes=None) matmul1174: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1541, permute_dims1175, out_dtype="void") add1403: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1174, model_decoder_layers_18_self_attn_out_proj_bias5) add1404: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1400, add1403) layer_norm411: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1404, model_decoder_layers_18_encoder_attn_layer_norm_weight5, model_decoder_layers_18_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1176: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_18_encoder_attn_q_proj_weight5, axes=None) matmul1175: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm411, permute_dims1176, out_dtype="void") add1405: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1175, model_decoder_layers_18_encoder_attn_q_proj_bias5) reshape1542: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1405, R.shape([1, 1, 20, 64])) reshape1543: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1542, R.shape([1, 20, 64])) lv302 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(18), R.prim_value(T.float32(1)), reshape1543), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1544: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv302, R.shape([1, 1, 20, 64])) reshape1545: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1544, R.shape([1, 1, 1280])) permute_dims1177: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_18_encoder_attn_out_proj_weight5, axes=None) matmul1176: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1545, 
permute_dims1177, out_dtype="void") add1406: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1176, model_decoder_layers_18_encoder_attn_out_proj_bias5) add1407: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1404, add1406) layer_norm412: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1407, model_decoder_layers_18_final_layer_norm_weight5, model_decoder_layers_18_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1178: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_18_fc1_weight5, axes=None) matmul1177: R.Tensor((1, 1, 5120), dtype="float16") = R.matmul(layer_norm412, permute_dims1178, out_dtype="void") add1408: R.Tensor((1, 1, 5120), dtype="float16") = R.add(matmul1177, model_decoder_layers_18_fc1_bias5) gelu148: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1408) permute_dims1179: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_18_fc2_weight5, axes=None) matmul1178: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(gelu148, permute_dims1179, out_dtype="void") add1409: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1178, model_decoder_layers_18_fc2_bias5) add1410: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1407, add1409) layer_norm413: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1410, model_decoder_layers_19_self_attn_layer_norm_weight5, model_decoder_layers_19_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1180: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_19_self_attn_q_proj_weight5, axes=None) matmul1179: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm413, permute_dims1180, out_dtype="void") add1411: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1179, model_decoder_layers_19_self_attn_q_proj_bias5) reshape1546: R.Tensor((1, 1, 20, 64), dtype="float16") = 
R.reshape(add1411, R.shape([1, 1, 20, 64])) permute_dims1181: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_19_self_attn_k_proj_weight5, axes=None) matmul1180: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm413, permute_dims1181, out_dtype="void") reshape1547: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(matmul1180, R.shape([1, 1, 20, 64])) permute_dims1182: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_19_self_attn_v_proj_weight5, axes=None) matmul1181: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm413, permute_dims1182, out_dtype="void") add1412: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1181, model_decoder_layers_19_self_attn_v_proj_bias5) reshape1548: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1412, R.shape([1, 1, 20, 64])) concat115: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1546, reshape1547, reshape1548), axis=2) reshape1549: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat115, R.shape([1, 60, 64])) lv303 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(19), R.prim_value(T.float32(1)), reshape1549), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1550: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv303, R.shape([1, 1, 20, 64])) reshape1551: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1550, R.shape([1, 1, 1280])) permute_dims1183: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_19_self_attn_out_proj_weight5, axes=None) matmul1182: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1551, permute_dims1183, out_dtype="void") add1413: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1182, model_decoder_layers_19_self_attn_out_proj_bias5) add1414: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1410, add1413) layer_norm414: R.Tensor((1, 1, 1280), dtype="float16") = 
R.nn.layer_norm(add1414, model_decoder_layers_19_encoder_attn_layer_norm_weight5, model_decoder_layers_19_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1184: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_19_encoder_attn_q_proj_weight5, axes=None) matmul1183: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm414, permute_dims1184, out_dtype="void") add1415: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1183, model_decoder_layers_19_encoder_attn_q_proj_bias5) reshape1552: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1415, R.shape([1, 1, 20, 64])) reshape1553: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1552, R.shape([1, 20, 64])) lv304 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(19), R.prim_value(T.float32(1)), reshape1553), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1554: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv304, R.shape([1, 1, 20, 64])) reshape1555: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1554, R.shape([1, 1, 1280])) permute_dims1185: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_19_encoder_attn_out_proj_weight5, axes=None) matmul1184: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1555, permute_dims1185, out_dtype="void") add1416: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1184, model_decoder_layers_19_encoder_attn_out_proj_bias5) add1417: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1414, add1416) layer_norm415: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1417, model_decoder_layers_19_final_layer_norm_weight5, model_decoder_layers_19_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1186: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_19_fc1_weight5, 
axes=None) matmul1185: R.Tensor((1, 1, 5120), dtype="float16") = R.matmul(layer_norm415, permute_dims1186, out_dtype="void") add1418: R.Tensor((1, 1, 5120), dtype="float16") = R.add(matmul1185, model_decoder_layers_19_fc1_bias5) gelu149: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1418) permute_dims1187: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_19_fc2_weight5, axes=None) matmul1186: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(gelu149, permute_dims1187, out_dtype="void") add1419: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1186, model_decoder_layers_19_fc2_bias5) add1420: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1417, add1419) layer_norm416: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1420, model_decoder_layers_20_self_attn_layer_norm_weight5, model_decoder_layers_20_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1188: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_20_self_attn_q_proj_weight5, axes=None) matmul1187: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm416, permute_dims1188, out_dtype="void") add1421: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1187, model_decoder_layers_20_self_attn_q_proj_bias5) reshape1556: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1421, R.shape([1, 1, 20, 64])) permute_dims1189: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_20_self_attn_k_proj_weight5, axes=None) matmul1188: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm416, permute_dims1189, out_dtype="void") reshape1557: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(matmul1188, R.shape([1, 1, 20, 64])) permute_dims1190: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_20_self_attn_v_proj_weight5, axes=None) matmul1189: R.Tensor((1, 1, 1280), dtype="float16") = 
R.matmul(layer_norm416, permute_dims1190, out_dtype="void") add1422: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1189, model_decoder_layers_20_self_attn_v_proj_bias5) reshape1558: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1422, R.shape([1, 1, 20, 64])) concat116: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1556, reshape1557, reshape1558), axis=2) reshape1559: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat116, R.shape([1, 60, 64])) lv305 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(20), R.prim_value(T.float32(1)), reshape1559), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1560: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv305, R.shape([1, 1, 20, 64])) reshape1561: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1560, R.shape([1, 1, 1280])) permute_dims1191: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_20_self_attn_out_proj_weight5, axes=None) matmul1190: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1561, permute_dims1191, out_dtype="void") add1423: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1190, model_decoder_layers_20_self_attn_out_proj_bias5) add1424: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1420, add1423) layer_norm417: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1424, model_decoder_layers_20_encoder_attn_layer_norm_weight5, model_decoder_layers_20_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1192: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_20_encoder_attn_q_proj_weight5, axes=None) matmul1191: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm417, permute_dims1192, out_dtype="void") add1425: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1191, model_decoder_layers_20_encoder_attn_q_proj_bias5) reshape1562: 
R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1425, R.shape([1, 1, 20, 64])) reshape1563: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1562, R.shape([1, 20, 64])) lv306 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(20), R.prim_value(T.float32(1)), reshape1563), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1564: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv306, R.shape([1, 1, 20, 64])) reshape1565: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1564, R.shape([1, 1, 1280])) permute_dims1193: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_20_encoder_attn_out_proj_weight5, axes=None) matmul1192: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1565, permute_dims1193, out_dtype="void") add1426: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1192, model_decoder_layers_20_encoder_attn_out_proj_bias5) add1427: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1424, add1426) layer_norm418: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1427, model_decoder_layers_20_final_layer_norm_weight5, model_decoder_layers_20_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1194: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_20_fc1_weight5, axes=None) matmul1193: R.Tensor((1, 1, 5120), dtype="float16") = R.matmul(layer_norm418, permute_dims1194, out_dtype="void") add1428: R.Tensor((1, 1, 5120), dtype="float16") = R.add(matmul1193, model_decoder_layers_20_fc1_bias5) gelu150: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1428) permute_dims1195: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_20_fc2_weight5, axes=None) matmul1194: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(gelu150, permute_dims1195, out_dtype="void") add1429: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1194, 
model_decoder_layers_20_fc2_bias5) add1430: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1427, add1429) layer_norm419: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1430, model_decoder_layers_21_self_attn_layer_norm_weight5, model_decoder_layers_21_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1196: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_21_self_attn_q_proj_weight5, axes=None) matmul1195: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm419, permute_dims1196, out_dtype="void") add1431: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1195, model_decoder_layers_21_self_attn_q_proj_bias5) reshape1566: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1431, R.shape([1, 1, 20, 64])) permute_dims1197: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_21_self_attn_k_proj_weight5, axes=None) matmul1196: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm419, permute_dims1197, out_dtype="void") reshape1567: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(matmul1196, R.shape([1, 1, 20, 64])) permute_dims1198: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_21_self_attn_v_proj_weight5, axes=None) matmul1197: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm419, permute_dims1198, out_dtype="void") add1432: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1197, model_decoder_layers_21_self_attn_v_proj_bias5) reshape1568: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1432, R.shape([1, 1, 20, 64])) concat117: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1566, reshape1567, reshape1568), axis=2) reshape1569: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat117, R.shape([1, 60, 64])) lv307 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(21), 
R.prim_value(T.float32(1)), reshape1569), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1570: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv307, R.shape([1, 1, 20, 64])) reshape1571: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1570, R.shape([1, 1, 1280])) permute_dims1199: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_21_self_attn_out_proj_weight5, axes=None) matmul1198: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1571, permute_dims1199, out_dtype="void") add1433: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1198, model_decoder_layers_21_self_attn_out_proj_bias5) add1434: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1430, add1433) layer_norm420: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1434, model_decoder_layers_21_encoder_attn_layer_norm_weight5, model_decoder_layers_21_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1200: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_21_encoder_attn_q_proj_weight5, axes=None) matmul1199: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm420, permute_dims1200, out_dtype="void") add1435: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1199, model_decoder_layers_21_encoder_attn_q_proj_bias5) reshape1572: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1435, R.shape([1, 1, 20, 64])) reshape1573: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1572, R.shape([1, 20, 64])) lv308 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(21), R.prim_value(T.float32(1)), reshape1573), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1574: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv308, R.shape([1, 1, 20, 64])) reshape1575: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1574, R.shape([1, 1, 1280])) permute_dims1201: 
R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_21_encoder_attn_out_proj_weight5, axes=None) matmul1200: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1575, permute_dims1201, out_dtype="void") add1436: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1200, model_decoder_layers_21_encoder_attn_out_proj_bias5) add1437: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1434, add1436) layer_norm421: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1437, model_decoder_layers_21_final_layer_norm_weight5, model_decoder_layers_21_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1202: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_21_fc1_weight5, axes=None) matmul1201: R.Tensor((1, 1, 5120), dtype="float16") = R.matmul(layer_norm421, permute_dims1202, out_dtype="void") add1438: R.Tensor((1, 1, 5120), dtype="float16") = R.add(matmul1201, model_decoder_layers_21_fc1_bias5) gelu151: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1438) permute_dims1203: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_21_fc2_weight5, axes=None) matmul1202: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(gelu151, permute_dims1203, out_dtype="void") add1439: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1202, model_decoder_layers_21_fc2_bias5) add1440: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1437, add1439) layer_norm422: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1440, model_decoder_layers_22_self_attn_layer_norm_weight5, model_decoder_layers_22_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1204: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_22_self_attn_q_proj_weight5, axes=None) matmul1203: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm422, permute_dims1204, 
out_dtype="void") add1441: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1203, model_decoder_layers_22_self_attn_q_proj_bias5) reshape1576: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1441, R.shape([1, 1, 20, 64])) permute_dims1205: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_22_self_attn_k_proj_weight5, axes=None) matmul1204: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm422, permute_dims1205, out_dtype="void") reshape1577: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(matmul1204, R.shape([1, 1, 20, 64])) permute_dims1206: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_22_self_attn_v_proj_weight5, axes=None) matmul1205: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm422, permute_dims1206, out_dtype="void") add1442: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1205, model_decoder_layers_22_self_attn_v_proj_bias5) reshape1578: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1442, R.shape([1, 1, 20, 64])) concat118: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1576, reshape1577, reshape1578), axis=2) reshape1579: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat118, R.shape([1, 60, 64])) lv309 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(22), R.prim_value(T.float32(1)), reshape1579), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1580: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv309, R.shape([1, 1, 20, 64])) reshape1581: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1580, R.shape([1, 1, 1280])) permute_dims1207: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_22_self_attn_out_proj_weight5, axes=None) matmul1206: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1581, permute_dims1207, out_dtype="void") add1443: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1206, 
model_decoder_layers_22_self_attn_out_proj_bias5) add1444: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1440, add1443) layer_norm423: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1444, model_decoder_layers_22_encoder_attn_layer_norm_weight5, model_decoder_layers_22_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1208: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_22_encoder_attn_q_proj_weight5, axes=None) matmul1207: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm423, permute_dims1208, out_dtype="void") add1445: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1207, model_decoder_layers_22_encoder_attn_q_proj_bias5) reshape1582: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1445, R.shape([1, 1, 20, 64])) reshape1583: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1582, R.shape([1, 20, 64])) lv310 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(22), R.prim_value(T.float32(1)), reshape1583), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1584: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv310, R.shape([1, 1, 20, 64])) reshape1585: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1584, R.shape([1, 1, 1280])) permute_dims1209: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_22_encoder_attn_out_proj_weight5, axes=None) matmul1208: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1585, permute_dims1209, out_dtype="void") add1446: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1208, model_decoder_layers_22_encoder_attn_out_proj_bias5) add1447: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1444, add1446) layer_norm424: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1447, model_decoder_layers_22_final_layer_norm_weight5, model_decoder_layers_22_final_layer_norm_bias5, 
axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1210: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_22_fc1_weight5, axes=None) matmul1209: R.Tensor((1, 1, 5120), dtype="float16") = R.matmul(layer_norm424, permute_dims1210, out_dtype="void") add1448: R.Tensor((1, 1, 5120), dtype="float16") = R.add(matmul1209, model_decoder_layers_22_fc1_bias5) gelu152: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1448) permute_dims1211: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_22_fc2_weight5, axes=None) matmul1210: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(gelu152, permute_dims1211, out_dtype="void") add1449: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1210, model_decoder_layers_22_fc2_bias5) add1450: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1447, add1449) layer_norm425: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1450, model_decoder_layers_23_self_attn_layer_norm_weight5, model_decoder_layers_23_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1212: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_23_self_attn_q_proj_weight5, axes=None) matmul1211: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm425, permute_dims1212, out_dtype="void") add1451: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1211, model_decoder_layers_23_self_attn_q_proj_bias5) reshape1586: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1451, R.shape([1, 1, 20, 64])) permute_dims1213: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_23_self_attn_k_proj_weight5, axes=None) matmul1212: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm425, permute_dims1213, out_dtype="void") reshape1587: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(matmul1212, R.shape([1, 1, 20, 64])) permute_dims1214: 
R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_23_self_attn_v_proj_weight5, axes=None) matmul1213: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm425, permute_dims1214, out_dtype="void") add1452: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1213, model_decoder_layers_23_self_attn_v_proj_bias5) reshape1588: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1452, R.shape([1, 1, 20, 64])) concat119: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1586, reshape1587, reshape1588), axis=2) reshape1589: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat119, R.shape([1, 60, 64])) lv311 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(23), R.prim_value(T.float32(1)), reshape1589), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1590: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv311, R.shape([1, 1, 20, 64])) reshape1591: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1590, R.shape([1, 1, 1280])) permute_dims1215: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_23_self_attn_out_proj_weight5, axes=None) matmul1214: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1591, permute_dims1215, out_dtype="void") add1453: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1214, model_decoder_layers_23_self_attn_out_proj_bias5) add1454: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1450, add1453) layer_norm426: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1454, model_decoder_layers_23_encoder_attn_layer_norm_weight5, model_decoder_layers_23_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1216: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_23_encoder_attn_q_proj_weight5, axes=None) matmul1215: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm426, 
permute_dims1216, out_dtype="void") add1455: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1215, model_decoder_layers_23_encoder_attn_q_proj_bias5) reshape1592: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1455, R.shape([1, 1, 20, 64])) reshape1593: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1592, R.shape([1, 20, 64])) lv312 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(23), R.prim_value(T.float32(1)), reshape1593), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1594: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv312, R.shape([1, 1, 20, 64])) reshape1595: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1594, R.shape([1, 1, 1280])) permute_dims1217: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_23_encoder_attn_out_proj_weight5, axes=None) matmul1216: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1595, permute_dims1217, out_dtype="void") add1456: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1216, model_decoder_layers_23_encoder_attn_out_proj_bias5) add1457: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1454, add1456) layer_norm427: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1457, model_decoder_layers_23_final_layer_norm_weight5, model_decoder_layers_23_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1218: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_23_fc1_weight5, axes=None) matmul1217: R.Tensor((1, 1, 5120), dtype="float16") = R.matmul(layer_norm427, permute_dims1218, out_dtype="void") add1458: R.Tensor((1, 1, 5120), dtype="float16") = R.add(matmul1217, model_decoder_layers_23_fc1_bias5) gelu153: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1458) permute_dims1219: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_23_fc2_weight5, axes=None) 
matmul1218: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(gelu153, permute_dims1219, out_dtype="void") add1459: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1218, model_decoder_layers_23_fc2_bias5) add1460: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1457, add1459) layer_norm428: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1460, model_decoder_layers_24_self_attn_layer_norm_weight5, model_decoder_layers_24_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1220: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_24_self_attn_q_proj_weight5, axes=None) matmul1219: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm428, permute_dims1220, out_dtype="void") add1461: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1219, model_decoder_layers_24_self_attn_q_proj_bias5) reshape1596: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1461, R.shape([1, 1, 20, 64])) permute_dims1221: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_24_self_attn_k_proj_weight5, axes=None) matmul1220: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm428, permute_dims1221, out_dtype="void") reshape1597: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(matmul1220, R.shape([1, 1, 20, 64])) permute_dims1222: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_24_self_attn_v_proj_weight5, axes=None) matmul1221: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm428, permute_dims1222, out_dtype="void") add1462: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1221, model_decoder_layers_24_self_attn_v_proj_bias5) reshape1598: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1462, R.shape([1, 1, 20, 64])) concat120: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1596, reshape1597, reshape1598), axis=2) reshape1599: R.Tensor((1, 60, 64), dtype="float16") = 
R.reshape(concat120, R.shape([1, 60, 64])) lv313 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(24), R.prim_value(T.float32(1)), reshape1599), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1600: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv313, R.shape([1, 1, 20, 64])) reshape1601: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1600, R.shape([1, 1, 1280])) permute_dims1223: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_24_self_attn_out_proj_weight5, axes=None) matmul1222: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1601, permute_dims1223, out_dtype="void") add1463: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1222, model_decoder_layers_24_self_attn_out_proj_bias5) add1464: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1460, add1463) layer_norm429: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1464, model_decoder_layers_24_encoder_attn_layer_norm_weight5, model_decoder_layers_24_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1224: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_24_encoder_attn_q_proj_weight5, axes=None) matmul1223: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm429, permute_dims1224, out_dtype="void") add1465: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1223, model_decoder_layers_24_encoder_attn_q_proj_bias5) reshape1602: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1465, R.shape([1, 1, 20, 64])) reshape1603: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1602, R.shape([1, 20, 64])) lv314 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(24), R.prim_value(T.float32(1)), reshape1603), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1604: R.Tensor((1, 1, 20, 64), dtype="float16") = 
R.reshape(lv314, R.shape([1, 1, 20, 64])) reshape1605: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1604, R.shape([1, 1, 1280])) permute_dims1225: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_24_encoder_attn_out_proj_weight5, axes=None) matmul1224: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1605, permute_dims1225, out_dtype="void") add1466: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1224, model_decoder_layers_24_encoder_attn_out_proj_bias5) add1467: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1464, add1466) layer_norm430: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1467, model_decoder_layers_24_final_layer_norm_weight5, model_decoder_layers_24_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1226: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_24_fc1_weight5, axes=None) matmul1225: R.Tensor((1, 1, 5120), dtype="float16") = R.matmul(layer_norm430, permute_dims1226, out_dtype="void") add1468: R.Tensor((1, 1, 5120), dtype="float16") = R.add(matmul1225, model_decoder_layers_24_fc1_bias5) gelu154: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1468) permute_dims1227: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_24_fc2_weight5, axes=None) matmul1226: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(gelu154, permute_dims1227, out_dtype="void") add1469: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1226, model_decoder_layers_24_fc2_bias5) add1470: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1467, add1469) layer_norm431: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1470, model_decoder_layers_25_self_attn_layer_norm_weight5, model_decoder_layers_25_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1228: R.Tensor((1280, 1280), dtype="float16") = 
R.permute_dims(model_decoder_layers_25_self_attn_q_proj_weight5, axes=None) matmul1227: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm431, permute_dims1228, out_dtype="void") add1471: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1227, model_decoder_layers_25_self_attn_q_proj_bias5) reshape1606: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1471, R.shape([1, 1, 20, 64])) permute_dims1229: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_25_self_attn_k_proj_weight5, axes=None) matmul1228: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm431, permute_dims1229, out_dtype="void") reshape1607: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(matmul1228, R.shape([1, 1, 20, 64])) permute_dims1230: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_25_self_attn_v_proj_weight5, axes=None) matmul1229: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm431, permute_dims1230, out_dtype="void") add1472: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1229, model_decoder_layers_25_self_attn_v_proj_bias5) reshape1608: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1472, R.shape([1, 1, 20, 64])) concat121: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1606, reshape1607, reshape1608), axis=2) reshape1609: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat121, R.shape([1, 60, 64])) lv315 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(25), R.prim_value(T.float32(1)), reshape1609), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1610: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv315, R.shape([1, 1, 20, 64])) reshape1611: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1610, R.shape([1, 1, 1280])) permute_dims1231: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_25_self_attn_out_proj_weight5, axes=None) 
matmul1230: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1611, permute_dims1231, out_dtype="void") add1473: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1230, model_decoder_layers_25_self_attn_out_proj_bias5) add1474: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1470, add1473) layer_norm432: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1474, model_decoder_layers_25_encoder_attn_layer_norm_weight5, model_decoder_layers_25_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1232: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_25_encoder_attn_q_proj_weight5, axes=None) matmul1231: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm432, permute_dims1232, out_dtype="void") add1475: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1231, model_decoder_layers_25_encoder_attn_q_proj_bias5) reshape1612: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1475, R.shape([1, 1, 20, 64])) reshape1613: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1612, R.shape([1, 20, 64])) lv316 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(25), R.prim_value(T.float32(1)), reshape1613), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1614: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv316, R.shape([1, 1, 20, 64])) reshape1615: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1614, R.shape([1, 1, 1280])) permute_dims1233: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_25_encoder_attn_out_proj_weight5, axes=None) matmul1232: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1615, permute_dims1233, out_dtype="void") add1476: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1232, model_decoder_layers_25_encoder_attn_out_proj_bias5) add1477: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1474, add1476) 
layer_norm433: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1477, model_decoder_layers_25_final_layer_norm_weight5, model_decoder_layers_25_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1234: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_25_fc1_weight5, axes=None) matmul1233: R.Tensor((1, 1, 5120), dtype="float16") = R.matmul(layer_norm433, permute_dims1234, out_dtype="void") add1478: R.Tensor((1, 1, 5120), dtype="float16") = R.add(matmul1233, model_decoder_layers_25_fc1_bias5) gelu155: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1478) permute_dims1235: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_25_fc2_weight5, axes=None) matmul1234: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(gelu155, permute_dims1235, out_dtype="void") add1479: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1234, model_decoder_layers_25_fc2_bias5) add1480: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1477, add1479) layer_norm434: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1480, model_decoder_layers_26_self_attn_layer_norm_weight5, model_decoder_layers_26_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1236: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_26_self_attn_q_proj_weight5, axes=None) matmul1235: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm434, permute_dims1236, out_dtype="void") add1481: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1235, model_decoder_layers_26_self_attn_q_proj_bias5) reshape1616: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1481, R.shape([1, 1, 20, 64])) permute_dims1237: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_26_self_attn_k_proj_weight5, axes=None) matmul1236: R.Tensor((1, 1, 1280), dtype="float16") = 
R.matmul(layer_norm434, permute_dims1237, out_dtype="void") reshape1617: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(matmul1236, R.shape([1, 1, 20, 64])) permute_dims1238: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_26_self_attn_v_proj_weight5, axes=None) matmul1237: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm434, permute_dims1238, out_dtype="void") add1482: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1237, model_decoder_layers_26_self_attn_v_proj_bias5) reshape1618: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1482, R.shape([1, 1, 20, 64])) concat122: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1616, reshape1617, reshape1618), axis=2) reshape1619: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat122, R.shape([1, 60, 64])) lv317 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(26), R.prim_value(T.float32(1)), reshape1619), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1620: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv317, R.shape([1, 1, 20, 64])) reshape1621: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1620, R.shape([1, 1, 1280])) permute_dims1239: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_26_self_attn_out_proj_weight5, axes=None) matmul1238: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1621, permute_dims1239, out_dtype="void") add1483: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1238, model_decoder_layers_26_self_attn_out_proj_bias5) add1484: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1480, add1483) layer_norm435: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1484, model_decoder_layers_26_encoder_attn_layer_norm_weight5, model_decoder_layers_26_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1240: R.Tensor((1280, 1280), 
dtype="float16") = R.permute_dims(model_decoder_layers_26_encoder_attn_q_proj_weight5, axes=None) matmul1239: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm435, permute_dims1240, out_dtype="void") add1485: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1239, model_decoder_layers_26_encoder_attn_q_proj_bias5) reshape1622: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1485, R.shape([1, 1, 20, 64])) reshape1623: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1622, R.shape([1, 20, 64])) lv318 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(26), R.prim_value(T.float32(1)), reshape1623), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1624: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv318, R.shape([1, 1, 20, 64])) reshape1625: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1624, R.shape([1, 1, 1280])) permute_dims1241: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_26_encoder_attn_out_proj_weight5, axes=None) matmul1240: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1625, permute_dims1241, out_dtype="void") add1486: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1240, model_decoder_layers_26_encoder_attn_out_proj_bias5) add1487: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1484, add1486) layer_norm436: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1487, model_decoder_layers_26_final_layer_norm_weight5, model_decoder_layers_26_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1242: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_26_fc1_weight5, axes=None) matmul1241: R.Tensor((1, 1, 5120), dtype="float16") = R.matmul(layer_norm436, permute_dims1242, out_dtype="void") add1488: R.Tensor((1, 1, 5120), dtype="float16") = R.add(matmul1241, model_decoder_layers_26_fc1_bias5) gelu156: R.Tensor((1, 1, 
5120), dtype="float16") = R.nn.gelu(add1488) permute_dims1243: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_26_fc2_weight5, axes=None) matmul1242: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(gelu156, permute_dims1243, out_dtype="void") add1489: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1242, model_decoder_layers_26_fc2_bias5) add1490: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1487, add1489) layer_norm437: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1490, model_decoder_layers_27_self_attn_layer_norm_weight5, model_decoder_layers_27_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1244: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_27_self_attn_q_proj_weight5, axes=None) matmul1243: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm437, permute_dims1244, out_dtype="void") add1491: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1243, model_decoder_layers_27_self_attn_q_proj_bias5) reshape1626: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1491, R.shape([1, 1, 20, 64])) permute_dims1245: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_27_self_attn_k_proj_weight5, axes=None) matmul1244: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm437, permute_dims1245, out_dtype="void") reshape1627: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(matmul1244, R.shape([1, 1, 20, 64])) permute_dims1246: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_27_self_attn_v_proj_weight5, axes=None) matmul1245: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm437, permute_dims1246, out_dtype="void") add1492: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1245, model_decoder_layers_27_self_attn_v_proj_bias5) reshape1628: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1492, R.shape([1, 1, 20, 
64])) concat123: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1626, reshape1627, reshape1628), axis=2) reshape1629: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat123, R.shape([1, 60, 64])) lv319 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(27), R.prim_value(T.float32(1)), reshape1629), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1630: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv319, R.shape([1, 1, 20, 64])) reshape1631: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1630, R.shape([1, 1, 1280])) permute_dims1247: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_27_self_attn_out_proj_weight5, axes=None) matmul1246: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1631, permute_dims1247, out_dtype="void") add1493: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1246, model_decoder_layers_27_self_attn_out_proj_bias5) add1494: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1490, add1493) layer_norm438: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1494, model_decoder_layers_27_encoder_attn_layer_norm_weight5, model_decoder_layers_27_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1248: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_27_encoder_attn_q_proj_weight5, axes=None) matmul1247: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm438, permute_dims1248, out_dtype="void") add1495: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1247, model_decoder_layers_27_encoder_attn_q_proj_bias5) reshape1632: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1495, R.shape([1, 1, 20, 64])) reshape1633: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1632, R.shape([1, 20, 64])) lv320 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, 
R.prim_value(27), R.prim_value(T.float32(1)), reshape1633), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1634: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv320, R.shape([1, 1, 20, 64])) reshape1635: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1634, R.shape([1, 1, 1280])) permute_dims1249: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_27_encoder_attn_out_proj_weight5, axes=None) matmul1248: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1635, permute_dims1249, out_dtype="void") add1496: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1248, model_decoder_layers_27_encoder_attn_out_proj_bias5) add1497: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1494, add1496) layer_norm439: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1497, model_decoder_layers_27_final_layer_norm_weight5, model_decoder_layers_27_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1250: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_27_fc1_weight5, axes=None) matmul1249: R.Tensor((1, 1, 5120), dtype="float16") = R.matmul(layer_norm439, permute_dims1250, out_dtype="void") add1498: R.Tensor((1, 1, 5120), dtype="float16") = R.add(matmul1249, model_decoder_layers_27_fc1_bias5) gelu157: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1498) permute_dims1251: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_27_fc2_weight5, axes=None) matmul1250: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(gelu157, permute_dims1251, out_dtype="void") add1499: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1250, model_decoder_layers_27_fc2_bias5) add1500: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1497, add1499) layer_norm440: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1500, model_decoder_layers_28_self_attn_layer_norm_weight5, 
model_decoder_layers_28_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1252: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_28_self_attn_q_proj_weight5, axes=None) matmul1251: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm440, permute_dims1252, out_dtype="void") add1501: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1251, model_decoder_layers_28_self_attn_q_proj_bias5) reshape1636: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1501, R.shape([1, 1, 20, 64])) permute_dims1253: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_28_self_attn_k_proj_weight5, axes=None) matmul1252: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm440, permute_dims1253, out_dtype="void") reshape1637: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(matmul1252, R.shape([1, 1, 20, 64])) permute_dims1254: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_28_self_attn_v_proj_weight5, axes=None) matmul1253: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm440, permute_dims1254, out_dtype="void") add1502: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1253, model_decoder_layers_28_self_attn_v_proj_bias5) reshape1638: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1502, R.shape([1, 1, 20, 64])) concat124: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1636, reshape1637, reshape1638), axis=2) reshape1639: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat124, R.shape([1, 60, 64])) lv321 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(28), R.prim_value(T.float32(1)), reshape1639), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1640: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv321, R.shape([1, 1, 20, 64])) reshape1641: R.Tensor((1, 1, 1280), dtype="float16") = 
R.reshape(reshape1640, R.shape([1, 1, 1280])) permute_dims1255: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_28_self_attn_out_proj_weight5, axes=None) matmul1254: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1641, permute_dims1255, out_dtype="void") add1503: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1254, model_decoder_layers_28_self_attn_out_proj_bias5) add1504: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1500, add1503) layer_norm441: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1504, model_decoder_layers_28_encoder_attn_layer_norm_weight5, model_decoder_layers_28_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1256: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_28_encoder_attn_q_proj_weight5, axes=None) matmul1255: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm441, permute_dims1256, out_dtype="void") add1505: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1255, model_decoder_layers_28_encoder_attn_q_proj_bias5) reshape1642: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1505, R.shape([1, 1, 20, 64])) reshape1643: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1642, R.shape([1, 20, 64])) lv322 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(28), R.prim_value(T.float32(1)), reshape1643), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1644: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv322, R.shape([1, 1, 20, 64])) reshape1645: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1644, R.shape([1, 1, 1280])) permute_dims1257: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_28_encoder_attn_out_proj_weight5, axes=None) matmul1256: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1645, permute_dims1257, out_dtype="void") add1506: 
R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1256, model_decoder_layers_28_encoder_attn_out_proj_bias5) add1507: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1504, add1506) layer_norm442: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1507, model_decoder_layers_28_final_layer_norm_weight5, model_decoder_layers_28_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1258: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_28_fc1_weight5, axes=None) matmul1257: R.Tensor((1, 1, 5120), dtype="float16") = R.matmul(layer_norm442, permute_dims1258, out_dtype="void") add1508: R.Tensor((1, 1, 5120), dtype="float16") = R.add(matmul1257, model_decoder_layers_28_fc1_bias5) gelu158: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1508) permute_dims1259: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_28_fc2_weight5, axes=None) matmul1258: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(gelu158, permute_dims1259, out_dtype="void") add1509: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1258, model_decoder_layers_28_fc2_bias5) add1510: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1507, add1509) layer_norm443: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1510, model_decoder_layers_29_self_attn_layer_norm_weight5, model_decoder_layers_29_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1260: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_29_self_attn_q_proj_weight5, axes=None) matmul1259: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm443, permute_dims1260, out_dtype="void") add1511: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1259, model_decoder_layers_29_self_attn_q_proj_bias5) reshape1646: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1511, R.shape([1, 1, 20, 64])) permute_dims1261: 
R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_29_self_attn_k_proj_weight5, axes=None) matmul1260: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm443, permute_dims1261, out_dtype="void") reshape1647: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(matmul1260, R.shape([1, 1, 20, 64])) permute_dims1262: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_29_self_attn_v_proj_weight5, axes=None) matmul1261: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm443, permute_dims1262, out_dtype="void") add1512: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1261, model_decoder_layers_29_self_attn_v_proj_bias5) reshape1648: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1512, R.shape([1, 1, 20, 64])) concat125: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1646, reshape1647, reshape1648), axis=2) reshape1649: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat125, R.shape([1, 60, 64])) lv323 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(29), R.prim_value(T.float32(1)), reshape1649), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1650: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv323, R.shape([1, 1, 20, 64])) reshape1651: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1650, R.shape([1, 1, 1280])) permute_dims1263: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_29_self_attn_out_proj_weight5, axes=None) matmul1262: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1651, permute_dims1263, out_dtype="void") add1513: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1262, model_decoder_layers_29_self_attn_out_proj_bias5) add1514: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1510, add1513) layer_norm444: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1514, 
model_decoder_layers_29_encoder_attn_layer_norm_weight5, model_decoder_layers_29_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1264: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_29_encoder_attn_q_proj_weight5, axes=None) matmul1263: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm444, permute_dims1264, out_dtype="void") add1515: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1263, model_decoder_layers_29_encoder_attn_q_proj_bias5) reshape1652: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1515, R.shape([1, 1, 20, 64])) reshape1653: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1652, R.shape([1, 20, 64])) lv324 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(29), R.prim_value(T.float32(1)), reshape1653), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1654: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv324, R.shape([1, 1, 20, 64])) reshape1655: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1654, R.shape([1, 1, 1280])) permute_dims1265: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_29_encoder_attn_out_proj_weight5, axes=None) matmul1264: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1655, permute_dims1265, out_dtype="void") add1516: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1264, model_decoder_layers_29_encoder_attn_out_proj_bias5) add1517: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1514, add1516) layer_norm445: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1517, model_decoder_layers_29_final_layer_norm_weight5, model_decoder_layers_29_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1266: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_29_fc1_weight5, axes=None) matmul1265: 
R.Tensor((1, 1, 5120), dtype="float16") = R.matmul(layer_norm445, permute_dims1266, out_dtype="void") add1518: R.Tensor((1, 1, 5120), dtype="float16") = R.add(matmul1265, model_decoder_layers_29_fc1_bias5) gelu159: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1518) permute_dims1267: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_29_fc2_weight5, axes=None) matmul1266: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(gelu159, permute_dims1267, out_dtype="void") add1519: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1266, model_decoder_layers_29_fc2_bias5) add1520: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1517, add1519) layer_norm446: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1520, model_decoder_layers_30_self_attn_layer_norm_weight5, model_decoder_layers_30_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1268: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_30_self_attn_q_proj_weight5, axes=None) matmul1267: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm446, permute_dims1268, out_dtype="void") add1521: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1267, model_decoder_layers_30_self_attn_q_proj_bias5) reshape1656: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1521, R.shape([1, 1, 20, 64])) permute_dims1269: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_30_self_attn_k_proj_weight5, axes=None) matmul1268: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm446, permute_dims1269, out_dtype="void") reshape1657: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(matmul1268, R.shape([1, 1, 20, 64])) permute_dims1270: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_30_self_attn_v_proj_weight5, axes=None) matmul1269: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm446, permute_dims1270, 
out_dtype="void") add1522: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1269, model_decoder_layers_30_self_attn_v_proj_bias5) reshape1658: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1522, R.shape([1, 1, 20, 64])) concat126: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1656, reshape1657, reshape1658), axis=2) reshape1659: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat126, R.shape([1, 60, 64])) lv325 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(30), R.prim_value(T.float32(1)), reshape1659), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1660: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv325, R.shape([1, 1, 20, 64])) reshape1661: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1660, R.shape([1, 1, 1280])) permute_dims1271: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_30_self_attn_out_proj_weight5, axes=None) matmul1270: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1661, permute_dims1271, out_dtype="void") add1523: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1270, model_decoder_layers_30_self_attn_out_proj_bias5) add1524: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1520, add1523) layer_norm447: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1524, model_decoder_layers_30_encoder_attn_layer_norm_weight5, model_decoder_layers_30_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1272: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_30_encoder_attn_q_proj_weight5, axes=None) matmul1271: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm447, permute_dims1272, out_dtype="void") add1525: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1271, model_decoder_layers_30_encoder_attn_q_proj_bias5) reshape1662: R.Tensor((1, 1, 20, 64), dtype="float16") = 
R.reshape(add1525, R.shape([1, 1, 20, 64])) reshape1663: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1662, R.shape([1, 20, 64])) lv326 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(30), R.prim_value(T.float32(1)), reshape1663), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1664: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv326, R.shape([1, 1, 20, 64])) reshape1665: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1664, R.shape([1, 1, 1280])) permute_dims1273: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_30_encoder_attn_out_proj_weight5, axes=None) matmul1272: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1665, permute_dims1273, out_dtype="void") add1526: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1272, model_decoder_layers_30_encoder_attn_out_proj_bias5) add1527: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1524, add1526) layer_norm448: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1527, model_decoder_layers_30_final_layer_norm_weight5, model_decoder_layers_30_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1274: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_30_fc1_weight5, axes=None) matmul1273: R.Tensor((1, 1, 5120), dtype="float16") = R.matmul(layer_norm448, permute_dims1274, out_dtype="void") add1528: R.Tensor((1, 1, 5120), dtype="float16") = R.add(matmul1273, model_decoder_layers_30_fc1_bias5) gelu160: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1528) permute_dims1275: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_30_fc2_weight5, axes=None) matmul1274: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(gelu160, permute_dims1275, out_dtype="void") add1529: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1274, model_decoder_layers_30_fc2_bias5) add1530: 
R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1527, add1529) layer_norm449: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1530, model_decoder_layers_31_self_attn_layer_norm_weight5, model_decoder_layers_31_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1276: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_31_self_attn_q_proj_weight5, axes=None) matmul1275: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm449, permute_dims1276, out_dtype="void") add1531: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1275, model_decoder_layers_31_self_attn_q_proj_bias5) reshape1666: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1531, R.shape([1, 1, 20, 64])) permute_dims1277: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_31_self_attn_k_proj_weight5, axes=None) matmul1276: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm449, permute_dims1277, out_dtype="void") reshape1667: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(matmul1276, R.shape([1, 1, 20, 64])) permute_dims1278: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_31_self_attn_v_proj_weight5, axes=None) matmul1277: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm449, permute_dims1278, out_dtype="void") add1532: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1277, model_decoder_layers_31_self_attn_v_proj_bias5) reshape1668: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1532, R.shape([1, 1, 20, 64])) concat127: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1666, reshape1667, reshape1668), axis=2) reshape1669: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat127, R.shape([1, 60, 64])) lv327 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(31), R.prim_value(T.float32(1)), reshape1669), 
out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1670: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv327, R.shape([1, 1, 20, 64])) reshape1671: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1670, R.shape([1, 1, 1280])) permute_dims1279: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_31_self_attn_out_proj_weight5, axes=None) matmul1278: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1671, permute_dims1279, out_dtype="void") add1533: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1278, model_decoder_layers_31_self_attn_out_proj_bias5) add1534: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1530, add1533) layer_norm450: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1534, model_decoder_layers_31_encoder_attn_layer_norm_weight5, model_decoder_layers_31_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1280: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_31_encoder_attn_q_proj_weight5, axes=None) matmul1279: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm450, permute_dims1280, out_dtype="void") add1535: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1279, model_decoder_layers_31_encoder_attn_q_proj_bias5) reshape1672: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1535, R.shape([1, 1, 20, 64])) reshape1673: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1672, R.shape([1, 20, 64])) lv328 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(31), R.prim_value(T.float32(1)), reshape1673), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) reshape1674: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv328, R.shape([1, 1, 20, 64])) reshape1675: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1674, R.shape([1, 1, 1280])) permute_dims1281: R.Tensor((1280, 1280), dtype="float16") = 
R.permute_dims(model_decoder_layers_31_encoder_attn_out_proj_weight5, axes=None) matmul1280: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(reshape1675, permute_dims1281, out_dtype="void") add1536: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1280, model_decoder_layers_31_encoder_attn_out_proj_bias5) add1537: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1534, add1536) layer_norm451: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1537, model_decoder_layers_31_final_layer_norm_weight5, model_decoder_layers_31_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1282: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_31_fc1_weight5, axes=None) matmul1281: R.Tensor((1, 1, 5120), dtype="float16") = R.matmul(layer_norm451, permute_dims1282, out_dtype="void") add1538: R.Tensor((1, 1, 5120), dtype="float16") = R.add(matmul1281, model_decoder_layers_31_fc1_bias5) gelu161: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1538) permute_dims1283: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_31_fc2_weight5, axes=None) matmul1282: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(gelu161, permute_dims1283, out_dtype="void") add1539: R.Tensor((1, 1, 1280), dtype="float16") = R.add(matmul1282, model_decoder_layers_31_fc2_bias5) add1540: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1537, add1539) layer_norm452: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1540, model_decoder_layer_norm_weight5, model_decoder_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1284: R.Tensor((1280, 51866), dtype="float16") = R.permute_dims(model_decoder_embed_tokens_weight5, axes=None) matmul1283: R.Tensor((1, 1, 51866), dtype="float32") = R.matmul(layer_norm452, permute_dims1284, out_dtype="float32") gv5: R.Tensor((1, 1, 51866), dtype="float32") = matmul1283 R.output(gv5) return gv5 
# NOTE(review): auto-generated TVMScript (Relax IR) dump; the physical line structure is
# collapsed, so documentation is added as full lines before the definition.
#
# multinomial_from_uniform(probs, uniform_samples, sample_indices) -> R.Tensor((num_samples,), int32)
#   Sampling entry point over a probability matrix:
#   - `uniform_samples` and `sample_indices` are reshaped from (num_samples,) to
#     (num_samples, 1) via the "vm.builtin.reshape" packed call (pure, with explicit
#     sinfo_args so the struct info of the DPS result is known to the compiler);
#   - `R.multinomial_from_uniform(probs_1, uniform_samples_1, sample_indices_1, dtype="int32")`
#     produces one int32 value per row — presumably a sampled vocab index drawn from
#     `probs` using the pre-drawn uniform numbers (TODO confirm against the Relax op docs);
#   - the (num_samples, 1) result is reshaped back to a flat (num_samples,) tensor and
#     returned through the dataflow output `gv`.
#   func_attr pins upper bounds for the symbolic shape vars (batch_size <= 8,
#   num_positions <= 48, num_samples <= 8) and marks vocab_size non-negative, which the
#   memory planner uses for dynamic output allocation.
@R.function def multinomial_from_uniform(probs: R.Tensor(("batch_size", "vocab_size"), dtype="float32"), uniform_samples: R.Tensor(("num_samples",), dtype="float32"), sample_indices: R.Tensor(("num_samples",), dtype="int32")) -> R.Tensor(("num_samples",), dtype="int32"): num_samples = T.int64() batch_size = T.int64() vocab_size = T.int64() R.func_attr({"relax.memory_plan_dynamic_func_output": 1, "tir_non_negative_var": ["vocab_size"], "tir_var_upper_bound": {"batch_size": 8, "num_positions": 48, "num_samples": 8}}) with R.dataflow(): probs_1: R.Tensor((batch_size, vocab_size), dtype="float32") = probs uniform_samples_1: R.Tensor((num_samples, 1), dtype="float32") = R.call_pure_packed("vm.builtin.reshape", uniform_samples, R.shape([num_samples, 1]), sinfo_args=(R.Tensor((num_samples, 1), dtype="float32"),)) sample_indices_1: R.Tensor((num_samples, 1), dtype="int32") = R.call_pure_packed("vm.builtin.reshape", sample_indices, R.shape([num_samples, 1]), sinfo_args=(R.Tensor((num_samples, 1), dtype="int32"),)) nn_multinomial_from_uniform: R.Tensor((num_samples, 1), dtype="int32") = R.multinomial_from_uniform(probs_1, uniform_samples_1, sample_indices_1, dtype="int32") lv: R.Tensor((num_samples,), dtype="int32") = R.call_pure_packed("vm.builtin.reshape", nn_multinomial_from_uniform, R.shape([num_samples]), sinfo_args=(R.Tensor((num_samples,), dtype="int32"),)) gv: R.Tensor((num_samples,), dtype="int32") = lv R.output(gv) return gv @R.function def prefill(input_ids: R.Tensor((1, "seq_len"), dtype="int32"), paged_kv_cache: R.Object, packed_params: R.Tuple(R.Tensor((1280, 128, 3), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280, 3), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1500, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((51866, 1280), dtype="float16"), R.Tensor((448, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), 
dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"))) -> R.Tensor((1, 1, 51866), dtype="float32"): seq_len = T.int64() R.func_attr({"num_input": 2, "relax.memory_plan_dynamic_func_output": 1, "tir_non_negative_var": ["vocab_size"], "tir_var_upper_bound": {"batch_size": 8, "seq_len": 15000, "total_seq_len": 1500}}) cls = Module with R.dataflow(): model_encoder_conv1_weight4: R.Tensor((1280, 128, 3), dtype="float16") = packed_params[0] model_encoder_conv1_bias4: 
R.Tensor((1280,), dtype="float16") = packed_params[1] model_encoder_conv2_weight4: R.Tensor((1280, 1280, 3), dtype="float16") = packed_params[2] model_encoder_conv2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[3] model_encoder_embed_positions_weight4: R.Tensor((1500, 1280), dtype="float16") = packed_params[4] model_encoder_layers_0_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[5] model_encoder_layers_0_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[6] model_encoder_layers_0_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[7] model_encoder_layers_0_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[8] model_encoder_layers_0_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[9] model_encoder_layers_0_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[10] model_encoder_layers_0_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[11] model_encoder_layers_0_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[12] model_encoder_layers_0_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[13] model_encoder_layers_0_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[14] model_encoder_layers_0_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[15] model_encoder_layers_0_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[16] model_encoder_layers_0_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[17] model_encoder_layers_0_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[18] model_encoder_layers_0_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[19] model_encoder_layers_1_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[20] 
model_encoder_layers_1_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[21] model_encoder_layers_1_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[22] model_encoder_layers_1_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[23] model_encoder_layers_1_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[24] model_encoder_layers_1_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[25] model_encoder_layers_1_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[26] model_encoder_layers_1_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[27] model_encoder_layers_1_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[28] model_encoder_layers_1_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[29] model_encoder_layers_1_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[30] model_encoder_layers_1_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[31] model_encoder_layers_1_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[32] model_encoder_layers_1_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[33] model_encoder_layers_1_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[34] model_encoder_layers_2_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[35] model_encoder_layers_2_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[36] model_encoder_layers_2_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[37] model_encoder_layers_2_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[38] model_encoder_layers_2_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[39] 
model_encoder_layers_2_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[40] model_encoder_layers_2_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[41] model_encoder_layers_2_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[42] model_encoder_layers_2_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[43] model_encoder_layers_2_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[44] model_encoder_layers_2_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[45] model_encoder_layers_2_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[46] model_encoder_layers_2_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[47] model_encoder_layers_2_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[48] model_encoder_layers_2_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[49] model_encoder_layers_3_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[50] model_encoder_layers_3_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[51] model_encoder_layers_3_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[52] model_encoder_layers_3_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[53] model_encoder_layers_3_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[54] model_encoder_layers_3_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[55] model_encoder_layers_3_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[56] model_encoder_layers_3_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[57] model_encoder_layers_3_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[58] 
model_encoder_layers_3_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[59] model_encoder_layers_3_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[60] model_encoder_layers_3_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[61] model_encoder_layers_3_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[62] model_encoder_layers_3_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[63] model_encoder_layers_3_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[64] model_encoder_layers_4_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[65] model_encoder_layers_4_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[66] model_encoder_layers_4_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[67] model_encoder_layers_4_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[68] model_encoder_layers_4_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[69] model_encoder_layers_4_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[70] model_encoder_layers_4_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[71] model_encoder_layers_4_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[72] model_encoder_layers_4_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[73] model_encoder_layers_4_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[74] model_encoder_layers_4_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[75] model_encoder_layers_4_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[76] model_encoder_layers_4_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[77] model_encoder_layers_4_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = 
packed_params[78] model_encoder_layers_4_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[79] model_encoder_layers_5_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[80] model_encoder_layers_5_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[81] model_encoder_layers_5_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[82] model_encoder_layers_5_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[83] model_encoder_layers_5_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[84] model_encoder_layers_5_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[85] model_encoder_layers_5_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[86] model_encoder_layers_5_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[87] model_encoder_layers_5_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[88] model_encoder_layers_5_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[89] model_encoder_layers_5_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[90] model_encoder_layers_5_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[91] model_encoder_layers_5_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[92] model_encoder_layers_5_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[93] model_encoder_layers_5_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[94] model_encoder_layers_6_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[95] model_encoder_layers_6_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[96] model_encoder_layers_6_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[97] 
model_encoder_layers_6_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[98] model_encoder_layers_6_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[99] model_encoder_layers_6_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[100] model_encoder_layers_6_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[101] model_encoder_layers_6_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[102] model_encoder_layers_6_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[103] model_encoder_layers_6_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[104] model_encoder_layers_6_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[105] model_encoder_layers_6_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[106] model_encoder_layers_6_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[107] model_encoder_layers_6_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[108] model_encoder_layers_6_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[109] model_encoder_layers_7_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[110] model_encoder_layers_7_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[111] model_encoder_layers_7_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[112] model_encoder_layers_7_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[113] model_encoder_layers_7_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[114] model_encoder_layers_7_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[115] model_encoder_layers_7_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[116] 
model_encoder_layers_7_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[117] model_encoder_layers_7_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[118] model_encoder_layers_7_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[119] model_encoder_layers_7_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[120] model_encoder_layers_7_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[121] model_encoder_layers_7_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[122] model_encoder_layers_7_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[123] model_encoder_layers_7_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[124] model_encoder_layers_8_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[125] model_encoder_layers_8_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[126] model_encoder_layers_8_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[127] model_encoder_layers_8_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[128] model_encoder_layers_8_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[129] model_encoder_layers_8_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[130] model_encoder_layers_8_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[131] model_encoder_layers_8_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[132] model_encoder_layers_8_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[133] model_encoder_layers_8_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[134] model_encoder_layers_8_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[135] model_encoder_layers_8_fc2_weight4: 
R.Tensor((1280, 5120), dtype="float16") = packed_params[136] model_encoder_layers_8_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[137] model_encoder_layers_8_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[138] model_encoder_layers_8_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[139] model_encoder_layers_9_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[140] model_encoder_layers_9_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[141] model_encoder_layers_9_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[142] model_encoder_layers_9_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[143] model_encoder_layers_9_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[144] model_encoder_layers_9_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[145] model_encoder_layers_9_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[146] model_encoder_layers_9_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[147] model_encoder_layers_9_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[148] model_encoder_layers_9_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[149] model_encoder_layers_9_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[150] model_encoder_layers_9_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[151] model_encoder_layers_9_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[152] model_encoder_layers_9_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[153] model_encoder_layers_9_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[154] model_encoder_layers_10_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") 
= packed_params[155] model_encoder_layers_10_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[156] model_encoder_layers_10_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[157] model_encoder_layers_10_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[158] model_encoder_layers_10_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[159] model_encoder_layers_10_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[160] model_encoder_layers_10_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[161] model_encoder_layers_10_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[162] model_encoder_layers_10_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[163] model_encoder_layers_10_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[164] model_encoder_layers_10_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[165] model_encoder_layers_10_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[166] model_encoder_layers_10_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[167] model_encoder_layers_10_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[168] model_encoder_layers_10_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[169] model_encoder_layers_11_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[170] model_encoder_layers_11_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[171] model_encoder_layers_11_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[172] model_encoder_layers_11_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[173] model_encoder_layers_11_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = 
packed_params[174] model_encoder_layers_11_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[175] model_encoder_layers_11_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[176] model_encoder_layers_11_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[177] model_encoder_layers_11_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[178] model_encoder_layers_11_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[179] model_encoder_layers_11_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[180] model_encoder_layers_11_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[181] model_encoder_layers_11_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[182] model_encoder_layers_11_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[183] model_encoder_layers_11_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[184] model_encoder_layers_12_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[185] model_encoder_layers_12_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[186] model_encoder_layers_12_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[187] model_encoder_layers_12_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[188] model_encoder_layers_12_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[189] model_encoder_layers_12_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[190] model_encoder_layers_12_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[191] model_encoder_layers_12_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[192] model_encoder_layers_12_self_attn_layer_norm_bias4: R.Tensor((1280,), 
dtype="float16") = packed_params[193] model_encoder_layers_12_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[194] model_encoder_layers_12_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[195] model_encoder_layers_12_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[196] model_encoder_layers_12_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[197] model_encoder_layers_12_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[198] model_encoder_layers_12_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[199] model_encoder_layers_13_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[200] model_encoder_layers_13_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[201] model_encoder_layers_13_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[202] model_encoder_layers_13_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[203] model_encoder_layers_13_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[204] model_encoder_layers_13_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[205] model_encoder_layers_13_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[206] model_encoder_layers_13_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[207] model_encoder_layers_13_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[208] model_encoder_layers_13_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[209] model_encoder_layers_13_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[210] model_encoder_layers_13_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[211] model_encoder_layers_13_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[212] 
model_encoder_layers_13_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[213] model_encoder_layers_13_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[214] model_encoder_layers_14_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[215] model_encoder_layers_14_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[216] model_encoder_layers_14_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[217] model_encoder_layers_14_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[218] model_encoder_layers_14_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[219] model_encoder_layers_14_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[220] model_encoder_layers_14_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[221] model_encoder_layers_14_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[222] model_encoder_layers_14_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[223] model_encoder_layers_14_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[224] model_encoder_layers_14_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[225] model_encoder_layers_14_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[226] model_encoder_layers_14_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[227] model_encoder_layers_14_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[228] model_encoder_layers_14_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[229] model_encoder_layers_15_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[230] model_encoder_layers_15_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[231] 
model_encoder_layers_15_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[232] model_encoder_layers_15_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[233] model_encoder_layers_15_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[234] model_encoder_layers_15_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[235] model_encoder_layers_15_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[236] model_encoder_layers_15_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[237] model_encoder_layers_15_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[238] model_encoder_layers_15_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[239] model_encoder_layers_15_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[240] model_encoder_layers_15_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[241] model_encoder_layers_15_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[242] model_encoder_layers_15_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[243] model_encoder_layers_15_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[244] model_encoder_layers_16_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[245] model_encoder_layers_16_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[246] model_encoder_layers_16_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[247] model_encoder_layers_16_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[248] model_encoder_layers_16_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[249] model_encoder_layers_16_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[250] 
model_encoder_layers_16_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[251] model_encoder_layers_16_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[252] model_encoder_layers_16_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[253] model_encoder_layers_16_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[254] model_encoder_layers_16_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[255] model_encoder_layers_16_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[256] model_encoder_layers_16_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[257] model_encoder_layers_16_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[258] model_encoder_layers_16_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[259] model_encoder_layers_17_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[260] model_encoder_layers_17_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[261] model_encoder_layers_17_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[262] model_encoder_layers_17_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[263] model_encoder_layers_17_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[264] model_encoder_layers_17_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[265] model_encoder_layers_17_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[266] model_encoder_layers_17_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[267] model_encoder_layers_17_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[268] model_encoder_layers_17_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[269] 
model_encoder_layers_17_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[270] model_encoder_layers_17_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[271] model_encoder_layers_17_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[272] model_encoder_layers_17_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[273] model_encoder_layers_17_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[274] model_encoder_layers_18_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[275] model_encoder_layers_18_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[276] model_encoder_layers_18_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[277] model_encoder_layers_18_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[278] model_encoder_layers_18_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[279] model_encoder_layers_18_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[280] model_encoder_layers_18_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[281] model_encoder_layers_18_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[282] model_encoder_layers_18_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[283] model_encoder_layers_18_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[284] model_encoder_layers_18_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[285] model_encoder_layers_18_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[286] model_encoder_layers_18_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[287] model_encoder_layers_18_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[288] model_encoder_layers_18_final_layer_norm_bias4: 
R.Tensor((1280,), dtype="float16") = packed_params[289] model_encoder_layers_19_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[290] model_encoder_layers_19_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[291] model_encoder_layers_19_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[292] model_encoder_layers_19_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[293] model_encoder_layers_19_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[294] model_encoder_layers_19_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[295] model_encoder_layers_19_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[296] model_encoder_layers_19_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[297] model_encoder_layers_19_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[298] model_encoder_layers_19_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[299] model_encoder_layers_19_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[300] model_encoder_layers_19_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[301] model_encoder_layers_19_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[302] model_encoder_layers_19_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[303] model_encoder_layers_19_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[304] model_encoder_layers_20_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[305] model_encoder_layers_20_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[306] model_encoder_layers_20_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[307] model_encoder_layers_20_self_attn_q_proj_weight4: 
R.Tensor((1280, 1280), dtype="float16") = packed_params[308] model_encoder_layers_20_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[309] model_encoder_layers_20_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[310] model_encoder_layers_20_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[311] model_encoder_layers_20_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[312] model_encoder_layers_20_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[313] model_encoder_layers_20_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[314] model_encoder_layers_20_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[315] model_encoder_layers_20_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[316] model_encoder_layers_20_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[317] model_encoder_layers_20_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[318] model_encoder_layers_20_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[319] model_encoder_layers_21_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[320] model_encoder_layers_21_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[321] model_encoder_layers_21_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[322] model_encoder_layers_21_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[323] model_encoder_layers_21_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[324] model_encoder_layers_21_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[325] model_encoder_layers_21_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[326] 
model_encoder_layers_21_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[327] model_encoder_layers_21_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[328] model_encoder_layers_21_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[329] model_encoder_layers_21_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[330] model_encoder_layers_21_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[331] model_encoder_layers_21_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[332] model_encoder_layers_21_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[333] model_encoder_layers_21_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[334] model_encoder_layers_22_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[335] model_encoder_layers_22_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[336] model_encoder_layers_22_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[337] model_encoder_layers_22_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[338] model_encoder_layers_22_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[339] model_encoder_layers_22_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[340] model_encoder_layers_22_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[341] model_encoder_layers_22_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[342] model_encoder_layers_22_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[343] model_encoder_layers_22_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[344] model_encoder_layers_22_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[345] 
model_encoder_layers_22_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[346] model_encoder_layers_22_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[347] model_encoder_layers_22_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[348] model_encoder_layers_22_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[349] model_encoder_layers_23_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[350] model_encoder_layers_23_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[351] model_encoder_layers_23_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[352] model_encoder_layers_23_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[353] model_encoder_layers_23_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[354] model_encoder_layers_23_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[355] model_encoder_layers_23_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[356] model_encoder_layers_23_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[357] model_encoder_layers_23_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[358] model_encoder_layers_23_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[359] model_encoder_layers_23_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[360] model_encoder_layers_23_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[361] model_encoder_layers_23_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[362] model_encoder_layers_23_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[363] model_encoder_layers_23_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[364] 
model_encoder_layers_24_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[365] model_encoder_layers_24_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[366] model_encoder_layers_24_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[367] model_encoder_layers_24_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[368] model_encoder_layers_24_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[369] model_encoder_layers_24_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[370] model_encoder_layers_24_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[371] model_encoder_layers_24_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[372] model_encoder_layers_24_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[373] model_encoder_layers_24_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[374] model_encoder_layers_24_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[375] model_encoder_layers_24_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[376] model_encoder_layers_24_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[377] model_encoder_layers_24_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[378] model_encoder_layers_24_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[379] model_encoder_layers_25_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[380] model_encoder_layers_25_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[381] model_encoder_layers_25_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[382] model_encoder_layers_25_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = 
packed_params[383] model_encoder_layers_25_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[384] model_encoder_layers_25_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[385] model_encoder_layers_25_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[386] model_encoder_layers_25_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[387] model_encoder_layers_25_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[388] model_encoder_layers_25_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[389] model_encoder_layers_25_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[390] model_encoder_layers_25_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[391] model_encoder_layers_25_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[392] model_encoder_layers_25_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[393] model_encoder_layers_25_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[394] model_encoder_layers_26_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[395] model_encoder_layers_26_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[396] model_encoder_layers_26_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[397] model_encoder_layers_26_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[398] model_encoder_layers_26_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[399] model_encoder_layers_26_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[400] model_encoder_layers_26_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[401] model_encoder_layers_26_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = 
packed_params[402] model_encoder_layers_26_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[403] model_encoder_layers_26_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[404] model_encoder_layers_26_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[405] model_encoder_layers_26_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[406] model_encoder_layers_26_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[407] model_encoder_layers_26_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[408] model_encoder_layers_26_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[409] model_encoder_layers_27_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[410] model_encoder_layers_27_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[411] model_encoder_layers_27_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[412] model_encoder_layers_27_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[413] model_encoder_layers_27_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[414] model_encoder_layers_27_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[415] model_encoder_layers_27_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[416] model_encoder_layers_27_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[417] model_encoder_layers_27_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[418] model_encoder_layers_27_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[419] model_encoder_layers_27_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[420] model_encoder_layers_27_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[421] 
model_encoder_layers_27_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[422] model_encoder_layers_27_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[423] model_encoder_layers_27_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[424] model_encoder_layers_28_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[425] model_encoder_layers_28_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[426] model_encoder_layers_28_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[427] model_encoder_layers_28_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[428] model_encoder_layers_28_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[429] model_encoder_layers_28_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[430] model_encoder_layers_28_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[431] model_encoder_layers_28_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[432] model_encoder_layers_28_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[433] model_encoder_layers_28_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[434] model_encoder_layers_28_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[435] model_encoder_layers_28_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[436] model_encoder_layers_28_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[437] model_encoder_layers_28_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[438] model_encoder_layers_28_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[439] model_encoder_layers_29_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[440] 
model_encoder_layers_29_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[441] model_encoder_layers_29_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[442] model_encoder_layers_29_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[443] model_encoder_layers_29_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[444] model_encoder_layers_29_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[445] model_encoder_layers_29_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[446] model_encoder_layers_29_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[447] model_encoder_layers_29_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[448] model_encoder_layers_29_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[449] model_encoder_layers_29_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[450] model_encoder_layers_29_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[451] model_encoder_layers_29_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[452] model_encoder_layers_29_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[453] model_encoder_layers_29_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[454] model_encoder_layers_30_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[455] model_encoder_layers_30_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[456] model_encoder_layers_30_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[457] model_encoder_layers_30_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[458] model_encoder_layers_30_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[459] 
model_encoder_layers_30_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[460] model_encoder_layers_30_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[461] model_encoder_layers_30_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[462] model_encoder_layers_30_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[463] model_encoder_layers_30_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[464] model_encoder_layers_30_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[465] model_encoder_layers_30_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[466] model_encoder_layers_30_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[467] model_encoder_layers_30_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[468] model_encoder_layers_30_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[469] model_encoder_layers_31_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[470] model_encoder_layers_31_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[471] model_encoder_layers_31_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[472] model_encoder_layers_31_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[473] model_encoder_layers_31_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[474] model_encoder_layers_31_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[475] model_encoder_layers_31_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[476] model_encoder_layers_31_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[477] model_encoder_layers_31_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = 
packed_params[478] model_encoder_layers_31_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[479] model_encoder_layers_31_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[480] model_encoder_layers_31_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[481] model_encoder_layers_31_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[482] model_encoder_layers_31_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[483] model_encoder_layers_31_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[484] model_encoder_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[485] model_encoder_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[486] model_decoder_embed_tokens_weight4: R.Tensor((51866, 1280), dtype="float16") = packed_params[487] model_decoder_embed_positions_weight4: R.Tensor((448, 1280), dtype="float16") = packed_params[488] model_decoder_layers_0_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[489] model_decoder_layers_0_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[490] model_decoder_layers_0_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[491] model_decoder_layers_0_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[492] model_decoder_layers_0_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[493] model_decoder_layers_0_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[494] model_decoder_layers_0_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[495] model_decoder_layers_0_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[496] model_decoder_layers_0_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[497] 
model_decoder_layers_0_encoder_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[498] model_decoder_layers_0_encoder_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[499] model_decoder_layers_0_encoder_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[500] model_decoder_layers_0_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[501] model_decoder_layers_0_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[502] model_decoder_layers_0_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[503] model_decoder_layers_0_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[504] model_decoder_layers_0_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[505] model_decoder_layers_0_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[506] model_decoder_layers_0_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[507] model_decoder_layers_0_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[508] model_decoder_layers_0_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[509] model_decoder_layers_0_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[510] model_decoder_layers_0_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[511] model_decoder_layers_0_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[512] model_decoder_layers_1_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[513] model_decoder_layers_1_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[514] model_decoder_layers_1_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[515] model_decoder_layers_1_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = 
packed_params[516] model_decoder_layers_1_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[517] model_decoder_layers_1_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[518] model_decoder_layers_1_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[519] model_decoder_layers_1_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[520] model_decoder_layers_1_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[521] model_decoder_layers_1_encoder_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[522] model_decoder_layers_1_encoder_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[523] model_decoder_layers_1_encoder_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[524] model_decoder_layers_1_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[525] model_decoder_layers_1_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[526] model_decoder_layers_1_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[527] model_decoder_layers_1_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[528] model_decoder_layers_1_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[529] model_decoder_layers_1_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[530] model_decoder_layers_1_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[531] model_decoder_layers_1_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[532] model_decoder_layers_1_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[533] model_decoder_layers_1_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[534] model_decoder_layers_1_final_layer_norm_weight4: R.Tensor((1280,), 
dtype="float16") = packed_params[535] model_decoder_layers_1_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[536] model_decoder_layers_2_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[537] model_decoder_layers_2_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[538] model_decoder_layers_2_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[539] model_decoder_layers_2_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[540] model_decoder_layers_2_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[541] model_decoder_layers_2_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[542] model_decoder_layers_2_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[543] model_decoder_layers_2_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[544] model_decoder_layers_2_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[545] model_decoder_layers_2_encoder_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[546] model_decoder_layers_2_encoder_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[547] model_decoder_layers_2_encoder_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[548] model_decoder_layers_2_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[549] model_decoder_layers_2_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[550] model_decoder_layers_2_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[551] model_decoder_layers_2_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[552] model_decoder_layers_2_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[553] 
model_decoder_layers_2_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[554] model_decoder_layers_2_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[555] model_decoder_layers_2_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[556] model_decoder_layers_2_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[557] model_decoder_layers_2_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[558] model_decoder_layers_2_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[559] model_decoder_layers_2_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[560] model_decoder_layers_3_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[561] model_decoder_layers_3_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[562] model_decoder_layers_3_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[563] model_decoder_layers_3_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[564] model_decoder_layers_3_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[565] model_decoder_layers_3_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[566] model_decoder_layers_3_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[567] model_decoder_layers_3_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[568] model_decoder_layers_3_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[569] model_decoder_layers_3_encoder_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[570] model_decoder_layers_3_encoder_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[571] model_decoder_layers_3_encoder_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[572] 
model_decoder_layers_3_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[573] model_decoder_layers_3_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[574] model_decoder_layers_3_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[575] model_decoder_layers_3_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[576] model_decoder_layers_3_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[577] model_decoder_layers_3_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[578] model_decoder_layers_3_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[579] model_decoder_layers_3_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[580] model_decoder_layers_3_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[581] model_decoder_layers_3_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[582] model_decoder_layers_3_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[583] model_decoder_layers_3_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[584] model_decoder_layers_4_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[585] model_decoder_layers_4_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[586] model_decoder_layers_4_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[587] model_decoder_layers_4_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[588] model_decoder_layers_4_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[589] model_decoder_layers_4_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[590] model_decoder_layers_4_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[591] 
model_decoder_layers_4_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[592] model_decoder_layers_4_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[593] model_decoder_layers_4_encoder_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[594] model_decoder_layers_4_encoder_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[595] model_decoder_layers_4_encoder_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[596] model_decoder_layers_4_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[597] model_decoder_layers_4_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[598] model_decoder_layers_4_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[599] model_decoder_layers_4_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[600] model_decoder_layers_4_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[601] model_decoder_layers_4_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[602] model_decoder_layers_4_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[603] model_decoder_layers_4_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[604] model_decoder_layers_4_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[605] model_decoder_layers_4_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[606] model_decoder_layers_4_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[607] model_decoder_layers_4_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[608] model_decoder_layers_5_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[609] model_decoder_layers_5_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = 
packed_params[610] model_decoder_layers_5_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[611] model_decoder_layers_5_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[612] model_decoder_layers_5_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[613] model_decoder_layers_5_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[614] model_decoder_layers_5_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[615] model_decoder_layers_5_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[616] model_decoder_layers_5_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[617] model_decoder_layers_5_encoder_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[618] model_decoder_layers_5_encoder_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[619] model_decoder_layers_5_encoder_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[620] model_decoder_layers_5_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[621] model_decoder_layers_5_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[622] model_decoder_layers_5_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[623] model_decoder_layers_5_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[624] model_decoder_layers_5_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[625] model_decoder_layers_5_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[626] model_decoder_layers_5_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[627] model_decoder_layers_5_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[628] model_decoder_layers_5_fc2_weight4: 
R.Tensor((1280, 5120), dtype="float16") = packed_params[629] model_decoder_layers_5_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[630] model_decoder_layers_5_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[631] model_decoder_layers_5_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[632] model_decoder_layers_6_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[633] model_decoder_layers_6_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[634] model_decoder_layers_6_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[635] model_decoder_layers_6_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[636] model_decoder_layers_6_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[637] model_decoder_layers_6_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[638] model_decoder_layers_6_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[639] model_decoder_layers_6_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[640] model_decoder_layers_6_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[641] model_decoder_layers_6_encoder_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[642] model_decoder_layers_6_encoder_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[643] model_decoder_layers_6_encoder_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[644] model_decoder_layers_6_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[645] model_decoder_layers_6_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[646] model_decoder_layers_6_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[647] 
model_decoder_layers_6_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[648] model_decoder_layers_6_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[649] model_decoder_layers_6_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[650] model_decoder_layers_6_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[651] model_decoder_layers_6_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[652] model_decoder_layers_6_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[653] model_decoder_layers_6_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[654] model_decoder_layers_6_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[655] model_decoder_layers_6_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[656] model_decoder_layers_7_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[657] model_decoder_layers_7_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[658] model_decoder_layers_7_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[659] model_decoder_layers_7_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[660] model_decoder_layers_7_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[661] model_decoder_layers_7_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[662] model_decoder_layers_7_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[663] model_decoder_layers_7_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[664] model_decoder_layers_7_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[665] model_decoder_layers_7_encoder_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[666] 
model_decoder_layers_7_encoder_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[667] model_decoder_layers_7_encoder_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[668] model_decoder_layers_7_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[669] model_decoder_layers_7_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[670] model_decoder_layers_7_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[671] model_decoder_layers_7_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[672] model_decoder_layers_7_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[673] model_decoder_layers_7_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[674] model_decoder_layers_7_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[675] model_decoder_layers_7_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[676] model_decoder_layers_7_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[677] model_decoder_layers_7_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[678] model_decoder_layers_7_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[679] model_decoder_layers_7_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[680] model_decoder_layers_8_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[681] model_decoder_layers_8_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[682] model_decoder_layers_8_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[683] model_decoder_layers_8_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[684] model_decoder_layers_8_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = 
packed_params[685] model_decoder_layers_8_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[686] model_decoder_layers_8_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[687] model_decoder_layers_8_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[688] model_decoder_layers_8_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[689] model_decoder_layers_8_encoder_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[690] model_decoder_layers_8_encoder_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[691] model_decoder_layers_8_encoder_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[692] model_decoder_layers_8_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[693] model_decoder_layers_8_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[694] model_decoder_layers_8_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[695] model_decoder_layers_8_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[696] model_decoder_layers_8_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[697] model_decoder_layers_8_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[698] model_decoder_layers_8_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[699] model_decoder_layers_8_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[700] model_decoder_layers_8_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[701] model_decoder_layers_8_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[702] model_decoder_layers_8_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[703] model_decoder_layers_8_final_layer_norm_bias4: R.Tensor((1280,), 
dtype="float16") = packed_params[704] model_decoder_layers_9_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[705] model_decoder_layers_9_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[706] model_decoder_layers_9_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[707] model_decoder_layers_9_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[708] model_decoder_layers_9_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[709] model_decoder_layers_9_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[710] model_decoder_layers_9_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[711] model_decoder_layers_9_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[712] model_decoder_layers_9_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[713] model_decoder_layers_9_encoder_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[714] model_decoder_layers_9_encoder_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[715] model_decoder_layers_9_encoder_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[716] model_decoder_layers_9_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[717] model_decoder_layers_9_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[718] model_decoder_layers_9_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[719] model_decoder_layers_9_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[720] model_decoder_layers_9_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[721] model_decoder_layers_9_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = 
packed_params[722] model_decoder_layers_9_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[723] model_decoder_layers_9_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[724] model_decoder_layers_9_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[725] model_decoder_layers_9_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[726] model_decoder_layers_9_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[727] model_decoder_layers_9_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[728] model_decoder_layers_10_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[729] model_decoder_layers_10_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[730] model_decoder_layers_10_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[731] model_decoder_layers_10_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[732] model_decoder_layers_10_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[733] model_decoder_layers_10_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[734] model_decoder_layers_10_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[735] model_decoder_layers_10_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[736] model_decoder_layers_10_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[737] model_decoder_layers_10_encoder_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[738] model_decoder_layers_10_encoder_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[739] model_decoder_layers_10_encoder_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[740] model_decoder_layers_10_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), 
dtype="float16") = packed_params[741] model_decoder_layers_10_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[742] model_decoder_layers_10_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[743] model_decoder_layers_10_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[744] model_decoder_layers_10_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[745] model_decoder_layers_10_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[746] model_decoder_layers_10_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[747] model_decoder_layers_10_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[748] model_decoder_layers_10_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[749] model_decoder_layers_10_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[750] model_decoder_layers_10_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[751] model_decoder_layers_10_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[752] model_decoder_layers_11_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[753] model_decoder_layers_11_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[754] model_decoder_layers_11_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[755] model_decoder_layers_11_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[756] model_decoder_layers_11_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[757] model_decoder_layers_11_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[758] model_decoder_layers_11_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[759] model_decoder_layers_11_self_attn_layer_norm_weight4: 
R.Tensor((1280,), dtype="float16") = packed_params[760] model_decoder_layers_11_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[761] model_decoder_layers_11_encoder_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[762] model_decoder_layers_11_encoder_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[763] model_decoder_layers_11_encoder_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[764] model_decoder_layers_11_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[765] model_decoder_layers_11_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[766] model_decoder_layers_11_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[767] model_decoder_layers_11_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[768] model_decoder_layers_11_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[769] model_decoder_layers_11_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[770] model_decoder_layers_11_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[771] model_decoder_layers_11_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[772] model_decoder_layers_11_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[773] model_decoder_layers_11_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[774] model_decoder_layers_11_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[775] model_decoder_layers_11_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[776] model_decoder_layers_12_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[777] model_decoder_layers_12_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[778] 
model_decoder_layers_12_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[779] model_decoder_layers_12_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[780] model_decoder_layers_12_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[781] model_decoder_layers_12_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[782] model_decoder_layers_12_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[783] model_decoder_layers_12_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[784] model_decoder_layers_12_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[785] model_decoder_layers_12_encoder_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[786] model_decoder_layers_12_encoder_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[787] model_decoder_layers_12_encoder_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[788] model_decoder_layers_12_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[789] model_decoder_layers_12_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[790] model_decoder_layers_12_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[791] model_decoder_layers_12_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[792] model_decoder_layers_12_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[793] model_decoder_layers_12_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[794] model_decoder_layers_12_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[795] model_decoder_layers_12_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[796] model_decoder_layers_12_fc2_weight4: 
R.Tensor((1280, 5120), dtype="float16") = packed_params[797] model_decoder_layers_12_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[798] model_decoder_layers_12_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[799] model_decoder_layers_12_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[800] model_decoder_layers_13_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[801] model_decoder_layers_13_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[802] model_decoder_layers_13_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[803] model_decoder_layers_13_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[804] model_decoder_layers_13_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[805] model_decoder_layers_13_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[806] model_decoder_layers_13_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[807] model_decoder_layers_13_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[808] model_decoder_layers_13_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[809] model_decoder_layers_13_encoder_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[810] model_decoder_layers_13_encoder_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[811] model_decoder_layers_13_encoder_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[812] model_decoder_layers_13_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[813] model_decoder_layers_13_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[814] model_decoder_layers_13_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = 
packed_params[815] model_decoder_layers_13_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[816] model_decoder_layers_13_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[817] model_decoder_layers_13_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[818] model_decoder_layers_13_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[819] model_decoder_layers_13_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[820] model_decoder_layers_13_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[821] model_decoder_layers_13_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[822] model_decoder_layers_13_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[823] model_decoder_layers_13_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[824] model_decoder_layers_14_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[825] model_decoder_layers_14_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[826] model_decoder_layers_14_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[827] model_decoder_layers_14_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[828] model_decoder_layers_14_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[829] model_decoder_layers_14_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[830] model_decoder_layers_14_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[831] model_decoder_layers_14_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[832] model_decoder_layers_14_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[833] model_decoder_layers_14_encoder_attn_k_proj_weight4: R.Tensor((1280, 1280), 
dtype="float16") = packed_params[834] model_decoder_layers_14_encoder_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[835] model_decoder_layers_14_encoder_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[836] model_decoder_layers_14_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[837] model_decoder_layers_14_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[838] model_decoder_layers_14_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[839] model_decoder_layers_14_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[840] model_decoder_layers_14_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[841] model_decoder_layers_14_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[842] model_decoder_layers_14_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[843] model_decoder_layers_14_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[844] model_decoder_layers_14_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[845] model_decoder_layers_14_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[846] model_decoder_layers_14_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[847] model_decoder_layers_14_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[848] model_decoder_layers_15_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[849] model_decoder_layers_15_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[850] model_decoder_layers_15_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[851] model_decoder_layers_15_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[852] 
model_decoder_layers_15_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[853] model_decoder_layers_15_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[854] model_decoder_layers_15_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[855] model_decoder_layers_15_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[856] model_decoder_layers_15_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[857] model_decoder_layers_15_encoder_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[858] model_decoder_layers_15_encoder_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[859] model_decoder_layers_15_encoder_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[860] model_decoder_layers_15_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[861] model_decoder_layers_15_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[862] model_decoder_layers_15_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[863] model_decoder_layers_15_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[864] model_decoder_layers_15_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[865] model_decoder_layers_15_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[866] model_decoder_layers_15_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[867] model_decoder_layers_15_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[868] model_decoder_layers_15_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[869] model_decoder_layers_15_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[870] model_decoder_layers_15_final_layer_norm_weight4: R.Tensor((1280,), 
dtype="float16") = packed_params[871] model_decoder_layers_15_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[872] model_decoder_layers_16_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[873] model_decoder_layers_16_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[874] model_decoder_layers_16_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[875] model_decoder_layers_16_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[876] model_decoder_layers_16_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[877] model_decoder_layers_16_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[878] model_decoder_layers_16_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[879] model_decoder_layers_16_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[880] model_decoder_layers_16_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[881] model_decoder_layers_16_encoder_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[882] model_decoder_layers_16_encoder_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[883] model_decoder_layers_16_encoder_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[884] model_decoder_layers_16_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[885] model_decoder_layers_16_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[886] model_decoder_layers_16_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[887] model_decoder_layers_16_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[888] model_decoder_layers_16_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = 
packed_params[889] model_decoder_layers_16_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[890] model_decoder_layers_16_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[891] model_decoder_layers_16_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[892] model_decoder_layers_16_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[893] model_decoder_layers_16_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[894] model_decoder_layers_16_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[895] model_decoder_layers_16_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[896] model_decoder_layers_17_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[897] model_decoder_layers_17_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[898] model_decoder_layers_17_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[899] model_decoder_layers_17_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[900] model_decoder_layers_17_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[901] model_decoder_layers_17_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[902] model_decoder_layers_17_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[903] model_decoder_layers_17_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[904] model_decoder_layers_17_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[905] model_decoder_layers_17_encoder_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[906] model_decoder_layers_17_encoder_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[907] model_decoder_layers_17_encoder_attn_v_proj_bias4: R.Tensor((1280,), 
dtype="float16") = packed_params[908] model_decoder_layers_17_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[909] model_decoder_layers_17_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[910] model_decoder_layers_17_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[911] model_decoder_layers_17_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[912] model_decoder_layers_17_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[913] model_decoder_layers_17_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[914] model_decoder_layers_17_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[915] model_decoder_layers_17_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[916] model_decoder_layers_17_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[917] model_decoder_layers_17_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[918] model_decoder_layers_17_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[919] model_decoder_layers_17_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[920] model_decoder_layers_18_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[921] model_decoder_layers_18_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[922] model_decoder_layers_18_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[923] model_decoder_layers_18_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[924] model_decoder_layers_18_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[925] model_decoder_layers_18_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[926] model_decoder_layers_18_self_attn_out_proj_bias4: 
R.Tensor((1280,), dtype="float16") = packed_params[927] model_decoder_layers_18_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[928] model_decoder_layers_18_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[929] model_decoder_layers_18_encoder_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[930] model_decoder_layers_18_encoder_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[931] model_decoder_layers_18_encoder_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[932] model_decoder_layers_18_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[933] model_decoder_layers_18_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[934] model_decoder_layers_18_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[935] model_decoder_layers_18_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[936] model_decoder_layers_18_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[937] model_decoder_layers_18_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[938] model_decoder_layers_18_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[939] model_decoder_layers_18_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[940] model_decoder_layers_18_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[941] model_decoder_layers_18_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[942] model_decoder_layers_18_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[943] model_decoder_layers_18_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[944] model_decoder_layers_19_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[945] 
model_decoder_layers_19_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[946] model_decoder_layers_19_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[947] model_decoder_layers_19_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[948] model_decoder_layers_19_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[949] model_decoder_layers_19_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[950] model_decoder_layers_19_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[951] model_decoder_layers_19_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[952] model_decoder_layers_19_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[953] model_decoder_layers_19_encoder_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[954] model_decoder_layers_19_encoder_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[955] model_decoder_layers_19_encoder_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[956] model_decoder_layers_19_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[957] model_decoder_layers_19_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[958] model_decoder_layers_19_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[959] model_decoder_layers_19_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[960] model_decoder_layers_19_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[961] model_decoder_layers_19_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[962] model_decoder_layers_19_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[963] 
model_decoder_layers_19_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[964] model_decoder_layers_19_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[965] model_decoder_layers_19_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[966] model_decoder_layers_19_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[967] model_decoder_layers_19_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[968] model_decoder_layers_20_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[969] model_decoder_layers_20_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[970] model_decoder_layers_20_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[971] model_decoder_layers_20_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[972] model_decoder_layers_20_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[973] model_decoder_layers_20_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[974] model_decoder_layers_20_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[975] model_decoder_layers_20_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[976] model_decoder_layers_20_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[977] model_decoder_layers_20_encoder_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[978] model_decoder_layers_20_encoder_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[979] model_decoder_layers_20_encoder_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[980] model_decoder_layers_20_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[981] model_decoder_layers_20_encoder_attn_q_proj_bias4: R.Tensor((1280,), 
dtype="float16") = packed_params[982] model_decoder_layers_20_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[983] model_decoder_layers_20_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[984] model_decoder_layers_20_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[985] model_decoder_layers_20_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[986] model_decoder_layers_20_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[987] model_decoder_layers_20_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[988] model_decoder_layers_20_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[989] model_decoder_layers_20_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[990] model_decoder_layers_20_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[991] model_decoder_layers_20_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[992] model_decoder_layers_21_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[993] model_decoder_layers_21_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[994] model_decoder_layers_21_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[995] model_decoder_layers_21_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[996] model_decoder_layers_21_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[997] model_decoder_layers_21_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[998] model_decoder_layers_21_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[999] model_decoder_layers_21_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1000] model_decoder_layers_21_self_attn_layer_norm_bias4: 
R.Tensor((1280,), dtype="float16") = packed_params[1001] model_decoder_layers_21_encoder_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1002] model_decoder_layers_21_encoder_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1003] model_decoder_layers_21_encoder_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1004] model_decoder_layers_21_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1005] model_decoder_layers_21_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1006] model_decoder_layers_21_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1007] model_decoder_layers_21_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1008] model_decoder_layers_21_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1009] model_decoder_layers_21_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1010] model_decoder_layers_21_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[1011] model_decoder_layers_21_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[1012] model_decoder_layers_21_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[1013] model_decoder_layers_21_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1014] model_decoder_layers_21_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1015] model_decoder_layers_21_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1016] model_decoder_layers_22_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1017] model_decoder_layers_22_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1018] model_decoder_layers_22_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1019] 
model_decoder_layers_22_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1020] model_decoder_layers_22_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1021] model_decoder_layers_22_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1022] model_decoder_layers_22_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1023] model_decoder_layers_22_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1024] model_decoder_layers_22_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1025] model_decoder_layers_22_encoder_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1026] model_decoder_layers_22_encoder_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1027] model_decoder_layers_22_encoder_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1028] model_decoder_layers_22_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1029] model_decoder_layers_22_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1030] model_decoder_layers_22_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1031] model_decoder_layers_22_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1032] model_decoder_layers_22_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1033] model_decoder_layers_22_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1034] model_decoder_layers_22_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[1035] model_decoder_layers_22_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[1036] model_decoder_layers_22_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[1037] model_decoder_layers_22_fc2_bias4: 
R.Tensor((1280,), dtype="float16") = packed_params[1038] model_decoder_layers_22_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1039] model_decoder_layers_22_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1040] model_decoder_layers_23_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1041] model_decoder_layers_23_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1042] model_decoder_layers_23_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1043] model_decoder_layers_23_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1044] model_decoder_layers_23_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1045] model_decoder_layers_23_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1046] model_decoder_layers_23_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1047] model_decoder_layers_23_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1048] model_decoder_layers_23_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1049] model_decoder_layers_23_encoder_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1050] model_decoder_layers_23_encoder_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1051] model_decoder_layers_23_encoder_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1052] model_decoder_layers_23_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1053] model_decoder_layers_23_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1054] model_decoder_layers_23_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1055] model_decoder_layers_23_encoder_attn_out_proj_bias4: 
R.Tensor((1280,), dtype="float16") = packed_params[1056] model_decoder_layers_23_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1057] model_decoder_layers_23_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1058] model_decoder_layers_23_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[1059] model_decoder_layers_23_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[1060] model_decoder_layers_23_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[1061] model_decoder_layers_23_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1062] model_decoder_layers_23_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1063] model_decoder_layers_23_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1064] model_decoder_layers_24_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1065] model_decoder_layers_24_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1066] model_decoder_layers_24_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1067] model_decoder_layers_24_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1068] model_decoder_layers_24_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1069] model_decoder_layers_24_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1070] model_decoder_layers_24_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1071] model_decoder_layers_24_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1072] model_decoder_layers_24_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1073] model_decoder_layers_24_encoder_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1074] 
model_decoder_layers_24_encoder_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1075] model_decoder_layers_24_encoder_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1076] model_decoder_layers_24_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1077] model_decoder_layers_24_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1078] model_decoder_layers_24_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1079] model_decoder_layers_24_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1080] model_decoder_layers_24_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1081] model_decoder_layers_24_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1082] model_decoder_layers_24_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[1083] model_decoder_layers_24_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[1084] model_decoder_layers_24_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[1085] model_decoder_layers_24_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1086] model_decoder_layers_24_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1087] model_decoder_layers_24_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1088] model_decoder_layers_25_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1089] model_decoder_layers_25_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1090] model_decoder_layers_25_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1091] model_decoder_layers_25_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1092] model_decoder_layers_25_self_attn_q_proj_bias4: R.Tensor((1280,), 
dtype="float16") = packed_params[1093] model_decoder_layers_25_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1094] model_decoder_layers_25_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1095] model_decoder_layers_25_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1096] model_decoder_layers_25_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1097] model_decoder_layers_25_encoder_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1098] model_decoder_layers_25_encoder_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1099] model_decoder_layers_25_encoder_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1100] model_decoder_layers_25_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1101] model_decoder_layers_25_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1102] model_decoder_layers_25_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1103] model_decoder_layers_25_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1104] model_decoder_layers_25_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1105] model_decoder_layers_25_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1106] model_decoder_layers_25_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[1107] model_decoder_layers_25_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[1108] model_decoder_layers_25_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[1109] model_decoder_layers_25_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1110] model_decoder_layers_25_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1111] 
model_decoder_layers_25_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1112] model_decoder_layers_26_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1113] model_decoder_layers_26_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1114] model_decoder_layers_26_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1115] model_decoder_layers_26_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1116] model_decoder_layers_26_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1117] model_decoder_layers_26_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1118] model_decoder_layers_26_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1119] model_decoder_layers_26_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1120] model_decoder_layers_26_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1121] model_decoder_layers_26_encoder_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1122] model_decoder_layers_26_encoder_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1123] model_decoder_layers_26_encoder_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1124] model_decoder_layers_26_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1125] model_decoder_layers_26_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1126] model_decoder_layers_26_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1127] model_decoder_layers_26_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1128] model_decoder_layers_26_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1129] 
model_decoder_layers_26_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1130] model_decoder_layers_26_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[1131] model_decoder_layers_26_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[1132] model_decoder_layers_26_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[1133] model_decoder_layers_26_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1134] model_decoder_layers_26_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1135] model_decoder_layers_26_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1136] model_decoder_layers_27_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1137] model_decoder_layers_27_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1138] model_decoder_layers_27_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1139] model_decoder_layers_27_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1140] model_decoder_layers_27_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1141] model_decoder_layers_27_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1142] model_decoder_layers_27_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1143] model_decoder_layers_27_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1144] model_decoder_layers_27_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1145] model_decoder_layers_27_encoder_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1146] model_decoder_layers_27_encoder_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1147] model_decoder_layers_27_encoder_attn_v_proj_bias4: R.Tensor((1280,), 
dtype="float16") = packed_params[1148] model_decoder_layers_27_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1149] model_decoder_layers_27_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1150] model_decoder_layers_27_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1151] model_decoder_layers_27_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1152] model_decoder_layers_27_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1153] model_decoder_layers_27_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1154] model_decoder_layers_27_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[1155] model_decoder_layers_27_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[1156] model_decoder_layers_27_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[1157] model_decoder_layers_27_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1158] model_decoder_layers_27_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1159] model_decoder_layers_27_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1160] model_decoder_layers_28_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1161] model_decoder_layers_28_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1162] model_decoder_layers_28_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1163] model_decoder_layers_28_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1164] model_decoder_layers_28_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1165] model_decoder_layers_28_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1166] 
model_decoder_layers_28_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1167] model_decoder_layers_28_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1168] model_decoder_layers_28_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1169] model_decoder_layers_28_encoder_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1170] model_decoder_layers_28_encoder_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1171] model_decoder_layers_28_encoder_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1172] model_decoder_layers_28_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1173] model_decoder_layers_28_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1174] model_decoder_layers_28_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1175] model_decoder_layers_28_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1176] model_decoder_layers_28_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1177] model_decoder_layers_28_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1178] model_decoder_layers_28_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[1179] model_decoder_layers_28_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[1180] model_decoder_layers_28_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[1181] model_decoder_layers_28_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1182] model_decoder_layers_28_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1183] model_decoder_layers_28_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1184] model_decoder_layers_29_self_attn_k_proj_weight4: 
R.Tensor((1280, 1280), dtype="float16") = packed_params[1185] model_decoder_layers_29_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1186] model_decoder_layers_29_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1187] model_decoder_layers_29_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1188] model_decoder_layers_29_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1189] model_decoder_layers_29_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1190] model_decoder_layers_29_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1191] model_decoder_layers_29_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1192] model_decoder_layers_29_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1193] model_decoder_layers_29_encoder_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1194] model_decoder_layers_29_encoder_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1195] model_decoder_layers_29_encoder_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1196] model_decoder_layers_29_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1197] model_decoder_layers_29_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1198] model_decoder_layers_29_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1199] model_decoder_layers_29_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1200] model_decoder_layers_29_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1201] model_decoder_layers_29_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1202] model_decoder_layers_29_fc1_weight4: 
R.Tensor((5120, 1280), dtype="float16") = packed_params[1203] model_decoder_layers_29_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[1204] model_decoder_layers_29_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[1205] model_decoder_layers_29_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1206] model_decoder_layers_29_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1207] model_decoder_layers_29_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1208] model_decoder_layers_30_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1209] model_decoder_layers_30_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1210] model_decoder_layers_30_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1211] model_decoder_layers_30_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1212] model_decoder_layers_30_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1213] model_decoder_layers_30_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1214] model_decoder_layers_30_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1215] model_decoder_layers_30_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1216] model_decoder_layers_30_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1217] model_decoder_layers_30_encoder_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1218] model_decoder_layers_30_encoder_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1219] model_decoder_layers_30_encoder_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1220] model_decoder_layers_30_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1221] 
model_decoder_layers_30_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1222] model_decoder_layers_30_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1223] model_decoder_layers_30_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1224] model_decoder_layers_30_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1225] model_decoder_layers_30_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1226] model_decoder_layers_30_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[1227] model_decoder_layers_30_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[1228] model_decoder_layers_30_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[1229] model_decoder_layers_30_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1230] model_decoder_layers_30_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1231] model_decoder_layers_30_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1232] model_decoder_layers_31_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1233] model_decoder_layers_31_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1234] model_decoder_layers_31_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1235] model_decoder_layers_31_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1236] model_decoder_layers_31_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1237] model_decoder_layers_31_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1238] model_decoder_layers_31_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1239] model_decoder_layers_31_self_attn_layer_norm_weight4: R.Tensor((1280,), 
dtype="float16") = packed_params[1240] model_decoder_layers_31_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1241] model_decoder_layers_31_encoder_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1242] model_decoder_layers_31_encoder_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1243] model_decoder_layers_31_encoder_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1244] model_decoder_layers_31_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1245] model_decoder_layers_31_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1246] model_decoder_layers_31_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1247] model_decoder_layers_31_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1248] model_decoder_layers_31_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1249] model_decoder_layers_31_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1250] model_decoder_layers_31_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[1251] model_decoder_layers_31_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[1252] model_decoder_layers_31_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[1253] model_decoder_layers_31_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1254] model_decoder_layers_31_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1255] model_decoder_layers_31_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1256] model_decoder_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1257] model_decoder_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1258] reshape1030: R.Tensor((seq_len,), dtype="int32") = 
R.reshape(input_ids, R.shape([seq_len])) take5: R.Tensor((seq_len, 1280), dtype="float16") = R.take(model_decoder_embed_tokens_weight4, reshape1030, axis=0) reshape1031: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(take5, R.shape([1, seq_len, 1280])) lv198: R.Tensor((seq_len,), dtype="int32") = R.call_pure_packed("vm.builtin.attention_kv_cache_get_query_positions", paged_kv_cache, sinfo_args=(R.Tensor((seq_len,), dtype="int32"),)) take6: R.Tensor((seq_len, 1280), dtype="float16") = R.take(model_decoder_embed_positions_weight4, lv198, axis=0) reshape1032: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(take6, R.shape([1, seq_len, 1280])) add899: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(reshape1031, reshape1032) layer_norm259: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add899, model_decoder_layers_0_self_attn_layer_norm_weight4, model_decoder_layers_0_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims771: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_0_self_attn_q_proj_weight4, axes=None) matmul770: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm259, permute_dims771, out_dtype="void") add900: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul770, model_decoder_layers_0_self_attn_q_proj_bias4) reshape1033: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add900, R.shape([1, seq_len, 20, 64])) permute_dims772: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_0_self_attn_k_proj_weight4, axes=None) matmul771: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm259, permute_dims772, out_dtype="void") reshape1034: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul771, R.shape([1, seq_len, 20, 64])) permute_dims773: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_0_self_attn_v_proj_weight4, axes=None) 
matmul772: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm259, permute_dims773, out_dtype="void") add901: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul772, model_decoder_layers_0_self_attn_v_proj_bias4) reshape1035: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add901, R.shape([1, seq_len, 20, 64])) concat64: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1033, reshape1034, reshape1035), axis=2) reshape1036: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat64, R.shape([seq_len, 60, 64])) lv199 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(0), R.prim_value(T.float32(1)), reshape1036), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1037: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv199, R.shape([1, seq_len, 20, 64])) reshape1038: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1037, R.shape([1, seq_len, 1280])) permute_dims774: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_0_self_attn_out_proj_weight4, axes=None) matmul773: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1038, permute_dims774, out_dtype="void") add902: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul773, model_decoder_layers_0_self_attn_out_proj_bias4) add903: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add899, add902) layer_norm260: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add903, model_decoder_layers_0_encoder_attn_layer_norm_weight4, model_decoder_layers_0_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims775: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_0_encoder_attn_q_proj_weight4, axes=None) matmul774: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm260, permute_dims775, out_dtype="void") add904: 
R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul774, model_decoder_layers_0_encoder_attn_q_proj_bias4) reshape1039: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add904, R.shape([1, seq_len, 20, 64])) reshape1040: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1039, R.shape([seq_len, 20, 64])) lv200 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(0), R.prim_value(T.float32(1)), reshape1040), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1041: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv200, R.shape([1, seq_len, 20, 64])) reshape1042: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1041, R.shape([1, seq_len, 1280])) permute_dims776: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_0_encoder_attn_out_proj_weight4, axes=None) matmul775: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1042, permute_dims776, out_dtype="void") add905: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul775, model_decoder_layers_0_encoder_attn_out_proj_bias4) add906: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add903, add905) layer_norm261: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add906, model_decoder_layers_0_final_layer_norm_weight4, model_decoder_layers_0_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims777: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_0_fc1_weight4, axes=None) matmul776: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm261, permute_dims777, out_dtype="void") add907: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul776, model_decoder_layers_0_fc1_bias4) gelu98: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add907) permute_dims778: R.Tensor((5120, 1280), dtype="float16") = 
R.permute_dims(model_decoder_layers_0_fc2_weight4, axes=None) matmul777: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu98, permute_dims778, out_dtype="void") add908: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul777, model_decoder_layers_0_fc2_bias4) add909: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add906, add908) layer_norm262: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add909, model_decoder_layers_1_self_attn_layer_norm_weight4, model_decoder_layers_1_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims779: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_1_self_attn_q_proj_weight4, axes=None) matmul778: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm262, permute_dims779, out_dtype="void") add910: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul778, model_decoder_layers_1_self_attn_q_proj_bias4) reshape1043: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add910, R.shape([1, seq_len, 20, 64])) permute_dims780: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_1_self_attn_k_proj_weight4, axes=None) matmul779: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm262, permute_dims780, out_dtype="void") reshape1044: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul779, R.shape([1, seq_len, 20, 64])) permute_dims781: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_1_self_attn_v_proj_weight4, axes=None) matmul780: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm262, permute_dims781, out_dtype="void") add911: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul780, model_decoder_layers_1_self_attn_v_proj_bias4) reshape1045: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add911, R.shape([1, seq_len, 20, 64])) concat65: R.Tensor((1, seq_len, 60, 64), 
dtype="float16") = R.concat((reshape1043, reshape1044, reshape1045), axis=2) reshape1046: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat65, R.shape([seq_len, 60, 64])) lv201 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(1), R.prim_value(T.float32(1)), reshape1046), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1047: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv201, R.shape([1, seq_len, 20, 64])) reshape1048: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1047, R.shape([1, seq_len, 1280])) permute_dims782: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_1_self_attn_out_proj_weight4, axes=None) matmul781: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1048, permute_dims782, out_dtype="void") add912: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul781, model_decoder_layers_1_self_attn_out_proj_bias4) add913: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add909, add912) layer_norm263: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add913, model_decoder_layers_1_encoder_attn_layer_norm_weight4, model_decoder_layers_1_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims783: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_1_encoder_attn_q_proj_weight4, axes=None) matmul782: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm263, permute_dims783, out_dtype="void") add914: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul782, model_decoder_layers_1_encoder_attn_q_proj_bias4) reshape1049: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add914, R.shape([1, seq_len, 20, 64])) reshape1050: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1049, R.shape([seq_len, 20, 64])) lv202 = 
R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(1), R.prim_value(T.float32(1)), reshape1050), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1051: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv202, R.shape([1, seq_len, 20, 64])) reshape1052: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1051, R.shape([1, seq_len, 1280])) permute_dims784: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_1_encoder_attn_out_proj_weight4, axes=None) matmul783: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1052, permute_dims784, out_dtype="void") add915: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul783, model_decoder_layers_1_encoder_attn_out_proj_bias4) add916: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add913, add915) layer_norm264: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add916, model_decoder_layers_1_final_layer_norm_weight4, model_decoder_layers_1_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims785: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_1_fc1_weight4, axes=None) matmul784: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm264, permute_dims785, out_dtype="void") add917: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul784, model_decoder_layers_1_fc1_bias4) gelu99: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add917) permute_dims786: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_1_fc2_weight4, axes=None) matmul785: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu99, permute_dims786, out_dtype="void") add918: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul785, model_decoder_layers_1_fc2_bias4) add919: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add916, add918) layer_norm265: R.Tensor((1, 
seq_len, 1280), dtype="float16") = R.nn.layer_norm(add919, model_decoder_layers_2_self_attn_layer_norm_weight4, model_decoder_layers_2_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims787: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_2_self_attn_q_proj_weight4, axes=None) matmul786: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm265, permute_dims787, out_dtype="void") add920: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul786, model_decoder_layers_2_self_attn_q_proj_bias4) reshape1053: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add920, R.shape([1, seq_len, 20, 64])) permute_dims788: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_2_self_attn_k_proj_weight4, axes=None) matmul787: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm265, permute_dims788, out_dtype="void") reshape1054: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul787, R.shape([1, seq_len, 20, 64])) permute_dims789: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_2_self_attn_v_proj_weight4, axes=None) matmul788: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm265, permute_dims789, out_dtype="void") add921: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul788, model_decoder_layers_2_self_attn_v_proj_bias4) reshape1055: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add921, R.shape([1, seq_len, 20, 64])) concat66: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1053, reshape1054, reshape1055), axis=2) reshape1056: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat66, R.shape([seq_len, 60, 64])) lv203 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(2), R.prim_value(T.float32(1)), reshape1056), out_sinfo=R.Tensor((seq_len, 20, 64), 
dtype="float16")) reshape1057: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv203, R.shape([1, seq_len, 20, 64])) reshape1058: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1057, R.shape([1, seq_len, 1280])) permute_dims790: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_2_self_attn_out_proj_weight4, axes=None) matmul789: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1058, permute_dims790, out_dtype="void") add922: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul789, model_decoder_layers_2_self_attn_out_proj_bias4) add923: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add919, add922) layer_norm266: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add923, model_decoder_layers_2_encoder_attn_layer_norm_weight4, model_decoder_layers_2_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims791: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_2_encoder_attn_q_proj_weight4, axes=None) matmul790: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm266, permute_dims791, out_dtype="void") add924: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul790, model_decoder_layers_2_encoder_attn_q_proj_bias4) reshape1059: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add924, R.shape([1, seq_len, 20, 64])) reshape1060: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1059, R.shape([seq_len, 20, 64])) lv204 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(2), R.prim_value(T.float32(1)), reshape1060), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1061: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv204, R.shape([1, seq_len, 20, 64])) reshape1062: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1061, R.shape([1, seq_len, 1280])) 
permute_dims792: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_2_encoder_attn_out_proj_weight4, axes=None) matmul791: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1062, permute_dims792, out_dtype="void") add925: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul791, model_decoder_layers_2_encoder_attn_out_proj_bias4) add926: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add923, add925) layer_norm267: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add926, model_decoder_layers_2_final_layer_norm_weight4, model_decoder_layers_2_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims793: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_2_fc1_weight4, axes=None) matmul792: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm267, permute_dims793, out_dtype="void") add927: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul792, model_decoder_layers_2_fc1_bias4) gelu100: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add927) permute_dims794: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_2_fc2_weight4, axes=None) matmul793: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu100, permute_dims794, out_dtype="void") add928: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul793, model_decoder_layers_2_fc2_bias4) add929: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add926, add928) layer_norm268: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add929, model_decoder_layers_3_self_attn_layer_norm_weight4, model_decoder_layers_3_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims795: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_3_self_attn_q_proj_weight4, axes=None) matmul794: R.Tensor((1, seq_len, 1280), dtype="float16") = 
R.matmul(layer_norm268, permute_dims795, out_dtype="void") add930: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul794, model_decoder_layers_3_self_attn_q_proj_bias4) reshape1063: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add930, R.shape([1, seq_len, 20, 64])) permute_dims796: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_3_self_attn_k_proj_weight4, axes=None) matmul795: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm268, permute_dims796, out_dtype="void") reshape1064: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul795, R.shape([1, seq_len, 20, 64])) permute_dims797: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_3_self_attn_v_proj_weight4, axes=None) matmul796: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm268, permute_dims797, out_dtype="void") add931: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul796, model_decoder_layers_3_self_attn_v_proj_bias4) reshape1065: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add931, R.shape([1, seq_len, 20, 64])) concat67: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1063, reshape1064, reshape1065), axis=2) reshape1066: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat67, R.shape([seq_len, 60, 64])) lv205 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(3), R.prim_value(T.float32(1)), reshape1066), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1067: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv205, R.shape([1, seq_len, 20, 64])) reshape1068: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1067, R.shape([1, seq_len, 1280])) permute_dims798: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_3_self_attn_out_proj_weight4, axes=None) matmul797: R.Tensor((1, seq_len, 1280), 
dtype="float16") = R.matmul(reshape1068, permute_dims798, out_dtype="void") add932: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul797, model_decoder_layers_3_self_attn_out_proj_bias4) add933: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add929, add932) layer_norm269: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add933, model_decoder_layers_3_encoder_attn_layer_norm_weight4, model_decoder_layers_3_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims799: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_3_encoder_attn_q_proj_weight4, axes=None) matmul798: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm269, permute_dims799, out_dtype="void") add934: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul798, model_decoder_layers_3_encoder_attn_q_proj_bias4) reshape1069: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add934, R.shape([1, seq_len, 20, 64])) reshape1070: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1069, R.shape([seq_len, 20, 64])) lv206 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(3), R.prim_value(T.float32(1)), reshape1070), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1071: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv206, R.shape([1, seq_len, 20, 64])) reshape1072: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1071, R.shape([1, seq_len, 1280])) permute_dims800: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_3_encoder_attn_out_proj_weight4, axes=None) matmul799: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1072, permute_dims800, out_dtype="void") add935: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul799, model_decoder_layers_3_encoder_attn_out_proj_bias4) add936: R.Tensor((1, seq_len, 1280), 
dtype="float16") = R.add(add933, add935) layer_norm270: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add936, model_decoder_layers_3_final_layer_norm_weight4, model_decoder_layers_3_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims801: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_3_fc1_weight4, axes=None) matmul800: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm270, permute_dims801, out_dtype="void") add937: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul800, model_decoder_layers_3_fc1_bias4) gelu101: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add937) permute_dims802: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_3_fc2_weight4, axes=None) matmul801: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu101, permute_dims802, out_dtype="void") add938: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul801, model_decoder_layers_3_fc2_bias4) add939: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add936, add938) layer_norm271: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add939, model_decoder_layers_4_self_attn_layer_norm_weight4, model_decoder_layers_4_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims803: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_4_self_attn_q_proj_weight4, axes=None) matmul802: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm271, permute_dims803, out_dtype="void") add940: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul802, model_decoder_layers_4_self_attn_q_proj_bias4) reshape1073: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add940, R.shape([1, seq_len, 20, 64])) permute_dims804: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_4_self_attn_k_proj_weight4, 
axes=None) matmul803: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm271, permute_dims804, out_dtype="void") reshape1074: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul803, R.shape([1, seq_len, 20, 64])) permute_dims805: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_4_self_attn_v_proj_weight4, axes=None) matmul804: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm271, permute_dims805, out_dtype="void") add941: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul804, model_decoder_layers_4_self_attn_v_proj_bias4) reshape1075: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add941, R.shape([1, seq_len, 20, 64])) concat68: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1073, reshape1074, reshape1075), axis=2) reshape1076: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat68, R.shape([seq_len, 60, 64])) lv207 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(4), R.prim_value(T.float32(1)), reshape1076), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1077: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv207, R.shape([1, seq_len, 20, 64])) reshape1078: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1077, R.shape([1, seq_len, 1280])) permute_dims806: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_4_self_attn_out_proj_weight4, axes=None) matmul805: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1078, permute_dims806, out_dtype="void") add942: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul805, model_decoder_layers_4_self_attn_out_proj_bias4) add943: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add939, add942) layer_norm272: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add943, model_decoder_layers_4_encoder_attn_layer_norm_weight4, 
model_decoder_layers_4_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims807: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_4_encoder_attn_q_proj_weight4, axes=None) matmul806: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm272, permute_dims807, out_dtype="void") add944: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul806, model_decoder_layers_4_encoder_attn_q_proj_bias4) reshape1079: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add944, R.shape([1, seq_len, 20, 64])) reshape1080: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1079, R.shape([seq_len, 20, 64])) lv208 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(4), R.prim_value(T.float32(1)), reshape1080), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1081: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv208, R.shape([1, seq_len, 20, 64])) reshape1082: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1081, R.shape([1, seq_len, 1280])) permute_dims808: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_4_encoder_attn_out_proj_weight4, axes=None) matmul807: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1082, permute_dims808, out_dtype="void") add945: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul807, model_decoder_layers_4_encoder_attn_out_proj_bias4) add946: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add943, add945) layer_norm273: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add946, model_decoder_layers_4_final_layer_norm_weight4, model_decoder_layers_4_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims809: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_4_fc1_weight4, axes=None) matmul808: 
R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm273, permute_dims809, out_dtype="void") add947: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul808, model_decoder_layers_4_fc1_bias4) gelu102: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add947) permute_dims810: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_4_fc2_weight4, axes=None) matmul809: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu102, permute_dims810, out_dtype="void") add948: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul809, model_decoder_layers_4_fc2_bias4) add949: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add946, add948) layer_norm274: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add949, model_decoder_layers_5_self_attn_layer_norm_weight4, model_decoder_layers_5_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims811: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_5_self_attn_q_proj_weight4, axes=None) matmul810: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm274, permute_dims811, out_dtype="void") add950: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul810, model_decoder_layers_5_self_attn_q_proj_bias4) reshape1083: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add950, R.shape([1, seq_len, 20, 64])) permute_dims812: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_5_self_attn_k_proj_weight4, axes=None) matmul811: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm274, permute_dims812, out_dtype="void") reshape1084: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul811, R.shape([1, seq_len, 20, 64])) permute_dims813: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_5_self_attn_v_proj_weight4, axes=None) matmul812: R.Tensor((1, seq_len, 1280), 
dtype="float16") = R.matmul(layer_norm274, permute_dims813, out_dtype="void") add951: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul812, model_decoder_layers_5_self_attn_v_proj_bias4) reshape1085: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add951, R.shape([1, seq_len, 20, 64])) concat69: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1083, reshape1084, reshape1085), axis=2) reshape1086: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat69, R.shape([seq_len, 60, 64])) lv209 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(5), R.prim_value(T.float32(1)), reshape1086), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1087: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv209, R.shape([1, seq_len, 20, 64])) reshape1088: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1087, R.shape([1, seq_len, 1280])) permute_dims814: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_5_self_attn_out_proj_weight4, axes=None) matmul813: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1088, permute_dims814, out_dtype="void") add952: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul813, model_decoder_layers_5_self_attn_out_proj_bias4) add953: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add949, add952) layer_norm275: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add953, model_decoder_layers_5_encoder_attn_layer_norm_weight4, model_decoder_layers_5_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims815: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_5_encoder_attn_q_proj_weight4, axes=None) matmul814: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm275, permute_dims815, out_dtype="void") add954: R.Tensor((1, seq_len, 1280), 
dtype="float16") = R.add(matmul814, model_decoder_layers_5_encoder_attn_q_proj_bias4) reshape1089: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add954, R.shape([1, seq_len, 20, 64])) reshape1090: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1089, R.shape([seq_len, 20, 64])) lv210 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(5), R.prim_value(T.float32(1)), reshape1090), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1091: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv210, R.shape([1, seq_len, 20, 64])) reshape1092: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1091, R.shape([1, seq_len, 1280])) permute_dims816: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_5_encoder_attn_out_proj_weight4, axes=None) matmul815: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1092, permute_dims816, out_dtype="void") add955: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul815, model_decoder_layers_5_encoder_attn_out_proj_bias4) add956: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add953, add955) layer_norm276: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add956, model_decoder_layers_5_final_layer_norm_weight4, model_decoder_layers_5_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims817: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_5_fc1_weight4, axes=None) matmul816: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm276, permute_dims817, out_dtype="void") add957: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul816, model_decoder_layers_5_fc1_bias4) gelu103: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add957) permute_dims818: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_5_fc2_weight4, axes=None) 
matmul817: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu103, permute_dims818, out_dtype="void") add958: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul817, model_decoder_layers_5_fc2_bias4) add959: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add956, add958) layer_norm277: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add959, model_decoder_layers_6_self_attn_layer_norm_weight4, model_decoder_layers_6_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims819: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_6_self_attn_q_proj_weight4, axes=None) matmul818: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm277, permute_dims819, out_dtype="void") add960: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul818, model_decoder_layers_6_self_attn_q_proj_bias4) reshape1093: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add960, R.shape([1, seq_len, 20, 64])) permute_dims820: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_6_self_attn_k_proj_weight4, axes=None) matmul819: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm277, permute_dims820, out_dtype="void") reshape1094: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul819, R.shape([1, seq_len, 20, 64])) permute_dims821: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_6_self_attn_v_proj_weight4, axes=None) matmul820: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm277, permute_dims821, out_dtype="void") add961: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul820, model_decoder_layers_6_self_attn_v_proj_bias4) reshape1095: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add961, R.shape([1, seq_len, 20, 64])) concat70: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1093, reshape1094, 
reshape1095), axis=2) reshape1096: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat70, R.shape([seq_len, 60, 64])) lv211 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(6), R.prim_value(T.float32(1)), reshape1096), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1097: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv211, R.shape([1, seq_len, 20, 64])) reshape1098: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1097, R.shape([1, seq_len, 1280])) permute_dims822: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_6_self_attn_out_proj_weight4, axes=None) matmul821: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1098, permute_dims822, out_dtype="void") add962: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul821, model_decoder_layers_6_self_attn_out_proj_bias4) add963: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add959, add962) layer_norm278: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add963, model_decoder_layers_6_encoder_attn_layer_norm_weight4, model_decoder_layers_6_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims823: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_6_encoder_attn_q_proj_weight4, axes=None) matmul822: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm278, permute_dims823, out_dtype="void") add964: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul822, model_decoder_layers_6_encoder_attn_q_proj_bias4) reshape1099: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add964, R.shape([1, seq_len, 20, 64])) reshape1100: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1099, R.shape([seq_len, 20, 64])) lv212 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(6), 
R.prim_value(T.float32(1)), reshape1100), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1101: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv212, R.shape([1, seq_len, 20, 64])) reshape1102: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1101, R.shape([1, seq_len, 1280])) permute_dims824: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_6_encoder_attn_out_proj_weight4, axes=None) matmul823: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1102, permute_dims824, out_dtype="void") add965: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul823, model_decoder_layers_6_encoder_attn_out_proj_bias4) add966: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add963, add965) layer_norm279: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add966, model_decoder_layers_6_final_layer_norm_weight4, model_decoder_layers_6_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims825: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_6_fc1_weight4, axes=None) matmul824: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm279, permute_dims825, out_dtype="void") add967: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul824, model_decoder_layers_6_fc1_bias4) gelu104: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add967) permute_dims826: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_6_fc2_weight4, axes=None) matmul825: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu104, permute_dims826, out_dtype="void") add968: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul825, model_decoder_layers_6_fc2_bias4) add969: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add966, add968) layer_norm280: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add969, 
model_decoder_layers_7_self_attn_layer_norm_weight4, model_decoder_layers_7_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims827: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_7_self_attn_q_proj_weight4, axes=None) matmul826: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm280, permute_dims827, out_dtype="void") add970: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul826, model_decoder_layers_7_self_attn_q_proj_bias4) reshape1103: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add970, R.shape([1, seq_len, 20, 64])) permute_dims828: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_7_self_attn_k_proj_weight4, axes=None) matmul827: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm280, permute_dims828, out_dtype="void") reshape1104: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul827, R.shape([1, seq_len, 20, 64])) permute_dims829: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_7_self_attn_v_proj_weight4, axes=None) matmul828: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm280, permute_dims829, out_dtype="void") add971: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul828, model_decoder_layers_7_self_attn_v_proj_bias4) reshape1105: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add971, R.shape([1, seq_len, 20, 64])) concat71: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1103, reshape1104, reshape1105), axis=2) reshape1106: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat71, R.shape([seq_len, 60, 64])) lv213 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(7), R.prim_value(T.float32(1)), reshape1106), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1107: R.Tensor((1, seq_len, 20, 64), 
dtype="float16") = R.reshape(lv213, R.shape([1, seq_len, 20, 64])) reshape1108: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1107, R.shape([1, seq_len, 1280])) permute_dims830: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_7_self_attn_out_proj_weight4, axes=None) matmul829: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1108, permute_dims830, out_dtype="void") add972: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul829, model_decoder_layers_7_self_attn_out_proj_bias4) add973: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add969, add972) layer_norm281: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add973, model_decoder_layers_7_encoder_attn_layer_norm_weight4, model_decoder_layers_7_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims831: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_7_encoder_attn_q_proj_weight4, axes=None) matmul830: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm281, permute_dims831, out_dtype="void") add974: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul830, model_decoder_layers_7_encoder_attn_q_proj_bias4) reshape1109: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add974, R.shape([1, seq_len, 20, 64])) reshape1110: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1109, R.shape([seq_len, 20, 64])) lv214 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(7), R.prim_value(T.float32(1)), reshape1110), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1111: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv214, R.shape([1, seq_len, 20, 64])) reshape1112: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1111, R.shape([1, seq_len, 1280])) permute_dims832: R.Tensor((1280, 1280), dtype="float16") = 
R.permute_dims(model_decoder_layers_7_encoder_attn_out_proj_weight4, axes=None) matmul831: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1112, permute_dims832, out_dtype="void") add975: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul831, model_decoder_layers_7_encoder_attn_out_proj_bias4) add976: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add973, add975) layer_norm282: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add976, model_decoder_layers_7_final_layer_norm_weight4, model_decoder_layers_7_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims833: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_7_fc1_weight4, axes=None) matmul832: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm282, permute_dims833, out_dtype="void") add977: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul832, model_decoder_layers_7_fc1_bias4) gelu105: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add977) permute_dims834: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_7_fc2_weight4, axes=None) matmul833: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu105, permute_dims834, out_dtype="void") add978: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul833, model_decoder_layers_7_fc2_bias4) add979: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add976, add978) layer_norm283: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add979, model_decoder_layers_8_self_attn_layer_norm_weight4, model_decoder_layers_8_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims835: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_8_self_attn_q_proj_weight4, axes=None) matmul834: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm283, permute_dims835, out_dtype="void") 
add980: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul834, model_decoder_layers_8_self_attn_q_proj_bias4) reshape1113: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add980, R.shape([1, seq_len, 20, 64])) permute_dims836: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_8_self_attn_k_proj_weight4, axes=None) matmul835: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm283, permute_dims836, out_dtype="void") reshape1114: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul835, R.shape([1, seq_len, 20, 64])) permute_dims837: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_8_self_attn_v_proj_weight4, axes=None) matmul836: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm283, permute_dims837, out_dtype="void") add981: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul836, model_decoder_layers_8_self_attn_v_proj_bias4) reshape1115: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add981, R.shape([1, seq_len, 20, 64])) concat72: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1113, reshape1114, reshape1115), axis=2) reshape1116: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat72, R.shape([seq_len, 60, 64])) lv215 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(8), R.prim_value(T.float32(1)), reshape1116), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1117: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv215, R.shape([1, seq_len, 20, 64])) reshape1118: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1117, R.shape([1, seq_len, 1280])) permute_dims838: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_8_self_attn_out_proj_weight4, axes=None) matmul837: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1118, permute_dims838, 
out_dtype="void") add982: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul837, model_decoder_layers_8_self_attn_out_proj_bias4) add983: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add979, add982) layer_norm284: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add983, model_decoder_layers_8_encoder_attn_layer_norm_weight4, model_decoder_layers_8_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims839: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_8_encoder_attn_q_proj_weight4, axes=None) matmul838: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm284, permute_dims839, out_dtype="void") add984: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul838, model_decoder_layers_8_encoder_attn_q_proj_bias4) reshape1119: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add984, R.shape([1, seq_len, 20, 64])) reshape1120: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1119, R.shape([seq_len, 20, 64])) lv216 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(8), R.prim_value(T.float32(1)), reshape1120), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1121: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv216, R.shape([1, seq_len, 20, 64])) reshape1122: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1121, R.shape([1, seq_len, 1280])) permute_dims840: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_8_encoder_attn_out_proj_weight4, axes=None) matmul839: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1122, permute_dims840, out_dtype="void") add985: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul839, model_decoder_layers_8_encoder_attn_out_proj_bias4) add986: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add983, add985) layer_norm285: 
R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add986, model_decoder_layers_8_final_layer_norm_weight4, model_decoder_layers_8_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims841: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_8_fc1_weight4, axes=None) matmul840: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm285, permute_dims841, out_dtype="void") add987: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul840, model_decoder_layers_8_fc1_bias4) gelu106: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add987) permute_dims842: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_8_fc2_weight4, axes=None) matmul841: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu106, permute_dims842, out_dtype="void") add988: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul841, model_decoder_layers_8_fc2_bias4) add989: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add986, add988) layer_norm286: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add989, model_decoder_layers_9_self_attn_layer_norm_weight4, model_decoder_layers_9_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims843: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_9_self_attn_q_proj_weight4, axes=None) matmul842: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm286, permute_dims843, out_dtype="void") add990: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul842, model_decoder_layers_9_self_attn_q_proj_bias4) reshape1123: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add990, R.shape([1, seq_len, 20, 64])) permute_dims844: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_9_self_attn_k_proj_weight4, axes=None) matmul843: R.Tensor((1, seq_len, 1280), 
dtype="float16") = R.matmul(layer_norm286, permute_dims844, out_dtype="void") reshape1124: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul843, R.shape([1, seq_len, 20, 64])) permute_dims845: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_9_self_attn_v_proj_weight4, axes=None) matmul844: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm286, permute_dims845, out_dtype="void") add991: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul844, model_decoder_layers_9_self_attn_v_proj_bias4) reshape1125: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add991, R.shape([1, seq_len, 20, 64])) concat73: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1123, reshape1124, reshape1125), axis=2) reshape1126: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat73, R.shape([seq_len, 60, 64])) lv217 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(9), R.prim_value(T.float32(1)), reshape1126), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1127: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv217, R.shape([1, seq_len, 20, 64])) reshape1128: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1127, R.shape([1, seq_len, 1280])) permute_dims846: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_9_self_attn_out_proj_weight4, axes=None) matmul845: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1128, permute_dims846, out_dtype="void") add992: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul845, model_decoder_layers_9_self_attn_out_proj_bias4) add993: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add989, add992) layer_norm287: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add993, model_decoder_layers_9_encoder_attn_layer_norm_weight4, model_decoder_layers_9_encoder_attn_layer_norm_bias4, 
axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims847: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_9_encoder_attn_q_proj_weight4, axes=None) matmul846: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm287, permute_dims847, out_dtype="void") add994: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul846, model_decoder_layers_9_encoder_attn_q_proj_bias4) reshape1129: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add994, R.shape([1, seq_len, 20, 64])) reshape1130: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1129, R.shape([seq_len, 20, 64])) lv218 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(9), R.prim_value(T.float32(1)), reshape1130), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1131: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv218, R.shape([1, seq_len, 20, 64])) reshape1132: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1131, R.shape([1, seq_len, 1280])) permute_dims848: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_9_encoder_attn_out_proj_weight4, axes=None) matmul847: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1132, permute_dims848, out_dtype="void") add995: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul847, model_decoder_layers_9_encoder_attn_out_proj_bias4) add996: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add993, add995) layer_norm288: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add996, model_decoder_layers_9_final_layer_norm_weight4, model_decoder_layers_9_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims849: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_9_fc1_weight4, axes=None) matmul848: R.Tensor((1, seq_len, 5120), dtype="float16") = 
R.matmul(layer_norm288, permute_dims849, out_dtype="void") add997: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul848, model_decoder_layers_9_fc1_bias4) gelu107: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add997) permute_dims850: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_9_fc2_weight4, axes=None) matmul849: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu107, permute_dims850, out_dtype="void") add998: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul849, model_decoder_layers_9_fc2_bias4) add999: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add996, add998) layer_norm289: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add999, model_decoder_layers_10_self_attn_layer_norm_weight4, model_decoder_layers_10_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims851: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_10_self_attn_q_proj_weight4, axes=None) matmul850: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm289, permute_dims851, out_dtype="void") add1000: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul850, model_decoder_layers_10_self_attn_q_proj_bias4) reshape1133: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1000, R.shape([1, seq_len, 20, 64])) permute_dims852: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_10_self_attn_k_proj_weight4, axes=None) matmul851: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm289, permute_dims852, out_dtype="void") reshape1134: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul851, R.shape([1, seq_len, 20, 64])) permute_dims853: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_10_self_attn_v_proj_weight4, axes=None) matmul852: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm289, 
permute_dims853, out_dtype="void") add1001: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul852, model_decoder_layers_10_self_attn_v_proj_bias4) reshape1135: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1001, R.shape([1, seq_len, 20, 64])) concat74: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1133, reshape1134, reshape1135), axis=2) reshape1136: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat74, R.shape([seq_len, 60, 64])) lv219 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(10), R.prim_value(T.float32(1)), reshape1136), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1137: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv219, R.shape([1, seq_len, 20, 64])) reshape1138: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1137, R.shape([1, seq_len, 1280])) permute_dims854: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_10_self_attn_out_proj_weight4, axes=None) matmul853: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1138, permute_dims854, out_dtype="void") add1002: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul853, model_decoder_layers_10_self_attn_out_proj_bias4) add1003: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add999, add1002) layer_norm290: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1003, model_decoder_layers_10_encoder_attn_layer_norm_weight4, model_decoder_layers_10_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims855: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_10_encoder_attn_q_proj_weight4, axes=None) matmul854: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm290, permute_dims855, out_dtype="void") add1004: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul854, 
model_decoder_layers_10_encoder_attn_q_proj_bias4) reshape1139: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1004, R.shape([1, seq_len, 20, 64])) reshape1140: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1139, R.shape([seq_len, 20, 64])) lv220 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(10), R.prim_value(T.float32(1)), reshape1140), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1141: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv220, R.shape([1, seq_len, 20, 64])) reshape1142: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1141, R.shape([1, seq_len, 1280])) permute_dims856: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_10_encoder_attn_out_proj_weight4, axes=None) matmul855: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1142, permute_dims856, out_dtype="void") add1005: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul855, model_decoder_layers_10_encoder_attn_out_proj_bias4) add1006: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1003, add1005) layer_norm291: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1006, model_decoder_layers_10_final_layer_norm_weight4, model_decoder_layers_10_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims857: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_10_fc1_weight4, axes=None) matmul856: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm291, permute_dims857, out_dtype="void") add1007: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul856, model_decoder_layers_10_fc1_bias4) gelu108: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add1007) permute_dims858: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_10_fc2_weight4, axes=None) matmul857: R.Tensor((1, 
seq_len, 1280), dtype="float16") = R.matmul(gelu108, permute_dims858, out_dtype="void") add1008: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul857, model_decoder_layers_10_fc2_bias4) add1009: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1006, add1008) layer_norm292: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1009, model_decoder_layers_11_self_attn_layer_norm_weight4, model_decoder_layers_11_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims859: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_11_self_attn_q_proj_weight4, axes=None) matmul858: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm292, permute_dims859, out_dtype="void") add1010: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul858, model_decoder_layers_11_self_attn_q_proj_bias4) reshape1143: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1010, R.shape([1, seq_len, 20, 64])) permute_dims860: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_11_self_attn_k_proj_weight4, axes=None) matmul859: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm292, permute_dims860, out_dtype="void") reshape1144: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul859, R.shape([1, seq_len, 20, 64])) permute_dims861: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_11_self_attn_v_proj_weight4, axes=None) matmul860: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm292, permute_dims861, out_dtype="void") add1011: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul860, model_decoder_layers_11_self_attn_v_proj_bias4) reshape1145: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1011, R.shape([1, seq_len, 20, 64])) concat75: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1143, reshape1144, reshape1145), 
axis=2) reshape1146: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat75, R.shape([seq_len, 60, 64])) lv221 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(11), R.prim_value(T.float32(1)), reshape1146), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1147: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv221, R.shape([1, seq_len, 20, 64])) reshape1148: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1147, R.shape([1, seq_len, 1280])) permute_dims862: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_11_self_attn_out_proj_weight4, axes=None) matmul861: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1148, permute_dims862, out_dtype="void") add1012: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul861, model_decoder_layers_11_self_attn_out_proj_bias4) add1013: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1009, add1012) layer_norm293: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1013, model_decoder_layers_11_encoder_attn_layer_norm_weight4, model_decoder_layers_11_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims863: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_11_encoder_attn_q_proj_weight4, axes=None) matmul862: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm293, permute_dims863, out_dtype="void") add1014: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul862, model_decoder_layers_11_encoder_attn_q_proj_bias4) reshape1149: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1014, R.shape([1, seq_len, 20, 64])) reshape1150: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1149, R.shape([seq_len, 20, 64])) lv222 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(11), 
R.prim_value(T.float32(1)), reshape1150), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1151: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv222, R.shape([1, seq_len, 20, 64])) reshape1152: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1151, R.shape([1, seq_len, 1280])) permute_dims864: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_11_encoder_attn_out_proj_weight4, axes=None) matmul863: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1152, permute_dims864, out_dtype="void") add1015: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul863, model_decoder_layers_11_encoder_attn_out_proj_bias4) add1016: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1013, add1015) layer_norm294: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1016, model_decoder_layers_11_final_layer_norm_weight4, model_decoder_layers_11_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims865: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_11_fc1_weight4, axes=None) matmul864: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm294, permute_dims865, out_dtype="void") add1017: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul864, model_decoder_layers_11_fc1_bias4) gelu109: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add1017) permute_dims866: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_11_fc2_weight4, axes=None) matmul865: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu109, permute_dims866, out_dtype="void") add1018: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul865, model_decoder_layers_11_fc2_bias4) add1019: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1016, add1018) layer_norm295: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1019, 
model_decoder_layers_12_self_attn_layer_norm_weight4, model_decoder_layers_12_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims867: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_12_self_attn_q_proj_weight4, axes=None) matmul866: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm295, permute_dims867, out_dtype="void") add1020: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul866, model_decoder_layers_12_self_attn_q_proj_bias4) reshape1153: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1020, R.shape([1, seq_len, 20, 64])) permute_dims868: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_12_self_attn_k_proj_weight4, axes=None) matmul867: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm295, permute_dims868, out_dtype="void") reshape1154: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul867, R.shape([1, seq_len, 20, 64])) permute_dims869: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_12_self_attn_v_proj_weight4, axes=None) matmul868: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm295, permute_dims869, out_dtype="void") add1021: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul868, model_decoder_layers_12_self_attn_v_proj_bias4) reshape1155: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1021, R.shape([1, seq_len, 20, 64])) concat76: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1153, reshape1154, reshape1155), axis=2) reshape1156: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat76, R.shape([seq_len, 60, 64])) lv223 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(12), R.prim_value(T.float32(1)), reshape1156), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1157: R.Tensor((1, seq_len, 20, 
64), dtype="float16") = R.reshape(lv223, R.shape([1, seq_len, 20, 64])) reshape1158: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1157, R.shape([1, seq_len, 1280])) permute_dims870: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_12_self_attn_out_proj_weight4, axes=None) matmul869: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1158, permute_dims870, out_dtype="void") add1022: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul869, model_decoder_layers_12_self_attn_out_proj_bias4) add1023: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1019, add1022) layer_norm296: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1023, model_decoder_layers_12_encoder_attn_layer_norm_weight4, model_decoder_layers_12_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims871: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_12_encoder_attn_q_proj_weight4, axes=None) matmul870: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm296, permute_dims871, out_dtype="void") add1024: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul870, model_decoder_layers_12_encoder_attn_q_proj_bias4) reshape1159: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1024, R.shape([1, seq_len, 20, 64])) reshape1160: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1159, R.shape([seq_len, 20, 64])) lv224 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(12), R.prim_value(T.float32(1)), reshape1160), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1161: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv224, R.shape([1, seq_len, 20, 64])) reshape1162: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1161, R.shape([1, seq_len, 1280])) permute_dims872: R.Tensor((1280, 1280), 
dtype="float16") = R.permute_dims(model_decoder_layers_12_encoder_attn_out_proj_weight4, axes=None) matmul871: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1162, permute_dims872, out_dtype="void") add1025: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul871, model_decoder_layers_12_encoder_attn_out_proj_bias4) add1026: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1023, add1025) layer_norm297: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1026, model_decoder_layers_12_final_layer_norm_weight4, model_decoder_layers_12_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims873: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_12_fc1_weight4, axes=None) matmul872: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm297, permute_dims873, out_dtype="void") add1027: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul872, model_decoder_layers_12_fc1_bias4) gelu110: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add1027) permute_dims874: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_12_fc2_weight4, axes=None) matmul873: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu110, permute_dims874, out_dtype="void") add1028: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul873, model_decoder_layers_12_fc2_bias4) add1029: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1026, add1028) layer_norm298: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1029, model_decoder_layers_13_self_attn_layer_norm_weight4, model_decoder_layers_13_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims875: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_13_self_attn_q_proj_weight4, axes=None) matmul874: R.Tensor((1, seq_len, 1280), dtype="float16") = 
R.matmul(layer_norm298, permute_dims875, out_dtype="void") add1030: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul874, model_decoder_layers_13_self_attn_q_proj_bias4) reshape1163: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1030, R.shape([1, seq_len, 20, 64])) permute_dims876: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_13_self_attn_k_proj_weight4, axes=None) matmul875: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm298, permute_dims876, out_dtype="void") reshape1164: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul875, R.shape([1, seq_len, 20, 64])) permute_dims877: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_13_self_attn_v_proj_weight4, axes=None) matmul876: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm298, permute_dims877, out_dtype="void") add1031: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul876, model_decoder_layers_13_self_attn_v_proj_bias4) reshape1165: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1031, R.shape([1, seq_len, 20, 64])) concat77: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1163, reshape1164, reshape1165), axis=2) reshape1166: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat77, R.shape([seq_len, 60, 64])) lv225 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(13), R.prim_value(T.float32(1)), reshape1166), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1167: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv225, R.shape([1, seq_len, 20, 64])) reshape1168: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1167, R.shape([1, seq_len, 1280])) permute_dims878: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_13_self_attn_out_proj_weight4, axes=None) matmul877: R.Tensor((1, seq_len, 1280), 
dtype="float16") = R.matmul(reshape1168, permute_dims878, out_dtype="void") add1032: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul877, model_decoder_layers_13_self_attn_out_proj_bias4) add1033: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1029, add1032) layer_norm299: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1033, model_decoder_layers_13_encoder_attn_layer_norm_weight4, model_decoder_layers_13_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims879: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_13_encoder_attn_q_proj_weight4, axes=None) matmul878: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm299, permute_dims879, out_dtype="void") add1034: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul878, model_decoder_layers_13_encoder_attn_q_proj_bias4) reshape1169: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1034, R.shape([1, seq_len, 20, 64])) reshape1170: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1169, R.shape([seq_len, 20, 64])) lv226 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(13), R.prim_value(T.float32(1)), reshape1170), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1171: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv226, R.shape([1, seq_len, 20, 64])) reshape1172: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1171, R.shape([1, seq_len, 1280])) permute_dims880: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_13_encoder_attn_out_proj_weight4, axes=None) matmul879: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1172, permute_dims880, out_dtype="void") add1035: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul879, model_decoder_layers_13_encoder_attn_out_proj_bias4) add1036: R.Tensor((1, seq_len, 
1280), dtype="float16") = R.add(add1033, add1035) layer_norm300: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1036, model_decoder_layers_13_final_layer_norm_weight4, model_decoder_layers_13_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims881: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_13_fc1_weight4, axes=None) matmul880: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm300, permute_dims881, out_dtype="void") add1037: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul880, model_decoder_layers_13_fc1_bias4) gelu111: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add1037) permute_dims882: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_13_fc2_weight4, axes=None) matmul881: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu111, permute_dims882, out_dtype="void") add1038: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul881, model_decoder_layers_13_fc2_bias4) add1039: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1036, add1038) layer_norm301: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1039, model_decoder_layers_14_self_attn_layer_norm_weight4, model_decoder_layers_14_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims883: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_14_self_attn_q_proj_weight4, axes=None) matmul882: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm301, permute_dims883, out_dtype="void") add1040: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul882, model_decoder_layers_14_self_attn_q_proj_bias4) reshape1173: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1040, R.shape([1, seq_len, 20, 64])) permute_dims884: R.Tensor((1280, 1280), dtype="float16") = 
R.permute_dims(model_decoder_layers_14_self_attn_k_proj_weight4, axes=None) matmul883: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm301, permute_dims884, out_dtype="void") reshape1174: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul883, R.shape([1, seq_len, 20, 64])) permute_dims885: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_14_self_attn_v_proj_weight4, axes=None) matmul884: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm301, permute_dims885, out_dtype="void") add1041: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul884, model_decoder_layers_14_self_attn_v_proj_bias4) reshape1175: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1041, R.shape([1, seq_len, 20, 64])) concat78: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1173, reshape1174, reshape1175), axis=2) reshape1176: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat78, R.shape([seq_len, 60, 64])) lv227 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(14), R.prim_value(T.float32(1)), reshape1176), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1177: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv227, R.shape([1, seq_len, 20, 64])) reshape1178: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1177, R.shape([1, seq_len, 1280])) permute_dims886: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_14_self_attn_out_proj_weight4, axes=None) matmul885: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1178, permute_dims886, out_dtype="void") add1042: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul885, model_decoder_layers_14_self_attn_out_proj_bias4) add1043: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1039, add1042) layer_norm302: R.Tensor((1, seq_len, 1280), dtype="float16") = 
R.nn.layer_norm(add1043, model_decoder_layers_14_encoder_attn_layer_norm_weight4, model_decoder_layers_14_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims887: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_14_encoder_attn_q_proj_weight4, axes=None) matmul886: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm302, permute_dims887, out_dtype="void") add1044: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul886, model_decoder_layers_14_encoder_attn_q_proj_bias4) reshape1179: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1044, R.shape([1, seq_len, 20, 64])) reshape1180: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1179, R.shape([seq_len, 20, 64])) lv228 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(14), R.prim_value(T.float32(1)), reshape1180), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1181: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv228, R.shape([1, seq_len, 20, 64])) reshape1182: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1181, R.shape([1, seq_len, 1280])) permute_dims888: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_14_encoder_attn_out_proj_weight4, axes=None) matmul887: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1182, permute_dims888, out_dtype="void") add1045: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul887, model_decoder_layers_14_encoder_attn_out_proj_bias4) add1046: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1043, add1045) layer_norm303: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1046, model_decoder_layers_14_final_layer_norm_weight4, model_decoder_layers_14_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims889: R.Tensor((1280, 5120), 
dtype="float16") = R.permute_dims(model_decoder_layers_14_fc1_weight4, axes=None) matmul888: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm303, permute_dims889, out_dtype="void") add1047: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul888, model_decoder_layers_14_fc1_bias4) gelu112: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add1047) permute_dims890: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_14_fc2_weight4, axes=None) matmul889: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu112, permute_dims890, out_dtype="void") add1048: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul889, model_decoder_layers_14_fc2_bias4) add1049: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1046, add1048) layer_norm304: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1049, model_decoder_layers_15_self_attn_layer_norm_weight4, model_decoder_layers_15_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims891: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_15_self_attn_q_proj_weight4, axes=None) matmul890: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm304, permute_dims891, out_dtype="void") add1050: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul890, model_decoder_layers_15_self_attn_q_proj_bias4) reshape1183: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1050, R.shape([1, seq_len, 20, 64])) permute_dims892: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_15_self_attn_k_proj_weight4, axes=None) matmul891: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm304, permute_dims892, out_dtype="void") reshape1184: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul891, R.shape([1, seq_len, 20, 64])) permute_dims893: R.Tensor((1280, 1280), dtype="float16") = 
R.permute_dims(model_decoder_layers_15_self_attn_v_proj_weight4, axes=None) matmul892: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm304, permute_dims893, out_dtype="void") add1051: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul892, model_decoder_layers_15_self_attn_v_proj_bias4) reshape1185: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1051, R.shape([1, seq_len, 20, 64])) concat79: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1183, reshape1184, reshape1185), axis=2) reshape1186: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat79, R.shape([seq_len, 60, 64])) lv229 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(15), R.prim_value(T.float32(1)), reshape1186), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1187: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv229, R.shape([1, seq_len, 20, 64])) reshape1188: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1187, R.shape([1, seq_len, 1280])) permute_dims894: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_15_self_attn_out_proj_weight4, axes=None) matmul893: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1188, permute_dims894, out_dtype="void") add1052: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul893, model_decoder_layers_15_self_attn_out_proj_bias4) add1053: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1049, add1052) layer_norm305: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1053, model_decoder_layers_15_encoder_attn_layer_norm_weight4, model_decoder_layers_15_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims895: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_15_encoder_attn_q_proj_weight4, axes=None) matmul894: R.Tensor((1, seq_len, 1280), 
dtype="float16") = R.matmul(layer_norm305, permute_dims895, out_dtype="void") add1054: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul894, model_decoder_layers_15_encoder_attn_q_proj_bias4) reshape1189: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1054, R.shape([1, seq_len, 20, 64])) reshape1190: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1189, R.shape([seq_len, 20, 64])) lv230 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(15), R.prim_value(T.float32(1)), reshape1190), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1191: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv230, R.shape([1, seq_len, 20, 64])) reshape1192: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1191, R.shape([1, seq_len, 1280])) permute_dims896: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_15_encoder_attn_out_proj_weight4, axes=None) matmul895: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1192, permute_dims896, out_dtype="void") add1055: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul895, model_decoder_layers_15_encoder_attn_out_proj_bias4) add1056: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1053, add1055) layer_norm306: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1056, model_decoder_layers_15_final_layer_norm_weight4, model_decoder_layers_15_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims897: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_15_fc1_weight4, axes=None) matmul896: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm306, permute_dims897, out_dtype="void") add1057: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul896, model_decoder_layers_15_fc1_bias4) gelu113: R.Tensor((1, seq_len, 5120), dtype="float16") = 
R.nn.gelu(add1057) permute_dims898: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_15_fc2_weight4, axes=None) matmul897: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu113, permute_dims898, out_dtype="void") add1058: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul897, model_decoder_layers_15_fc2_bias4) add1059: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1056, add1058) layer_norm307: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1059, model_decoder_layers_16_self_attn_layer_norm_weight4, model_decoder_layers_16_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims899: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_16_self_attn_q_proj_weight4, axes=None) matmul898: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm307, permute_dims899, out_dtype="void") add1060: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul898, model_decoder_layers_16_self_attn_q_proj_bias4) reshape1193: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1060, R.shape([1, seq_len, 20, 64])) permute_dims900: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_16_self_attn_k_proj_weight4, axes=None) matmul899: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm307, permute_dims900, out_dtype="void") reshape1194: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul899, R.shape([1, seq_len, 20, 64])) permute_dims901: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_16_self_attn_v_proj_weight4, axes=None) matmul900: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm307, permute_dims901, out_dtype="void") add1061: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul900, model_decoder_layers_16_self_attn_v_proj_bias4) reshape1195: R.Tensor((1, seq_len, 20, 64), dtype="float16") = 
R.reshape(add1061, R.shape([1, seq_len, 20, 64])) concat80: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1193, reshape1194, reshape1195), axis=2) reshape1196: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat80, R.shape([seq_len, 60, 64])) lv231 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(16), R.prim_value(T.float32(1)), reshape1196), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1197: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv231, R.shape([1, seq_len, 20, 64])) reshape1198: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1197, R.shape([1, seq_len, 1280])) permute_dims902: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_16_self_attn_out_proj_weight4, axes=None) matmul901: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1198, permute_dims902, out_dtype="void") add1062: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul901, model_decoder_layers_16_self_attn_out_proj_bias4) add1063: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1059, add1062) layer_norm308: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1063, model_decoder_layers_16_encoder_attn_layer_norm_weight4, model_decoder_layers_16_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims903: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_16_encoder_attn_q_proj_weight4, axes=None) matmul902: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm308, permute_dims903, out_dtype="void") add1064: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul902, model_decoder_layers_16_encoder_attn_q_proj_bias4) reshape1199: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1064, R.shape([1, seq_len, 20, 64])) reshape1200: R.Tensor((seq_len, 20, 64), dtype="float16") = 
R.reshape(reshape1199, R.shape([seq_len, 20, 64])) lv232 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(16), R.prim_value(T.float32(1)), reshape1200), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1201: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv232, R.shape([1, seq_len, 20, 64])) reshape1202: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1201, R.shape([1, seq_len, 1280])) permute_dims904: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_16_encoder_attn_out_proj_weight4, axes=None) matmul903: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1202, permute_dims904, out_dtype="void") add1065: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul903, model_decoder_layers_16_encoder_attn_out_proj_bias4) add1066: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1063, add1065) layer_norm309: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1066, model_decoder_layers_16_final_layer_norm_weight4, model_decoder_layers_16_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims905: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_16_fc1_weight4, axes=None) matmul904: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm309, permute_dims905, out_dtype="void") add1067: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul904, model_decoder_layers_16_fc1_bias4) gelu114: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add1067) permute_dims906: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_16_fc2_weight4, axes=None) matmul905: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu114, permute_dims906, out_dtype="void") add1068: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul905, model_decoder_layers_16_fc2_bias4) add1069: R.Tensor((1, seq_len, 
1280), dtype="float16") = R.add(add1066, add1068) layer_norm310: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1069, model_decoder_layers_17_self_attn_layer_norm_weight4, model_decoder_layers_17_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims907: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_17_self_attn_q_proj_weight4, axes=None) matmul906: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm310, permute_dims907, out_dtype="void") add1070: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul906, model_decoder_layers_17_self_attn_q_proj_bias4) reshape1203: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1070, R.shape([1, seq_len, 20, 64])) permute_dims908: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_17_self_attn_k_proj_weight4, axes=None) matmul907: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm310, permute_dims908, out_dtype="void") reshape1204: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul907, R.shape([1, seq_len, 20, 64])) permute_dims909: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_17_self_attn_v_proj_weight4, axes=None) matmul908: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm310, permute_dims909, out_dtype="void") add1071: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul908, model_decoder_layers_17_self_attn_v_proj_bias4) reshape1205: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1071, R.shape([1, seq_len, 20, 64])) concat81: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1203, reshape1204, reshape1205), axis=2) reshape1206: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat81, R.shape([seq_len, 60, 64])) lv233 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, 
R.prim_value(17), R.prim_value(T.float32(1)), reshape1206), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1207: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv233, R.shape([1, seq_len, 20, 64])) reshape1208: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1207, R.shape([1, seq_len, 1280])) permute_dims910: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_17_self_attn_out_proj_weight4, axes=None) matmul909: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1208, permute_dims910, out_dtype="void") add1072: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul909, model_decoder_layers_17_self_attn_out_proj_bias4) add1073: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1069, add1072) layer_norm311: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1073, model_decoder_layers_17_encoder_attn_layer_norm_weight4, model_decoder_layers_17_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims911: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_17_encoder_attn_q_proj_weight4, axes=None) matmul910: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm311, permute_dims911, out_dtype="void") add1074: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul910, model_decoder_layers_17_encoder_attn_q_proj_bias4) reshape1209: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1074, R.shape([1, seq_len, 20, 64])) reshape1210: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1209, R.shape([seq_len, 20, 64])) lv234 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(17), R.prim_value(T.float32(1)), reshape1210), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1211: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv234, R.shape([1, seq_len, 20, 64])) 
reshape1212: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1211, R.shape([1, seq_len, 1280])) permute_dims912: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_17_encoder_attn_out_proj_weight4, axes=None) matmul911: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1212, permute_dims912, out_dtype="void") add1075: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul911, model_decoder_layers_17_encoder_attn_out_proj_bias4) add1076: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1073, add1075) layer_norm312: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1076, model_decoder_layers_17_final_layer_norm_weight4, model_decoder_layers_17_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims913: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_17_fc1_weight4, axes=None) matmul912: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm312, permute_dims913, out_dtype="void") add1077: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul912, model_decoder_layers_17_fc1_bias4) gelu115: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add1077) permute_dims914: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_17_fc2_weight4, axes=None) matmul913: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu115, permute_dims914, out_dtype="void") add1078: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul913, model_decoder_layers_17_fc2_bias4) add1079: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1076, add1078) layer_norm313: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1079, model_decoder_layers_18_self_attn_layer_norm_weight4, model_decoder_layers_18_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims915: R.Tensor((1280, 1280), dtype="float16") 
= R.permute_dims(model_decoder_layers_18_self_attn_q_proj_weight4, axes=None) matmul914: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm313, permute_dims915, out_dtype="void") add1080: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul914, model_decoder_layers_18_self_attn_q_proj_bias4) reshape1213: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1080, R.shape([1, seq_len, 20, 64])) permute_dims916: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_18_self_attn_k_proj_weight4, axes=None) matmul915: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm313, permute_dims916, out_dtype="void") reshape1214: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul915, R.shape([1, seq_len, 20, 64])) permute_dims917: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_18_self_attn_v_proj_weight4, axes=None) matmul916: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm313, permute_dims917, out_dtype="void") add1081: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul916, model_decoder_layers_18_self_attn_v_proj_bias4) reshape1215: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1081, R.shape([1, seq_len, 20, 64])) concat82: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1213, reshape1214, reshape1215), axis=2) reshape1216: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat82, R.shape([seq_len, 60, 64])) lv235 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(18), R.prim_value(T.float32(1)), reshape1216), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1217: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv235, R.shape([1, seq_len, 20, 64])) reshape1218: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1217, R.shape([1, seq_len, 1280])) permute_dims918: R.Tensor((1280, 1280), 
dtype="float16") = R.permute_dims(model_decoder_layers_18_self_attn_out_proj_weight4, axes=None) matmul917: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1218, permute_dims918, out_dtype="void") add1082: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul917, model_decoder_layers_18_self_attn_out_proj_bias4) add1083: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1079, add1082) layer_norm314: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1083, model_decoder_layers_18_encoder_attn_layer_norm_weight4, model_decoder_layers_18_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims919: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_18_encoder_attn_q_proj_weight4, axes=None) matmul918: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm314, permute_dims919, out_dtype="void") add1084: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul918, model_decoder_layers_18_encoder_attn_q_proj_bias4) reshape1219: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1084, R.shape([1, seq_len, 20, 64])) reshape1220: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1219, R.shape([seq_len, 20, 64])) lv236 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(18), R.prim_value(T.float32(1)), reshape1220), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1221: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv236, R.shape([1, seq_len, 20, 64])) reshape1222: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1221, R.shape([1, seq_len, 1280])) permute_dims920: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_18_encoder_attn_out_proj_weight4, axes=None) matmul919: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1222, permute_dims920, out_dtype="void") add1085: 
R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul919, model_decoder_layers_18_encoder_attn_out_proj_bias4) add1086: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1083, add1085) layer_norm315: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1086, model_decoder_layers_18_final_layer_norm_weight4, model_decoder_layers_18_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims921: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_18_fc1_weight4, axes=None) matmul920: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm315, permute_dims921, out_dtype="void") add1087: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul920, model_decoder_layers_18_fc1_bias4) gelu116: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add1087) permute_dims922: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_18_fc2_weight4, axes=None) matmul921: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu116, permute_dims922, out_dtype="void") add1088: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul921, model_decoder_layers_18_fc2_bias4) add1089: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1086, add1088) layer_norm316: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1089, model_decoder_layers_19_self_attn_layer_norm_weight4, model_decoder_layers_19_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims923: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_19_self_attn_q_proj_weight4, axes=None) matmul922: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm316, permute_dims923, out_dtype="void") add1090: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul922, model_decoder_layers_19_self_attn_q_proj_bias4) reshape1223: R.Tensor((1, seq_len, 20, 64), dtype="float16") 
= R.reshape(add1090, R.shape([1, seq_len, 20, 64])) permute_dims924: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_19_self_attn_k_proj_weight4, axes=None) matmul923: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm316, permute_dims924, out_dtype="void") reshape1224: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul923, R.shape([1, seq_len, 20, 64])) permute_dims925: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_19_self_attn_v_proj_weight4, axes=None) matmul924: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm316, permute_dims925, out_dtype="void") add1091: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul924, model_decoder_layers_19_self_attn_v_proj_bias4) reshape1225: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1091, R.shape([1, seq_len, 20, 64])) concat83: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1223, reshape1224, reshape1225), axis=2) reshape1226: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat83, R.shape([seq_len, 60, 64])) lv237 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(19), R.prim_value(T.float32(1)), reshape1226), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1227: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv237, R.shape([1, seq_len, 20, 64])) reshape1228: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1227, R.shape([1, seq_len, 1280])) permute_dims926: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_19_self_attn_out_proj_weight4, axes=None) matmul925: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1228, permute_dims926, out_dtype="void") add1092: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul925, model_decoder_layers_19_self_attn_out_proj_bias4) add1093: R.Tensor((1, seq_len, 1280), 
dtype="float16") = R.add(add1089, add1092) layer_norm317: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1093, model_decoder_layers_19_encoder_attn_layer_norm_weight4, model_decoder_layers_19_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims927: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_19_encoder_attn_q_proj_weight4, axes=None) matmul926: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm317, permute_dims927, out_dtype="void") add1094: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul926, model_decoder_layers_19_encoder_attn_q_proj_bias4) reshape1229: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1094, R.shape([1, seq_len, 20, 64])) reshape1230: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1229, R.shape([seq_len, 20, 64])) lv238 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(19), R.prim_value(T.float32(1)), reshape1230), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1231: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv238, R.shape([1, seq_len, 20, 64])) reshape1232: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1231, R.shape([1, seq_len, 1280])) permute_dims928: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_19_encoder_attn_out_proj_weight4, axes=None) matmul927: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1232, permute_dims928, out_dtype="void") add1095: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul927, model_decoder_layers_19_encoder_attn_out_proj_bias4) add1096: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1093, add1095) layer_norm318: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1096, model_decoder_layers_19_final_layer_norm_weight4, model_decoder_layers_19_final_layer_norm_bias4, 
axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims929: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_19_fc1_weight4, axes=None) matmul928: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm318, permute_dims929, out_dtype="void") add1097: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul928, model_decoder_layers_19_fc1_bias4) gelu117: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add1097) permute_dims930: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_19_fc2_weight4, axes=None) matmul929: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu117, permute_dims930, out_dtype="void") add1098: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul929, model_decoder_layers_19_fc2_bias4) add1099: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1096, add1098) layer_norm319: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1099, model_decoder_layers_20_self_attn_layer_norm_weight4, model_decoder_layers_20_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims931: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_20_self_attn_q_proj_weight4, axes=None) matmul930: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm319, permute_dims931, out_dtype="void") add1100: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul930, model_decoder_layers_20_self_attn_q_proj_bias4) reshape1233: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1100, R.shape([1, seq_len, 20, 64])) permute_dims932: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_20_self_attn_k_proj_weight4, axes=None) matmul931: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm319, permute_dims932, out_dtype="void") reshape1234: R.Tensor((1, seq_len, 20, 64), dtype="float16") = 
R.reshape(matmul931, R.shape([1, seq_len, 20, 64])) permute_dims933: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_20_self_attn_v_proj_weight4, axes=None) matmul932: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm319, permute_dims933, out_dtype="void") add1101: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul932, model_decoder_layers_20_self_attn_v_proj_bias4) reshape1235: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1101, R.shape([1, seq_len, 20, 64])) concat84: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1233, reshape1234, reshape1235), axis=2) reshape1236: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat84, R.shape([seq_len, 60, 64])) lv239 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(20), R.prim_value(T.float32(1)), reshape1236), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1237: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv239, R.shape([1, seq_len, 20, 64])) reshape1238: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1237, R.shape([1, seq_len, 1280])) permute_dims934: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_20_self_attn_out_proj_weight4, axes=None) matmul933: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1238, permute_dims934, out_dtype="void") add1102: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul933, model_decoder_layers_20_self_attn_out_proj_bias4) add1103: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1099, add1102) layer_norm320: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1103, model_decoder_layers_20_encoder_attn_layer_norm_weight4, model_decoder_layers_20_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims935: R.Tensor((1280, 1280), dtype="float16") = 
R.permute_dims(model_decoder_layers_20_encoder_attn_q_proj_weight4, axes=None) matmul934: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm320, permute_dims935, out_dtype="void") add1104: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul934, model_decoder_layers_20_encoder_attn_q_proj_bias4) reshape1239: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1104, R.shape([1, seq_len, 20, 64])) reshape1240: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1239, R.shape([seq_len, 20, 64])) lv240 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(20), R.prim_value(T.float32(1)), reshape1240), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1241: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv240, R.shape([1, seq_len, 20, 64])) reshape1242: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1241, R.shape([1, seq_len, 1280])) permute_dims936: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_20_encoder_attn_out_proj_weight4, axes=None) matmul935: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1242, permute_dims936, out_dtype="void") add1105: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul935, model_decoder_layers_20_encoder_attn_out_proj_bias4) add1106: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1103, add1105) layer_norm321: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1106, model_decoder_layers_20_final_layer_norm_weight4, model_decoder_layers_20_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims937: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_20_fc1_weight4, axes=None) matmul936: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm321, permute_dims937, out_dtype="void") add1107: R.Tensor((1, seq_len, 5120), dtype="float16") = 
R.add(matmul936, model_decoder_layers_20_fc1_bias4) gelu118: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add1107) permute_dims938: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_20_fc2_weight4, axes=None) matmul937: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu118, permute_dims938, out_dtype="void") add1108: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul937, model_decoder_layers_20_fc2_bias4) add1109: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1106, add1108) layer_norm322: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1109, model_decoder_layers_21_self_attn_layer_norm_weight4, model_decoder_layers_21_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims939: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_21_self_attn_q_proj_weight4, axes=None) matmul938: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm322, permute_dims939, out_dtype="void") add1110: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul938, model_decoder_layers_21_self_attn_q_proj_bias4) reshape1243: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1110, R.shape([1, seq_len, 20, 64])) permute_dims940: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_21_self_attn_k_proj_weight4, axes=None) matmul939: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm322, permute_dims940, out_dtype="void") reshape1244: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul939, R.shape([1, seq_len, 20, 64])) permute_dims941: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_21_self_attn_v_proj_weight4, axes=None) matmul940: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm322, permute_dims941, out_dtype="void") add1111: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul940, 
model_decoder_layers_21_self_attn_v_proj_bias4) reshape1245: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1111, R.shape([1, seq_len, 20, 64])) concat85: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1243, reshape1244, reshape1245), axis=2) reshape1246: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat85, R.shape([seq_len, 60, 64])) lv241 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(21), R.prim_value(T.float32(1)), reshape1246), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1247: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv241, R.shape([1, seq_len, 20, 64])) reshape1248: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1247, R.shape([1, seq_len, 1280])) permute_dims942: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_21_self_attn_out_proj_weight4, axes=None) matmul941: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1248, permute_dims942, out_dtype="void") add1112: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul941, model_decoder_layers_21_self_attn_out_proj_bias4) add1113: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1109, add1112) layer_norm323: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1113, model_decoder_layers_21_encoder_attn_layer_norm_weight4, model_decoder_layers_21_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims943: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_21_encoder_attn_q_proj_weight4, axes=None) matmul942: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm323, permute_dims943, out_dtype="void") add1114: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul942, model_decoder_layers_21_encoder_attn_q_proj_bias4) reshape1249: R.Tensor((1, seq_len, 20, 64), dtype="float16") = 
R.reshape(add1114, R.shape([1, seq_len, 20, 64])) reshape1250: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1249, R.shape([seq_len, 20, 64])) lv242 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(21), R.prim_value(T.float32(1)), reshape1250), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1251: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv242, R.shape([1, seq_len, 20, 64])) reshape1252: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1251, R.shape([1, seq_len, 1280])) permute_dims944: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_21_encoder_attn_out_proj_weight4, axes=None) matmul943: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1252, permute_dims944, out_dtype="void") add1115: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul943, model_decoder_layers_21_encoder_attn_out_proj_bias4) add1116: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1113, add1115) layer_norm324: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1116, model_decoder_layers_21_final_layer_norm_weight4, model_decoder_layers_21_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims945: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_21_fc1_weight4, axes=None) matmul944: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm324, permute_dims945, out_dtype="void") add1117: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul944, model_decoder_layers_21_fc1_bias4) gelu119: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add1117) permute_dims946: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_21_fc2_weight4, axes=None) matmul945: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu119, permute_dims946, out_dtype="void") add1118: R.Tensor((1, 
seq_len, 1280), dtype="float16") = R.add(matmul945, model_decoder_layers_21_fc2_bias4) add1119: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1116, add1118) layer_norm325: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1119, model_decoder_layers_22_self_attn_layer_norm_weight4, model_decoder_layers_22_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims947: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_22_self_attn_q_proj_weight4, axes=None) matmul946: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm325, permute_dims947, out_dtype="void") add1120: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul946, model_decoder_layers_22_self_attn_q_proj_bias4) reshape1253: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1120, R.shape([1, seq_len, 20, 64])) permute_dims948: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_22_self_attn_k_proj_weight4, axes=None) matmul947: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm325, permute_dims948, out_dtype="void") reshape1254: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul947, R.shape([1, seq_len, 20, 64])) permute_dims949: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_22_self_attn_v_proj_weight4, axes=None) matmul948: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm325, permute_dims949, out_dtype="void") add1121: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul948, model_decoder_layers_22_self_attn_v_proj_bias4) reshape1255: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1121, R.shape([1, seq_len, 20, 64])) concat86: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1253, reshape1254, reshape1255), axis=2) reshape1256: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat86, R.shape([seq_len, 60, 64])) 
lv243 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(22), R.prim_value(T.float32(1)), reshape1256), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1257: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv243, R.shape([1, seq_len, 20, 64])) reshape1258: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1257, R.shape([1, seq_len, 1280])) permute_dims950: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_22_self_attn_out_proj_weight4, axes=None) matmul949: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1258, permute_dims950, out_dtype="void") add1122: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul949, model_decoder_layers_22_self_attn_out_proj_bias4) add1123: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1119, add1122) layer_norm326: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1123, model_decoder_layers_22_encoder_attn_layer_norm_weight4, model_decoder_layers_22_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims951: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_22_encoder_attn_q_proj_weight4, axes=None) matmul950: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm326, permute_dims951, out_dtype="void") add1124: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul950, model_decoder_layers_22_encoder_attn_q_proj_bias4) reshape1259: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1124, R.shape([1, seq_len, 20, 64])) reshape1260: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1259, R.shape([seq_len, 20, 64])) lv244 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(22), R.prim_value(T.float32(1)), reshape1260), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1261: 
R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv244, R.shape([1, seq_len, 20, 64])) reshape1262: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1261, R.shape([1, seq_len, 1280])) permute_dims952: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_22_encoder_attn_out_proj_weight4, axes=None) matmul951: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1262, permute_dims952, out_dtype="void") add1125: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul951, model_decoder_layers_22_encoder_attn_out_proj_bias4) add1126: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1123, add1125) layer_norm327: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1126, model_decoder_layers_22_final_layer_norm_weight4, model_decoder_layers_22_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims953: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_22_fc1_weight4, axes=None) matmul952: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm327, permute_dims953, out_dtype="void") add1127: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul952, model_decoder_layers_22_fc1_bias4) gelu120: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add1127) permute_dims954: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_22_fc2_weight4, axes=None) matmul953: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu120, permute_dims954, out_dtype="void") add1128: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul953, model_decoder_layers_22_fc2_bias4) add1129: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1126, add1128) layer_norm328: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1129, model_decoder_layers_23_self_attn_layer_norm_weight4, model_decoder_layers_23_self_attn_layer_norm_bias4, axes=[-1], 
epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims955: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_23_self_attn_q_proj_weight4, axes=None) matmul954: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm328, permute_dims955, out_dtype="void") add1130: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul954, model_decoder_layers_23_self_attn_q_proj_bias4) reshape1263: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1130, R.shape([1, seq_len, 20, 64])) permute_dims956: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_23_self_attn_k_proj_weight4, axes=None) matmul955: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm328, permute_dims956, out_dtype="void") reshape1264: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul955, R.shape([1, seq_len, 20, 64])) permute_dims957: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_23_self_attn_v_proj_weight4, axes=None) matmul956: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm328, permute_dims957, out_dtype="void") add1131: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul956, model_decoder_layers_23_self_attn_v_proj_bias4) reshape1265: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1131, R.shape([1, seq_len, 20, 64])) concat87: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1263, reshape1264, reshape1265), axis=2) reshape1266: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat87, R.shape([seq_len, 60, 64])) lv245 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(23), R.prim_value(T.float32(1)), reshape1266), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1267: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv245, R.shape([1, seq_len, 20, 64])) reshape1268: R.Tensor((1, seq_len, 1280), 
dtype="float16") = R.reshape(reshape1267, R.shape([1, seq_len, 1280])) permute_dims958: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_23_self_attn_out_proj_weight4, axes=None) matmul957: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1268, permute_dims958, out_dtype="void") add1132: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul957, model_decoder_layers_23_self_attn_out_proj_bias4) add1133: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1129, add1132) layer_norm329: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1133, model_decoder_layers_23_encoder_attn_layer_norm_weight4, model_decoder_layers_23_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims959: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_23_encoder_attn_q_proj_weight4, axes=None) matmul958: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm329, permute_dims959, out_dtype="void") add1134: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul958, model_decoder_layers_23_encoder_attn_q_proj_bias4) reshape1269: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1134, R.shape([1, seq_len, 20, 64])) reshape1270: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1269, R.shape([seq_len, 20, 64])) lv246 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(23), R.prim_value(T.float32(1)), reshape1270), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1271: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv246, R.shape([1, seq_len, 20, 64])) reshape1272: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1271, R.shape([1, seq_len, 1280])) permute_dims960: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_23_encoder_attn_out_proj_weight4, axes=None) matmul959: R.Tensor((1, 
seq_len, 1280), dtype="float16") = R.matmul(reshape1272, permute_dims960, out_dtype="void") add1135: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul959, model_decoder_layers_23_encoder_attn_out_proj_bias4) add1136: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1133, add1135) layer_norm330: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1136, model_decoder_layers_23_final_layer_norm_weight4, model_decoder_layers_23_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims961: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_23_fc1_weight4, axes=None) matmul960: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm330, permute_dims961, out_dtype="void") add1137: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul960, model_decoder_layers_23_fc1_bias4) gelu121: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add1137) permute_dims962: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_23_fc2_weight4, axes=None) matmul961: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu121, permute_dims962, out_dtype="void") add1138: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul961, model_decoder_layers_23_fc2_bias4) add1139: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1136, add1138) layer_norm331: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1139, model_decoder_layers_24_self_attn_layer_norm_weight4, model_decoder_layers_24_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims963: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_24_self_attn_q_proj_weight4, axes=None) matmul962: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm331, permute_dims963, out_dtype="void") add1140: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul962, 
model_decoder_layers_24_self_attn_q_proj_bias4) reshape1273: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1140, R.shape([1, seq_len, 20, 64])) permute_dims964: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_24_self_attn_k_proj_weight4, axes=None) matmul963: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm331, permute_dims964, out_dtype="void") reshape1274: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul963, R.shape([1, seq_len, 20, 64])) permute_dims965: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_24_self_attn_v_proj_weight4, axes=None) matmul964: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm331, permute_dims965, out_dtype="void") add1141: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul964, model_decoder_layers_24_self_attn_v_proj_bias4) reshape1275: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1141, R.shape([1, seq_len, 20, 64])) concat88: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1273, reshape1274, reshape1275), axis=2) reshape1276: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat88, R.shape([seq_len, 60, 64])) lv247 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(24), R.prim_value(T.float32(1)), reshape1276), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1277: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv247, R.shape([1, seq_len, 20, 64])) reshape1278: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1277, R.shape([1, seq_len, 1280])) permute_dims966: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_24_self_attn_out_proj_weight4, axes=None) matmul965: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1278, permute_dims966, out_dtype="void") add1142: R.Tensor((1, seq_len, 1280), dtype="float16") = 
R.add(matmul965, model_decoder_layers_24_self_attn_out_proj_bias4) add1143: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1139, add1142) layer_norm332: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1143, model_decoder_layers_24_encoder_attn_layer_norm_weight4, model_decoder_layers_24_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims967: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_24_encoder_attn_q_proj_weight4, axes=None) matmul966: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm332, permute_dims967, out_dtype="void") add1144: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul966, model_decoder_layers_24_encoder_attn_q_proj_bias4) reshape1279: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1144, R.shape([1, seq_len, 20, 64])) reshape1280: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1279, R.shape([seq_len, 20, 64])) lv248 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(24), R.prim_value(T.float32(1)), reshape1280), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1281: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv248, R.shape([1, seq_len, 20, 64])) reshape1282: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1281, R.shape([1, seq_len, 1280])) permute_dims968: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_24_encoder_attn_out_proj_weight4, axes=None) matmul967: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1282, permute_dims968, out_dtype="void") add1145: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul967, model_decoder_layers_24_encoder_attn_out_proj_bias4) add1146: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1143, add1145) layer_norm333: R.Tensor((1, seq_len, 1280), dtype="float16") = 
R.nn.layer_norm(add1146, model_decoder_layers_24_final_layer_norm_weight4, model_decoder_layers_24_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims969: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_24_fc1_weight4, axes=None) matmul968: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm333, permute_dims969, out_dtype="void") add1147: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul968, model_decoder_layers_24_fc1_bias4) gelu122: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add1147) permute_dims970: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_24_fc2_weight4, axes=None) matmul969: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu122, permute_dims970, out_dtype="void") add1148: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul969, model_decoder_layers_24_fc2_bias4) add1149: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1146, add1148) layer_norm334: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1149, model_decoder_layers_25_self_attn_layer_norm_weight4, model_decoder_layers_25_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims971: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_25_self_attn_q_proj_weight4, axes=None) matmul970: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm334, permute_dims971, out_dtype="void") add1150: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul970, model_decoder_layers_25_self_attn_q_proj_bias4) reshape1283: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1150, R.shape([1, seq_len, 20, 64])) permute_dims972: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_25_self_attn_k_proj_weight4, axes=None) matmul971: R.Tensor((1, seq_len, 1280), dtype="float16") = 
R.matmul(layer_norm334, permute_dims972, out_dtype="void") reshape1284: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul971, R.shape([1, seq_len, 20, 64])) permute_dims973: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_25_self_attn_v_proj_weight4, axes=None) matmul972: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm334, permute_dims973, out_dtype="void") add1151: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul972, model_decoder_layers_25_self_attn_v_proj_bias4) reshape1285: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1151, R.shape([1, seq_len, 20, 64])) concat89: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1283, reshape1284, reshape1285), axis=2) reshape1286: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat89, R.shape([seq_len, 60, 64])) lv249 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(25), R.prim_value(T.float32(1)), reshape1286), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1287: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv249, R.shape([1, seq_len, 20, 64])) reshape1288: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1287, R.shape([1, seq_len, 1280])) permute_dims974: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_25_self_attn_out_proj_weight4, axes=None) matmul973: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1288, permute_dims974, out_dtype="void") add1152: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul973, model_decoder_layers_25_self_attn_out_proj_bias4) add1153: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1149, add1152) layer_norm335: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1153, model_decoder_layers_25_encoder_attn_layer_norm_weight4, model_decoder_layers_25_encoder_attn_layer_norm_bias4, axes=[-1], 
epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims975: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_25_encoder_attn_q_proj_weight4, axes=None) matmul974: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm335, permute_dims975, out_dtype="void") add1154: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul974, model_decoder_layers_25_encoder_attn_q_proj_bias4) reshape1289: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1154, R.shape([1, seq_len, 20, 64])) reshape1290: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1289, R.shape([seq_len, 20, 64])) lv250 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(25), R.prim_value(T.float32(1)), reshape1290), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1291: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv250, R.shape([1, seq_len, 20, 64])) reshape1292: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1291, R.shape([1, seq_len, 1280])) permute_dims976: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_25_encoder_attn_out_proj_weight4, axes=None) matmul975: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1292, permute_dims976, out_dtype="void") add1155: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul975, model_decoder_layers_25_encoder_attn_out_proj_bias4) add1156: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1153, add1155) layer_norm336: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1156, model_decoder_layers_25_final_layer_norm_weight4, model_decoder_layers_25_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims977: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_25_fc1_weight4, axes=None) matmul976: R.Tensor((1, seq_len, 5120), dtype="float16") = 
R.matmul(layer_norm336, permute_dims977, out_dtype="void") add1157: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul976, model_decoder_layers_25_fc1_bias4) gelu123: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add1157) permute_dims978: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_25_fc2_weight4, axes=None) matmul977: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu123, permute_dims978, out_dtype="void") add1158: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul977, model_decoder_layers_25_fc2_bias4) add1159: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1156, add1158) layer_norm337: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1159, model_decoder_layers_26_self_attn_layer_norm_weight4, model_decoder_layers_26_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims979: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_26_self_attn_q_proj_weight4, axes=None) matmul978: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm337, permute_dims979, out_dtype="void") add1160: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul978, model_decoder_layers_26_self_attn_q_proj_bias4) reshape1293: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1160, R.shape([1, seq_len, 20, 64])) permute_dims980: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_26_self_attn_k_proj_weight4, axes=None) matmul979: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm337, permute_dims980, out_dtype="void") reshape1294: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul979, R.shape([1, seq_len, 20, 64])) permute_dims981: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_26_self_attn_v_proj_weight4, axes=None) matmul980: R.Tensor((1, seq_len, 1280), dtype="float16") = 
R.matmul(layer_norm337, permute_dims981, out_dtype="void") add1161: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul980, model_decoder_layers_26_self_attn_v_proj_bias4) reshape1295: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1161, R.shape([1, seq_len, 20, 64])) concat90: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1293, reshape1294, reshape1295), axis=2) reshape1296: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat90, R.shape([seq_len, 60, 64])) lv251 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(26), R.prim_value(T.float32(1)), reshape1296), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1297: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv251, R.shape([1, seq_len, 20, 64])) reshape1298: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1297, R.shape([1, seq_len, 1280])) permute_dims982: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_26_self_attn_out_proj_weight4, axes=None) matmul981: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1298, permute_dims982, out_dtype="void") add1162: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul981, model_decoder_layers_26_self_attn_out_proj_bias4) add1163: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1159, add1162) layer_norm338: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1163, model_decoder_layers_26_encoder_attn_layer_norm_weight4, model_decoder_layers_26_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims983: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_26_encoder_attn_q_proj_weight4, axes=None) matmul982: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm338, permute_dims983, out_dtype="void") add1164: R.Tensor((1, seq_len, 1280), dtype="float16") = 
R.add(matmul982, model_decoder_layers_26_encoder_attn_q_proj_bias4) reshape1299: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1164, R.shape([1, seq_len, 20, 64])) reshape1300: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1299, R.shape([seq_len, 20, 64])) lv252 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(26), R.prim_value(T.float32(1)), reshape1300), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1301: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv252, R.shape([1, seq_len, 20, 64])) reshape1302: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1301, R.shape([1, seq_len, 1280])) permute_dims984: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_26_encoder_attn_out_proj_weight4, axes=None) matmul983: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1302, permute_dims984, out_dtype="void") add1165: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul983, model_decoder_layers_26_encoder_attn_out_proj_bias4) add1166: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1163, add1165) layer_norm339: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1166, model_decoder_layers_26_final_layer_norm_weight4, model_decoder_layers_26_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims985: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_26_fc1_weight4, axes=None) matmul984: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm339, permute_dims985, out_dtype="void") add1167: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul984, model_decoder_layers_26_fc1_bias4) gelu124: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add1167) permute_dims986: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_26_fc2_weight4, axes=None) matmul985: 
R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu124, permute_dims986, out_dtype="void") add1168: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul985, model_decoder_layers_26_fc2_bias4) add1169: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1166, add1168) layer_norm340: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1169, model_decoder_layers_27_self_attn_layer_norm_weight4, model_decoder_layers_27_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims987: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_27_self_attn_q_proj_weight4, axes=None) matmul986: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm340, permute_dims987, out_dtype="void") add1170: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul986, model_decoder_layers_27_self_attn_q_proj_bias4) reshape1303: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1170, R.shape([1, seq_len, 20, 64])) permute_dims988: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_27_self_attn_k_proj_weight4, axes=None) matmul987: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm340, permute_dims988, out_dtype="void") reshape1304: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul987, R.shape([1, seq_len, 20, 64])) permute_dims989: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_27_self_attn_v_proj_weight4, axes=None) matmul988: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm340, permute_dims989, out_dtype="void") add1171: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul988, model_decoder_layers_27_self_attn_v_proj_bias4) reshape1305: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1171, R.shape([1, seq_len, 20, 64])) concat91: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1303, reshape1304, 
reshape1305), axis=2) reshape1306: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat91, R.shape([seq_len, 60, 64])) lv253 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(27), R.prim_value(T.float32(1)), reshape1306), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1307: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv253, R.shape([1, seq_len, 20, 64])) reshape1308: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1307, R.shape([1, seq_len, 1280])) permute_dims990: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_27_self_attn_out_proj_weight4, axes=None) matmul989: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1308, permute_dims990, out_dtype="void") add1172: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul989, model_decoder_layers_27_self_attn_out_proj_bias4) add1173: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1169, add1172) layer_norm341: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1173, model_decoder_layers_27_encoder_attn_layer_norm_weight4, model_decoder_layers_27_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims991: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_27_encoder_attn_q_proj_weight4, axes=None) matmul990: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm341, permute_dims991, out_dtype="void") add1174: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul990, model_decoder_layers_27_encoder_attn_q_proj_bias4) reshape1309: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1174, R.shape([1, seq_len, 20, 64])) reshape1310: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1309, R.shape([seq_len, 20, 64])) lv254 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, 
R.prim_value(27), R.prim_value(T.float32(1)), reshape1310), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1311: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv254, R.shape([1, seq_len, 20, 64])) reshape1312: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1311, R.shape([1, seq_len, 1280])) permute_dims992: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_27_encoder_attn_out_proj_weight4, axes=None) matmul991: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1312, permute_dims992, out_dtype="void") add1175: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul991, model_decoder_layers_27_encoder_attn_out_proj_bias4) add1176: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1173, add1175) layer_norm342: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1176, model_decoder_layers_27_final_layer_norm_weight4, model_decoder_layers_27_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims993: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_27_fc1_weight4, axes=None) matmul992: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm342, permute_dims993, out_dtype="void") add1177: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul992, model_decoder_layers_27_fc1_bias4) gelu125: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add1177) permute_dims994: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_27_fc2_weight4, axes=None) matmul993: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu125, permute_dims994, out_dtype="void") add1178: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul993, model_decoder_layers_27_fc2_bias4) add1179: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1176, add1178) layer_norm343: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1179, 
model_decoder_layers_28_self_attn_layer_norm_weight4, model_decoder_layers_28_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims995: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_28_self_attn_q_proj_weight4, axes=None) matmul994: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm343, permute_dims995, out_dtype="void") add1180: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul994, model_decoder_layers_28_self_attn_q_proj_bias4) reshape1313: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1180, R.shape([1, seq_len, 20, 64])) permute_dims996: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_28_self_attn_k_proj_weight4, axes=None) matmul995: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm343, permute_dims996, out_dtype="void") reshape1314: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul995, R.shape([1, seq_len, 20, 64])) permute_dims997: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_28_self_attn_v_proj_weight4, axes=None) matmul996: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm343, permute_dims997, out_dtype="void") add1181: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul996, model_decoder_layers_28_self_attn_v_proj_bias4) reshape1315: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1181, R.shape([1, seq_len, 20, 64])) concat92: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1313, reshape1314, reshape1315), axis=2) reshape1316: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat92, R.shape([seq_len, 60, 64])) lv255 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(28), R.prim_value(T.float32(1)), reshape1316), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1317: R.Tensor((1, seq_len, 20, 
64), dtype="float16") = R.reshape(lv255, R.shape([1, seq_len, 20, 64])) reshape1318: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1317, R.shape([1, seq_len, 1280])) permute_dims998: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_28_self_attn_out_proj_weight4, axes=None) matmul997: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1318, permute_dims998, out_dtype="void") add1182: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul997, model_decoder_layers_28_self_attn_out_proj_bias4) add1183: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1179, add1182) layer_norm344: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1183, model_decoder_layers_28_encoder_attn_layer_norm_weight4, model_decoder_layers_28_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims999: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_28_encoder_attn_q_proj_weight4, axes=None) matmul998: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm344, permute_dims999, out_dtype="void") add1184: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul998, model_decoder_layers_28_encoder_attn_q_proj_bias4) reshape1319: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1184, R.shape([1, seq_len, 20, 64])) reshape1320: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1319, R.shape([seq_len, 20, 64])) lv256 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(28), R.prim_value(T.float32(1)), reshape1320), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1321: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv256, R.shape([1, seq_len, 20, 64])) reshape1322: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1321, R.shape([1, seq_len, 1280])) permute_dims1000: R.Tensor((1280, 1280), 
dtype="float16") = R.permute_dims(model_decoder_layers_28_encoder_attn_out_proj_weight4, axes=None) matmul999: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1322, permute_dims1000, out_dtype="void") add1185: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul999, model_decoder_layers_28_encoder_attn_out_proj_bias4) add1186: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1183, add1185) layer_norm345: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1186, model_decoder_layers_28_final_layer_norm_weight4, model_decoder_layers_28_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1001: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_28_fc1_weight4, axes=None) matmul1000: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm345, permute_dims1001, out_dtype="void") add1187: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul1000, model_decoder_layers_28_fc1_bias4) gelu126: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add1187) permute_dims1002: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_28_fc2_weight4, axes=None) matmul1001: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu126, permute_dims1002, out_dtype="void") add1188: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul1001, model_decoder_layers_28_fc2_bias4) add1189: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1186, add1188) layer_norm346: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1189, model_decoder_layers_29_self_attn_layer_norm_weight4, model_decoder_layers_29_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1003: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_29_self_attn_q_proj_weight4, axes=None) matmul1002: R.Tensor((1, seq_len, 1280), dtype="float16") = 
R.matmul(layer_norm346, permute_dims1003, out_dtype="void") add1190: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul1002, model_decoder_layers_29_self_attn_q_proj_bias4) reshape1323: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1190, R.shape([1, seq_len, 20, 64])) permute_dims1004: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_29_self_attn_k_proj_weight4, axes=None) matmul1003: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm346, permute_dims1004, out_dtype="void") reshape1324: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul1003, R.shape([1, seq_len, 20, 64])) permute_dims1005: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_29_self_attn_v_proj_weight4, axes=None) matmul1004: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm346, permute_dims1005, out_dtype="void") add1191: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul1004, model_decoder_layers_29_self_attn_v_proj_bias4) reshape1325: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1191, R.shape([1, seq_len, 20, 64])) concat93: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1323, reshape1324, reshape1325), axis=2) reshape1326: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat93, R.shape([seq_len, 60, 64])) lv257 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(29), R.prim_value(T.float32(1)), reshape1326), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1327: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv257, R.shape([1, seq_len, 20, 64])) reshape1328: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1327, R.shape([1, seq_len, 1280])) permute_dims1006: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_29_self_attn_out_proj_weight4, axes=None) matmul1005: R.Tensor((1, seq_len, 
1280), dtype="float16") = R.matmul(reshape1328, permute_dims1006, out_dtype="void") add1192: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul1005, model_decoder_layers_29_self_attn_out_proj_bias4) add1193: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1189, add1192) layer_norm347: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1193, model_decoder_layers_29_encoder_attn_layer_norm_weight4, model_decoder_layers_29_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1007: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_29_encoder_attn_q_proj_weight4, axes=None) matmul1006: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm347, permute_dims1007, out_dtype="void") add1194: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul1006, model_decoder_layers_29_encoder_attn_q_proj_bias4) reshape1329: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1194, R.shape([1, seq_len, 20, 64])) reshape1330: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1329, R.shape([seq_len, 20, 64])) lv258 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(29), R.prim_value(T.float32(1)), reshape1330), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1331: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv258, R.shape([1, seq_len, 20, 64])) reshape1332: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1331, R.shape([1, seq_len, 1280])) permute_dims1008: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_29_encoder_attn_out_proj_weight4, axes=None) matmul1007: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1332, permute_dims1008, out_dtype="void") add1195: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul1007, model_decoder_layers_29_encoder_attn_out_proj_bias4) add1196: 
R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1193, add1195) layer_norm348: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1196, model_decoder_layers_29_final_layer_norm_weight4, model_decoder_layers_29_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1009: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_29_fc1_weight4, axes=None) matmul1008: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm348, permute_dims1009, out_dtype="void") add1197: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul1008, model_decoder_layers_29_fc1_bias4) gelu127: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add1197) permute_dims1010: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_29_fc2_weight4, axes=None) matmul1009: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu127, permute_dims1010, out_dtype="void") add1198: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul1009, model_decoder_layers_29_fc2_bias4) add1199: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1196, add1198) layer_norm349: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1199, model_decoder_layers_30_self_attn_layer_norm_weight4, model_decoder_layers_30_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1011: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_30_self_attn_q_proj_weight4, axes=None) matmul1010: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm349, permute_dims1011, out_dtype="void") add1200: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul1010, model_decoder_layers_30_self_attn_q_proj_bias4) reshape1333: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1200, R.shape([1, seq_len, 20, 64])) permute_dims1012: R.Tensor((1280, 1280), dtype="float16") = 
R.permute_dims(model_decoder_layers_30_self_attn_k_proj_weight4, axes=None) matmul1011: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm349, permute_dims1012, out_dtype="void") reshape1334: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul1011, R.shape([1, seq_len, 20, 64])) permute_dims1013: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_30_self_attn_v_proj_weight4, axes=None) matmul1012: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm349, permute_dims1013, out_dtype="void") add1201: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul1012, model_decoder_layers_30_self_attn_v_proj_bias4) reshape1335: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1201, R.shape([1, seq_len, 20, 64])) concat94: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1333, reshape1334, reshape1335), axis=2) reshape1336: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat94, R.shape([seq_len, 60, 64])) lv259 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(30), R.prim_value(T.float32(1)), reshape1336), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1337: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv259, R.shape([1, seq_len, 20, 64])) reshape1338: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1337, R.shape([1, seq_len, 1280])) permute_dims1014: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_30_self_attn_out_proj_weight4, axes=None) matmul1013: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1338, permute_dims1014, out_dtype="void") add1202: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul1013, model_decoder_layers_30_self_attn_out_proj_bias4) add1203: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1199, add1202) layer_norm350: R.Tensor((1, seq_len, 1280), dtype="float16") = 
R.nn.layer_norm(add1203, model_decoder_layers_30_encoder_attn_layer_norm_weight4, model_decoder_layers_30_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1015: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_30_encoder_attn_q_proj_weight4, axes=None) matmul1014: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm350, permute_dims1015, out_dtype="void") add1204: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul1014, model_decoder_layers_30_encoder_attn_q_proj_bias4) reshape1339: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1204, R.shape([1, seq_len, 20, 64])) reshape1340: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1339, R.shape([seq_len, 20, 64])) lv260 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(30), R.prim_value(T.float32(1)), reshape1340), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1341: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv260, R.shape([1, seq_len, 20, 64])) reshape1342: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1341, R.shape([1, seq_len, 1280])) permute_dims1016: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_30_encoder_attn_out_proj_weight4, axes=None) matmul1015: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1342, permute_dims1016, out_dtype="void") add1205: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul1015, model_decoder_layers_30_encoder_attn_out_proj_bias4) add1206: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1203, add1205) layer_norm351: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1206, model_decoder_layers_30_final_layer_norm_weight4, model_decoder_layers_30_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1017: 
R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_30_fc1_weight4, axes=None) matmul1016: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm351, permute_dims1017, out_dtype="void") add1207: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul1016, model_decoder_layers_30_fc1_bias4) gelu128: R.Tensor((1, seq_len, 5120), dtype="float16") = R.nn.gelu(add1207) permute_dims1018: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_30_fc2_weight4, axes=None) matmul1017: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu128, permute_dims1018, out_dtype="void") add1208: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul1017, model_decoder_layers_30_fc2_bias4) add1209: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1206, add1208) layer_norm352: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1209, model_decoder_layers_31_self_attn_layer_norm_weight4, model_decoder_layers_31_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1019: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_31_self_attn_q_proj_weight4, axes=None) matmul1018: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm352, permute_dims1019, out_dtype="void") add1210: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul1018, model_decoder_layers_31_self_attn_q_proj_bias4) reshape1343: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1210, R.shape([1, seq_len, 20, 64])) permute_dims1020: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_31_self_attn_k_proj_weight4, axes=None) matmul1019: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm352, permute_dims1020, out_dtype="void") reshape1344: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(matmul1019, R.shape([1, seq_len, 20, 64])) permute_dims1021: R.Tensor((1280, 
1280), dtype="float16") = R.permute_dims(model_decoder_layers_31_self_attn_v_proj_weight4, axes=None) matmul1020: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm352, permute_dims1021, out_dtype="void") add1211: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul1020, model_decoder_layers_31_self_attn_v_proj_bias4) reshape1345: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1211, R.shape([1, seq_len, 20, 64])) concat95: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1343, reshape1344, reshape1345), axis=2) reshape1346: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat95, R.shape([seq_len, 60, 64])) lv261 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(31), R.prim_value(T.float32(1)), reshape1346), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1347: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv261, R.shape([1, seq_len, 20, 64])) reshape1348: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1347, R.shape([1, seq_len, 1280])) permute_dims1022: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_31_self_attn_out_proj_weight4, axes=None) matmul1021: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1348, permute_dims1022, out_dtype="void") add1212: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul1021, model_decoder_layers_31_self_attn_out_proj_bias4) add1213: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1209, add1212) layer_norm353: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1213, model_decoder_layers_31_encoder_attn_layer_norm_weight4, model_decoder_layers_31_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1023: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_31_encoder_attn_q_proj_weight4, axes=None) 
matmul1022: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(layer_norm353, permute_dims1023, out_dtype="void") add1214: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul1022, model_decoder_layers_31_encoder_attn_q_proj_bias4) reshape1349: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(add1214, R.shape([1, seq_len, 20, 64])) reshape1350: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1349, R.shape([seq_len, 20, 64])) lv262 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(31), R.prim_value(T.float32(1)), reshape1350), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) reshape1351: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv262, R.shape([1, seq_len, 20, 64])) reshape1352: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1351, R.shape([1, seq_len, 1280])) permute_dims1024: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_31_encoder_attn_out_proj_weight4, axes=None) matmul1023: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(reshape1352, permute_dims1024, out_dtype="void") add1215: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul1023, model_decoder_layers_31_encoder_attn_out_proj_bias4) add1216: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1213, add1215) layer_norm354: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1216, model_decoder_layers_31_final_layer_norm_weight4, model_decoder_layers_31_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) permute_dims1025: R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_31_fc1_weight4, axes=None) matmul1024: R.Tensor((1, seq_len, 5120), dtype="float16") = R.matmul(layer_norm354, permute_dims1025, out_dtype="void") add1217: R.Tensor((1, seq_len, 5120), dtype="float16") = R.add(matmul1024, model_decoder_layers_31_fc1_bias4) gelu129: R.Tensor((1, 
seq_len, 5120), dtype="float16") = R.nn.gelu(add1217) permute_dims1026: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_31_fc2_weight4, axes=None) matmul1025: R.Tensor((1, seq_len, 1280), dtype="float16") = R.matmul(gelu129, permute_dims1026, out_dtype="void") add1218: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(matmul1025, model_decoder_layers_31_fc2_bias4) add1219: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1216, add1218) layer_norm355: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1219, model_decoder_layer_norm_weight4, model_decoder_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) lv263 = R.call_tir(cls.index, (layer_norm355,), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) permute_dims1027: R.Tensor((1280, 51866), dtype="float16") = R.permute_dims(model_decoder_embed_tokens_weight4, axes=None) matmul1026: R.Tensor((1, 1, 51866), dtype="float32") = R.matmul(lv263, permute_dims1027, out_dtype="float32") gv4: R.Tensor((1, 1, 51866), dtype="float32") = matmul1026 R.output(gv4) return gv4

    # ------------------------------------------------------------------
    # NOTE(review): everything above this comment is the byte-for-byte tail
    # of a decoder forward function whose header lies in an earlier chunk of
    # this auto-generated TVMScript dump; it final-layer-norms the hidden
    # states, gathers one position via cls.index, and projects to the 51866
    # vocab logits in float32.  The functions below are the complete
    # sampling / post-processing Relax entry points of the module.
    # ------------------------------------------------------------------

    @R.function
    def renormalize_by_top_p(probs: R.Tensor(("batch_size", "vocab_size"), dtype="float32"), top_p: R.Tensor(("batch_size",), dtype="float32"), init_pivots: R.Tensor(("batch_size", 3), dtype="float32")) -> R.Tensor(("batch_size", "vocab_size"), dtype="float32"):
        # Renormalize each row of `probs` under its per-row top-p cutoff.
        # Two-kernel scheme: cls.top_p_pivot_cutoff derives a per-batch pivot
        # (and a second per-batch scalar) from `probs`, `top_p`, and the three
        # initial pivot guesses; cls.top_p_renorm_after_cutoff then rescales
        # `probs` using both scalars.  Exact pivot semantics live in those
        # PrimFuncs (defined elsewhere in this module) — not visible here.
        batch_size = T.int64()
        vocab_size = T.int64()
        R.func_attr({"relax.memory_plan_dynamic_func_output": 1, "tir_non_negative_var": ["vocab_size"], "tir_var_upper_bound": {"batch_size": 8, "num_positions": 48, "num_samples": 8}})
        cls = Module
        with R.dataflow():
            # Pivot search: returns two per-batch float32 vectors.
            lv6 = R.call_tir(cls.top_p_pivot_cutoff, (probs, top_p, init_pivots), out_sinfo=[R.Tensor((batch_size,), dtype="float32"), R.Tensor((batch_size,), dtype="float32")])
            lv7: R.Tensor((batch_size,), dtype="float32") = lv6[0]
            lv8: R.Tensor((batch_size,), dtype="float32") = lv6[1]
            # Apply the cutoff and renormalize to a full (batch, vocab) tensor.
            gv5 = R.call_tir(cls.top_p_renorm_after_cutoff, (probs, lv7, lv8), out_sinfo=R.Tensor((batch_size, vocab_size), dtype="float32"))
            R.output(gv5)
        return gv5

    @R.function
    def sample_with_top_p(sorted_probs: R.Tensor(("batch_size", "vocab_size"), dtype="float32"), sorted_indices: R.Tensor(("batch_size", "vocab_size"), dtype="int32"), uniform_samples: R.Tensor(("num_samples",), dtype="float32"), sample_indices: R.Tensor(("num_samples",), dtype="int32"), top_p: R.Tensor(("batch_size",), dtype="float32")) -> R.Tensor(("num_samples",), dtype="int32"):
        # Draw `num_samples` token ids from pre-sorted per-row distributions
        # via inverse-CDF lookup on the cumulative sum of `sorted_probs`,
        # restricted by per-row top-p.  `sample_indices` maps each sample to
        # its source batch row; `uniform_samples` are the U[0,1) draws.
        num_samples = T.int64()
        batch_size = T.int64()
        vocab_size = T.int64()
        R.func_attr({"relax.memory_plan_dynamic_func_output": 1, "tir_non_negative_var": ["vocab_size"], "tir_var_upper_bound": {"batch_size": 8, "num_positions": 48, "num_samples": 8}})
        cls = Module
        with R.dataflow():
            sorted_probs_1: R.Tensor((batch_size, vocab_size), dtype="float32") = sorted_probs
            sorted_indices_1: R.Tensor((batch_size, vocab_size), dtype="int32") = sorted_indices
            # Reshape the 1-D inputs to column vectors expected by the kernels.
            uniform_samples1: R.Tensor((num_samples, 1), dtype="float32") = R.call_pure_packed("vm.builtin.reshape", uniform_samples, R.shape([num_samples, 1]), sinfo_args=(R.Tensor((num_samples, 1), dtype="float32"),))
            sample_indices1: R.Tensor((num_samples, 1), dtype="int32") = R.call_pure_packed("vm.builtin.reshape", sample_indices, R.shape([num_samples, 1]), sinfo_args=(R.Tensor((num_samples, 1), dtype="int32"),))
            # NOTE(review): despite the generated name, `sample_indices2` is
            # the reshaped per-row top_p values, not an index tensor.
            sample_indices2: R.Tensor((batch_size, 1), dtype="float32") = R.call_pure_packed("vm.builtin.reshape", top_p, R.shape([batch_size, 1]), sinfo_args=(R.Tensor((batch_size, 1), dtype="float32"),))
            # cls.full builds a (batch, 1) int32 tensor parameterized by
            # vocab_size (presumably a fill of vocab_size — defined elsewhere).
            lv3 = R.call_tir(cls.full, R.tuple(), out_sinfo=R.Tensor((batch_size, 1), dtype="int32"), tir_vars=R.shape([vocab_size]))
            # Inclusive CDF along the vocab axis of each sorted row.
            cumsum: R.Tensor((batch_size, vocab_size), dtype="float32") = R.cumsum(sorted_probs_1, axis=1, dtype="void", exclusive=None)
            # Per-row renormalization constant under the top-p cutoff.
            lv4 = R.call_tir(cls.get_renorm_prob, (cumsum, sample_indices2, lv3), out_sinfo=R.Tensor((batch_size, 1), dtype="float32"))
            # Inverse-CDF lookup: map each uniform draw to a sorted position,
            # then through sorted_indices back to an original token id.
            lv5 = R.call_tir(cls.get_index_from_sorted, (cumsum, sorted_indices_1, lv4, uniform_samples1, sample_indices1), out_sinfo=R.Tensor((num_samples, 1), dtype="int32"))
            gv2: R.Tensor((num_samples,), dtype="int32") = R.call_pure_packed("vm.builtin.reshape", lv5, R.shape([num_samples]), sinfo_args=(R.Tensor((num_samples,), dtype="int32"),))
            R.output(gv2)
        return gv2

    @R.function
    def sampler_take_probs(unsorted_probs: R.Tensor(("batch_size", "vocab_size"), dtype="float32"), sorted_indices: R.Tensor(("batch_size", "vocab_size"), dtype="int32"), sample_indices: R.Tensor(("num_samples",), dtype="int32"), sampling_result: R.Tensor(("num_samples",), dtype="int32"), lobprob_offsets: R.Tensor(("num_positions",), dtype="int32")) -> R.Tuple(R.Tensor(("num_samples",), dtype="float32"), R.Tensor(("num_positions",), dtype="float32"), R.Tensor(("num_positions",), dtype="int32")):
        # Gather the probabilities of already-sampled tokens (and per-position
        # data selected by `lobprob_offsets`) in one fused TIR kernel.
        # NOTE(review): "lobprob_offsets" looks like a generated typo for
        # "logprob_offsets"; the name is part of the ABI here, so it is kept.
        num_samples = T.int64()
        num_positions = T.int64()
        batch_size = T.int64()
        vocab_size = T.int64()
        R.func_attr({"relax.memory_plan_dynamic_func_output": 1, "tir_non_negative_var": ["vocab_size"], "tir_var_upper_bound": {"batch_size": 8, "num_positions": 48, "num_samples": 8}})
        cls = Module
        with R.dataflow():
            # Single fused gather kernel; see cls.sampler_take_probs_tir for
            # the exact per-output semantics (defined elsewhere in the module).
            gv3 = R.call_tir(cls.sampler_take_probs_tir, (unsorted_probs, sorted_indices, sample_indices, sampling_result, lobprob_offsets), out_sinfo=[R.Tensor((num_samples,), dtype="float32"), R.Tensor((num_positions,), dtype="float32"), R.Tensor((num_positions,), dtype="int32")])
            R.output(gv3)
        return gv3

    @R.function
    def sampler_verify_draft_tokens(draft_probs: R.Tensor(("num_nodes", "vocab_size"), dtype="float32"), draft_tokens: R.Tensor(("num_nodes",), dtype="int32"), model_probs: R.Tensor(("num_nodes", "vocab_size"), dtype="float32"), token_tree_first_child: R.Tensor(("num_nodes",), dtype="int32"), token_tree_next_sibling: R.Tensor(("num_nodes",), dtype="int32"), uniform_samples: R.Tensor(("num_nodes",), dtype="float32"), token_tree_parent_ptr: R.Tensor(("nbatch",), dtype="int32")) -> R.Tuple(R.Tensor(("num_nodes", "vocab_size"), dtype="float32"), R.Tensor(("nbatch",), dtype="int32")):
        # Speculative-decoding verification over a token tree (first-child /
        # next-sibling encoding).  Runs as a single GPU kernel that updates
        # `model_probs` (arg 2) and `token_tree_parent_ptr` (arg 6) IN PLACE
        # — see inplace_indices=[2, 6] — and returns those same buffers.
        num_nodes = T.int64()
        vocab_size = T.int64()
        nbatch = T.int64()
        R.func_attr({"relax.memory_plan_dynamic_func_output": 1, "tir_non_negative_var": ["vocab_size"], "tir_var_upper_bound": {"batch_size": 8, "num_positions": 48, "num_samples": 8}})
        cls = Module
        with R.dataflow():
            gv4: R.Tuple(R.Tensor((num_nodes, vocab_size), dtype="float32"), R.Tensor((nbatch,), dtype="int32")) = R.call_tir_inplace(cls.batch_verify_on_gpu_single_kernel, (draft_probs, draft_tokens, model_probs, token_tree_first_child, token_tree_next_sibling, uniform_samples, token_tree_parent_ptr), out_sinfo=[R.Tensor((num_nodes, vocab_size), dtype="float32"), R.Tensor((nbatch,), dtype="int32")], inplace_indices=[2, 6])
            R.output(gv4)
        return gv4

    @R.function
    def softmax_with_temperature(logits: R.Tensor(("batch_size", 1, "vocab_size"), dtype="float32"), temperature: R.Tensor(("batch_size",), dtype="float32")) -> R.Tensor(("batch_size", 1, "vocab_size"), dtype="float32"):
        # Temperature-scaled softmax over the vocab axis, computed in two
        # chunked passes (chunk size 4096 along vocab, as the
        # (vocab_size + 4096 - 1) // 4096 out_sinfo shapes show):
        # cls.chunk_lse produces per-chunk statistics, then
        # cls.softmax_with_chunked_sum combines them into the final softmax.
        batch_size = T.int64()
        vocab_size = T.int64()
        R.func_attr({"relax.memory_plan_dynamic_func_output": 1, "tir_non_negative_var": ["vocab_size"], "tir_var_upper_bound": {"batch_size": 8, "seq_len": 15000, "total_seq_len": 1500}})
        cls = Module
        with R.dataflow():
            # Drop the middle singleton dim: (batch, 1, vocab) -> (batch, vocab).
            lv: R.Tensor((batch_size, vocab_size), dtype="float32") = R.call_pure_packed("vm.builtin.reshape", logits, R.shape([batch_size, vocab_size]), sinfo_args=(R.Tensor((batch_size, vocab_size), dtype="float32"),))
            # Pass 1: per-chunk statistics (two (batch, n_chunks) tensors).
            lv1 = R.call_tir(cls.chunk_lse, (lv, temperature), out_sinfo=[R.Tensor((batch_size, (vocab_size + 4096 - 1) // 4096), dtype="float32"), R.Tensor((batch_size, (vocab_size + 4096 - 1) // 4096), dtype="float32")])
            lv2: R.Tensor((batch_size, (vocab_size + 4096 - 1) // 4096), dtype="float32") = lv1[0]
            lv3: R.Tensor((batch_size, (vocab_size + 4096 - 1) // 4096), dtype="float32") = lv1[1]
            # Pass 2: combine chunk statistics into the softmax output.
            lv4 = R.call_tir(cls.softmax_with_chunked_sum, (lv, temperature, lv2, lv3), out_sinfo=R.Tensor((batch_size, vocab_size), dtype="float32"))
            # Restore the (batch, 1, vocab) shape expected by callers.
            gv: R.Tensor((batch_size, 1, vocab_size), dtype="float32") = R.call_pure_packed("vm.builtin.reshape", lv4, R.shape([batch_size, 1, vocab_size]), sinfo_args=(R.Tensor((batch_size, 1, vocab_size), dtype="float32"),))
            R.output(gv)
        return gv