| uuid | file_name | repo_name | file_path | commit_hash | starcount | input | category | licenses | github_url |
---|---|---|---|---|---|---|---|---|---|
6d3ee5b8-898b-438e-a16f-c87c7ce98c84 | triton_sll.py | pytorch/FBGEMM | fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py | fe980ab54a6e28818d81c8694b6564e7f804418b | 0 | @triton.jit
def _jagged_dense_flash_attention_bwd_dk_kernel(q_ptr, k_ptr, v_ptr, ab_ptr,
jagged_offsets_ptr, out_ptr, do_ptr, lse_ptr, delta_ptr, dq_ptr, dk_ptr,
dv_ptr, dbias_ptr, max_seq_len, stride_ql, stride_qd, stride_kb,
stride_kd, stride_kt, stride_vl, stride_vd, stride_ab_b, stride_ab_l,
stride_ab_t, stride_ob, stride_ot, stride_od, stride_dk_b, stride_dk_d,
stride_dk_t, stride_do_b, stride_do_t, stride_do_d, D, T: tl.constexpr,
BLOCK_T: tl.constexpr, BLOCK_L: tl.constexpr, BLOCK_D: tl.constexpr,
allow_tf32: tl.constexpr):
pid_t = tl.program_id(0)
pid_b = tl.program_id(1)
begin = tl.load(jagged_offsets_ptr + pid_b)
end = tl.load(jagged_offsets_ptr + pid_b + 1)
seqlen = end - begin
seqlen = tl.minimum(seqlen, max_seq_len)
if seqlen == 0:
return
q_start_ptr = q_ptr + begin * stride_ql
k_start_ptr = k_ptr + pid_b * stride_kb
ab_start_ptr = ab_ptr + pid_b * stride_ab_b
v_start_ptr = v_ptr + begin * stride_vl
do_start_ptr = do_ptr + pid_b * stride_do_b
dk_start_ptr = dk_ptr + pid_b * stride_dk_b
delta_ptrs = delta_ptr + pid_b * T
lse_ptrs = lse_ptr + pid_b * T
offs_t_curr = pid_t * BLOCK_T + tl.arange(0, BLOCK_T)
offs_d = tl.arange(0, BLOCK_D)
k_ptrs = k_start_ptr + offs_d[:, None] * stride_kd + offs_t_curr[None, :
] * stride_kt
do_ptrs = do_start_ptr + offs_t_curr[:, None] * stride_do_t + offs_d[
None, :] * stride_do_d
dk_ptrs = dk_start_ptr + offs_d[:, None] * stride_dk_d + offs_t_curr[
None, :] * stride_dk_t
dk = tl.zeros([BLOCK_D, BLOCK_T], dtype=tl.float32)
k = tl.load(k_ptrs, mask=(offs_t_curr[None, :] < T) & (offs_d[:, None] <
BLOCK_D), other=0.0)
start_l = 0
while start_l < seqlen:
offs_l_curr = start_l + tl.arange(0, BLOCK_L)
q_ptrs = q_start_ptr + offs_l_curr[:, None] * stride_ql + offs_d[
None, :] * stride_qd
q = tl.load(q_ptrs, mask=offs_l_curr[:, None] < seqlen, other=0.0)
v_ptrs = v_start_ptr + offs_l_curr[:, None] * stride_vl + offs_d[
None, :] * stride_vd
v = tl.load(v_ptrs, mask=offs_l_curr[:, None] < seqlen, other=0.0)
qk = tl.zeros([BLOCK_L, BLOCK_T], dtype=tl.float32)
qk = tl.dot(q, k, allow_tf32=allow_tf32)
qk = tl.where((offs_l_curr[:, None] < seqlen) & (offs_t_curr[None,
:] < T), qk, 0.0)
ab_ptrs = ab_start_ptr + offs_l_curr[:, None
] * stride_ab_l + offs_t_curr[None, :] * stride_ab_t
ab = tl.load(ab_ptrs, mask=(offs_l_curr[:, None] < seqlen) & (
offs_t_curr[None, :] < T), other=0.0)
qk = qk + ab
qk_mask = (offs_l_curr[:, None] < seqlen) & (offs_t_curr[None, :] < T)
qk = tl.where(qk_mask, qk, float('-inf'))
lse_t = tl.load(lse_ptrs + offs_t_curr, mask=offs_t_curr < T)
p = tl.exp(qk - lse_t[None, :])
p = tl.where(qk_mask, p, 0.0)
do = tl.load(do_ptrs, mask=offs_t_curr[:, None] < T, other=0.0)
delta = tl.load(delta_ptrs + offs_t_curr, mask=offs_t_curr < T)
dp = tl.trans(tl.dot(do, tl.trans(v), allow_tf32=allow_tf32))
ds = p * (dp - delta[None, :])
dk += tl.dot(tl.trans(q), ds, allow_tf32=allow_tf32)
start_l += BLOCK_L
tl.store(dk_ptrs, dk, mask=offs_t_curr[None, :] < T)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Backpropagation",
"Matrix Multiplication",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Blocked Access",
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"BSD",
"MIT"
] | https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py |
8c7b91e8-cc8f-40fc-afce-459f1f8214bf | lightning_attn2.py | OpenNLPLab/lightning-attention | lightning_attn/ops/triton/lightning_attn2.py | d7439519541e966084eeaaf3ffd63eecc216f414 | 0 | @triton.jit
def _bwd_intra_kernel(Q, K, V, S, DO, DQ, DK, DV, b: tl.constexpr, h: tl.
constexpr, n: tl.constexpr, d: tl.constexpr, e: tl.constexpr, BLOCK: tl
.constexpr, NUM_BLOCK: tl.constexpr, CBLOCK: tl.constexpr, NUM_CBLOCK:
tl.constexpr):
off_bh = tl.program_id(0)
off_block = tl.program_id(1)
off_h = off_bh % h
qk_offset = off_bh * n * d
v_offset = off_bh * n * e
o_offset = off_bh * n * e
block_offset = off_block * BLOCK + tl.arange(0, BLOCK)
Q_trans_block_ptr = Q + qk_offset + block_offset[None, :] * d + tl.arange(
0, d)[:, None]
K_block_ptr = K + qk_offset + block_offset[:, None] * d + tl.arange(0, d)[
None, :]
V_trans_block_ptr = V + v_offset + block_offset[None, :] * e + tl.arange(
0, e)[:, None]
DQ_block_ptr = DQ + qk_offset + block_offset[:, None] * d + tl.arange(0, d
)[None, :]
DK_trans_block_ptr = DK + qk_offset + block_offset[None, :
] * d + tl.arange(0, d)[:, None]
DV_block_ptr = DV + v_offset + block_offset[:, None] * e + tl.arange(0, e)[
None, :]
DO_block_ptr = DO + o_offset + block_offset[:, None] * e + tl.arange(0, e)[
None, :]
S_block_ptr = S + off_h
s = tl.load(S_block_ptr)
array = tl.arange(0, BLOCK).to(tl.float32)
index = array[:, None] - array[None, :]
s_index = s * index
s_index = tl.where(index >= 0, -s_index, float('-inf'))
diag_decay = tl.exp(s_index)
diag_decay_trans = tl.trans(diag_decay)
k = tl.load(K_block_ptr, mask=block_offset[:, None] < n, other=0.0).to(tl
.float32)
v_trans = tl.load(V_trans_block_ptr, mask=block_offset[None, :] < n,
other=0.0).to(tl.float32)
do = tl.load(DO_block_ptr, mask=block_offset[:, None] < n, other=0.0).to(tl
.float32)
q_trans = tl.load(Q_trans_block_ptr, mask=block_offset[None, :] < n,
other=0.0).to(tl.float32)
dqk = tl.dot(do, v_trans) * diag_decay
dq_intra = tl.dot(dqk, k)
dk_intra_trans = tl.dot(q_trans, dqk)
qk_trans = tl.dot(k, q_trans) * diag_decay_trans
dv_intra = tl.dot(qk_trans, do)
dq = dq_intra
dk_trans = dk_intra_trans
dv = dv_intra
tl.store(DQ_block_ptr, dq.to(DQ_block_ptr.dtype.element_ty), mask=
block_offset[:, None] < n)
tl.store(DK_trans_block_ptr, dk_trans.to(DK_trans_block_ptr.dtype.
element_ty), mask=block_offset[None, :] < n)
tl.store(DV_block_ptr, dv.to(DV_block_ptr.dtype.element_ty), mask=
block_offset[:, None] < n)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Backpropagation",
"Matrix Multiplication",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Blocked Access",
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/OpenNLPLab/lightning-attention/blob/d7439519541e966084eeaaf3ffd63eecc216f414/lightning_attn/ops/triton/lightning_attn2.py |
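A minimal host-side launch sketch for the kernel above (the wrapper name, tensor shapes, and block sizes are assumptions, not part of the dataset row). The grid parallelizes over (batch * heads, sequence blocks), and d, e, and BLOCK must be powers of two because tl.arange requires power-of-two extents:

```python
import torch
import triton

def bwd_intra(Q, K, V, S, DO, BLOCK=64, CBLOCK=32):
    # Assumed contiguous layouts: Q, K: [b, h, n, d]; V, DO: [b, h, n, e];
    # S: one decay rate per head, shape [h].
    b, h, n, d = Q.shape
    e = V.shape[-1]
    DQ, DK, DV = torch.empty_like(Q), torch.empty_like(K), torch.empty_like(V)
    NUM_BLOCK = triton.cdiv(n, BLOCK)
    NUM_CBLOCK = triton.cdiv(n, CBLOCK)
    grid = (b * h, NUM_BLOCK)  # one program per (batch*head, sequence block)
    _bwd_intra_kernel[grid](Q, K, V, S, DO, DQ, DK, DV,
                            b, h, n, d, e, BLOCK, NUM_BLOCK, CBLOCK, NUM_CBLOCK)
    return DQ, DK, DV
```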
4fcdc1d3-596d-431c-b62b-3f6a8a65e5a1 | k_dropout.py | cpuhrsch/torchfused | torchfused/triton/k_dropout.py | 6c40ed160dcecbe7825f268f7c86bccd359e0ebf | 0 | @triton.jit
def _drop_and_scale(SEEDS, row, p, offsets, x):
seed = SEEDS + row
random = tl.rand(seed.to(tl.int32), offsets)
x_keep = random > p
zero = 0.0
zero = zero.to(x.dtype)
return tl.where(x_keep, (x / (1 - p)).to(x.dtype), zero)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"BSD"
] | https://github.com/cpuhrsch/torchfused/blob/6c40ed160dcecbe7825f268f7c86bccd359e0ebf/torchfused/triton/k_dropout.py |
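`_drop_and_scale` is a device-side helper rather than a launchable kernel. A hypothetical caller might compose it as sketched below; the kernel name, row-major layout, and per-row seed tensor are all assumptions, not the library's actual entry point:

```python
import triton
import triton.language as tl

@triton.jit
def _dropout_rows(X, OUT, SEEDS, p, stride, N, BLOCK: tl.constexpr):
    # One program per row; SEEDS is assumed to hold one integer seed per row.
    row = tl.program_id(0)
    offsets = tl.arange(0, BLOCK)
    mask = offsets < N
    x = tl.load(X + row * stride + offsets, mask=mask)
    y = _drop_and_scale(SEEDS, row, p, offsets, x)
    tl.store(OUT + row * stride + offsets, y, mask=mask)
```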
83c7f330-658f-46b1-b1f2-1cd74b5122b4 | triton_welford.py | pytorch-labs/tritonbench | tritonbench/operators/welford/triton_welford.py | 3a5dccb159834968567a2e45e561dc1aeaa8f8a8 | 0 | @triton.autotune(configs=[triton.Config({'XBLOCK': 1, 'RBLOCK': 1024},
num_stages=1, num_warps=8), triton.Config({'XBLOCK': 1, 'RBLOCK': 2048},
num_stages=1, num_warps=8)], key=['xnumel', 'rnumel'])
@triton.jit
def triton_red_fused_native_layer_norm_no_welford(in_out_ptr0, in_out_ptr1,
in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.
constexpr, RBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
_tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + rnumel * x0), rmask, eviction_policy
='evict_last').to(tl.float32)
tmp1 = tmp0.to(tl.float32)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = _tmp3 + tmp2
_tmp3 = tmp4
tmp3 = tl.sum(_tmp3, 1)[:, None]
tmp5 = rnumel
tmp6 = tmp3 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, None)
_tmp12 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp7 = tl.load(in_ptr0 + (r1 + rnumel * x0), rmask, eviction_policy
='evict_last').to(tl.float32)
tmp8 = tmp7.to(tl.float32)
tmp9 = tmp8 - tmp6
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = _tmp12 + tmp11
_tmp12 = tmp13
tmp12 = tl.sum(_tmp12, 1)[:, None]
tmp14 = rnumel
tmp15 = tmp12 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp18, None)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp19 = tl.load(in_ptr0 + (r1 + rnumel * x0), rmask,
eviction_policy='evict_first').to(tl.float32)
tmp23 = tl.load(in_ptr1 + r1, rmask, eviction_policy='evict_last').to(
tl.float32)
tmp26 = tl.load(in_ptr2 + r1, rmask, eviction_policy='evict_last').to(
tl.float32)
tmp20 = tmp19.to(tl.float32)
tmp21 = tmp20 - tmp6
tmp22 = tmp21 * tmp18
tmp24 = tmp23.to(tl.float32)
tmp25 = tmp22 * tmp24
tmp27 = tmp26.to(tl.float32)
tmp28 = tmp25 + tmp27
tmp29 = tmp28.to(tl.float32)
tl.store(out_ptr0 + (r1 + rnumel * x0), tmp29, rmask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Blocked Access",
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"BSD"
] | https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/operators/welford/triton_welford.py |
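A hedged launch sketch for the autotuned layer-norm kernel above. The buffer roles are inferred from the pointer math (in_ptr0 is the [xnumel, rnumel] input, in_ptr1/in_ptr2 the weight and bias, in_out_ptr0/in_out_ptr1 the per-row mean and rstd, out_ptr0 the output) and should be treated as assumptions; XBLOCK and RBLOCK come from the autotuner, so the grid reads XBLOCK through meta:

```python
import torch
import triton

def layer_norm_no_welford(x, weight, bias):
    xnumel, rnumel = x.shape
    mean = torch.empty(xnumel, device=x.device, dtype=torch.float32)
    rstd = torch.empty(xnumel, device=x.device, dtype=torch.float32)
    out = torch.empty_like(x)
    # XBLOCK is chosen by the autotuner, so size the grid through meta.
    grid = lambda meta: (triton.cdiv(xnumel, meta['XBLOCK']),)
    triton_red_fused_native_layer_norm_no_welford[grid](
        mean, rstd, x, weight, bias, out, xnumel, rnumel)
    return out, mean, rstd
```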
c8059c9e-a8e6-4564-86fd-169f523010dc | positional_embedding.py | ai-compiler-study/triton-kernels | triton_kernels/kernels/positional_embedding.py | 2308e5e9d965059fe2d19b4d535debac4970b69e | 0 | @triton.jit
def _rope_fwd(q_ptr, k_ptr, f_ptr, oq_ptr, ok_ptr, stride, d, BLOCK_SIZE:
tl.constexpr):
bh_idx = tl.program_id(0)
s_idx = tl.program_id(1)
q_start_ptr = q_ptr + bh_idx * stride
k_start_ptr = k_ptr + bh_idx * stride
oq_start_ptr = oq_ptr + bh_idx * stride
ok_start_ptr = ok_ptr + bh_idx * stride
d_half = d // 2
col_offsets = tl.arange(0, BLOCK_SIZE)
col_offsets2 = tl.arange(0, BLOCK_SIZE * 2)
f0_ptrs = f_ptr + s_idx * d * 2 + col_offsets2 * 2
f1_ptrs = f_ptr + s_idx * d * 2 + col_offsets2 * 2 + 1
f0 = tl.load(f0_ptrs, mask=col_offsets2 < d, other=0.0).reshape(BLOCK_SIZE,
2)
f1 = tl.load(f1_ptrs, mask=col_offsets2 < d, other=0.0).reshape(BLOCK_SIZE,
2)
q0_ptrs = q_start_ptr + s_idx * d + col_offsets * 2
q1_ptrs = q_start_ptr + s_idx * d + col_offsets * 2 + 1
q0 = tl.load(q0_ptrs, mask=col_offsets < d_half, other=0.0).reshape(
BLOCK_SIZE, 1)
q1 = tl.load(q1_ptrs, mask=col_offsets < d_half, other=0.0).reshape(
BLOCK_SIZE, 1)
k0_ptrs = k_start_ptr + s_idx * d + col_offsets * 2
k1_ptrs = k_start_ptr + s_idx * d + col_offsets * 2 + 1
k0 = tl.load(k0_ptrs, mask=col_offsets < d_half, other=0.0).reshape(
BLOCK_SIZE, 1)
k1 = tl.load(k1_ptrs, mask=col_offsets < d_half, other=0.0).reshape(
BLOCK_SIZE, 1)
oq = f0 * q0 + f1 * q1
ok = f0 * k0 + f1 * k1
oq_ptrs = oq_start_ptr + s_idx * d + col_offsets2
ok_ptrs = ok_start_ptr + s_idx * d + col_offsets2
tl.store(oq_ptrs, oq.reshape(BLOCK_SIZE * 2), mask=col_offsets2 < d)
tl.store(ok_ptrs, ok.reshape(BLOCK_SIZE * 2), mask=col_offsets2 < d)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/ai-compiler-study/triton-kernels/blob/2308e5e9d965059fe2d19b4d535debac4970b69e/triton_kernels/kernels/positional_embedding.py |
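A hypothetical wrapper for the RoPE kernel above. The layouts (q, k as contiguous [batch*heads, seq, d]; f as [seq, d, 2] interleaved rotation factors) are inferred from the pointer arithmetic and are assumptions, not documented API:

```python
import torch
import triton

def rope_fwd(q, k, f):
    bh, seq, d = q.shape                         # q, k: [batch*heads, seq, d]
    oq, ok = torch.empty_like(q), torch.empty_like(k)
    BLOCK_SIZE = triton.next_power_of_2(d // 2)  # covers the d/2 rotation pairs
    grid = (bh, seq)                             # one program per (head, position)
    _rope_fwd[grid](q, k, f, oq, ok, q.stride(0), d, BLOCK_SIZE=BLOCK_SIZE)
    return oq, ok
```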
ae45fb44-d0d9-48b0-8661-e897c1327370 | seqlen_utils.py | Kitsunetic/kitsu | kitsu/nn/seqlen_utils.py | 826967a493c89753ac2cf1e28b52b79998fc9076 | 0 | @triton.jit
def seqlen_to_index_kernel(seqlen_ptr, idx_ptr, BLK: tl.constexpr):
pid = tl.program_id(0)
i = tl.load(seqlen_ptr + pid)
j = tl.load(seqlen_ptr + pid + 1)
idx = tl.arange(0, BLK)
tl.store(idx_ptr + i + idx, idx, mask=idx < j - i)
| {
"Data Type": [
"uint8"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Low Latency"
]
} | [
"MIT"
] | https://github.com/Kitsunetic/kitsu/blob/826967a493c89753ac2cf1e28b52b79998fc9076/kitsu/nn/seqlen_utils.py |
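A minimal host-side sketch for the kernel above (the wrapper name and dtypes are assumptions). `seqlen` holds exclusive prefix sums, so segment `b` spans `[seqlen[b], seqlen[b+1])`, and `BLK` must be a power of two at least as large as the longest segment:

```python
import torch
import triton

def seqlen_to_index(seqlen: torch.Tensor) -> torch.Tensor:
    B = seqlen.numel() - 1                      # number of segments
    total = int(seqlen[-1].item())              # total number of positions
    max_len = int((seqlen[1:] - seqlen[:-1]).max().item())
    BLK = triton.next_power_of_2(max_len)       # must cover the longest segment
    idx = torch.empty(total, dtype=torch.int32, device=seqlen.device)
    seqlen_to_index_kernel[(B,)](seqlen, idx, BLK=BLK)
    return idx
```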
510db266-e82a-4bbf-b760-757bb9b99da6 | special.py | IntelLabs/EquiTriton | src/equitriton/sph_harm/direct/special.py | 1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c | 0 | @triton.jit
def joint_second_order_fwd(coord_ptr: tl.tensor, output_ptr: tl.tensor,
block_size: tl.constexpr, coord_numel: tl.constexpr, output_numel: tl.
constexpr):
"""
This Triton implementation computes l=0, 1, and 2 within the
same kernel, since evaluating them together is a common operation.
"""
coord_stride = 3
block_id = tl.program_id(0)
coord_striding = tl.arange(0, block_size) * coord_stride
coord_row_offset = coord_striding + block_size * coord_stride * block_id
x = tl.load(coord_ptr + coord_row_offset, mask=coord_row_offset <
coord_numel)
y = tl.load(coord_ptr + coord_row_offset + 1, mask=coord_row_offset + 1 <
coord_numel)
z = tl.load(coord_ptr + coord_row_offset + 2, mask=coord_row_offset + 2 <
coord_numel)
CONST_00 = 3.87298334620742
CONST_01 = 2.23606797749979
CONST_02 = -1.11803398874989
CONST_03 = 1.93649167310371
CONST_04 = tl.sqrt(3.0)
Y10 = CONST_04 * x
Y11 = CONST_04 * y
Y12 = CONST_04 * z
Y20 = CONST_00 * x * z
Y21 = CONST_00 * x * y
Y23 = CONST_00 * y * z
Y22 = CONST_02 * x * x + CONST_01 * y * y + CONST_02 * z * z
Y24 = -CONST_03 * x * x + CONST_03 * z * z
output_stride = 9
output_striding = tl.arange(0, block_size) * output_stride
output_row_offset = output_striding + block_size * output_stride * block_id
tl.store(output_ptr + output_row_offset, 1.0, mask=output_row_offset <
output_numel)
tl.store(output_ptr + output_row_offset + 1, Y10, mask=
output_row_offset + 1 < output_numel)
tl.store(output_ptr + output_row_offset + 2, Y11, mask=
output_row_offset + 2 < output_numel)
tl.store(output_ptr + output_row_offset + 3, Y12, mask=
output_row_offset + 3 < output_numel)
tl.store(output_ptr + output_row_offset + 4, Y20, mask=
output_row_offset + 4 < output_numel)
tl.store(output_ptr + output_row_offset + 5, Y21, mask=
output_row_offset + 5 < output_numel)
tl.store(output_ptr + output_row_offset + 6, Y22, mask=
output_row_offset + 6 < output_numel)
tl.store(output_ptr + output_row_offset + 7, Y23, mask=
output_row_offset + 7 < output_numel)
tl.store(output_ptr + output_row_offset + 8, Y24, mask=
output_row_offset + 8 < output_numel)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"Apache"
] | https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/direct/special.py |
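A hedged launch sketch for the kernel above; the [num_pts, 3] coordinate layout and the 9-wide output rows (one slot for l=0, three for l=1, five for l=2) are inferred from the strides in the kernel body:

```python
import torch
import triton

def joint_second_order(coords: torch.Tensor, block_size: int = 256):
    num_pts = coords.shape[0]                   # coords: [num_pts, 3], contiguous
    out = torch.empty(num_pts, 9, device=coords.device, dtype=coords.dtype)
    grid = (triton.cdiv(num_pts, block_size),)  # one program per block of points
    joint_second_order_fwd[grid](coords, out, block_size,
                                 coords.numel(), out.numel())
    return out
```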
4e1ee352-1659-4bdb-be7f-7b5f64c2a6f8 | triton_fused_vq_attn.py | LouChao98/vqtree | ops/triton_fused_vq_attn.py | 27a53274df7a804bce27dffcce5f5be73f64b6f3 | 0 | @triton.jit
def _vq_attn_fwd_inner(acc, l_i, m_i, q, sm_scale, K_block_ptr,
K_VQC_block_ptr, VVQI_block_ptr, V_VQ, stride_vvq_n, CODEBOOK_SIZE: tl.
constexpr, BLOCK_HEADDIM: tl.constexpr, BLOCK_N: tl.constexpr):
for _ in range(0, CODEBOOK_SIZE, BLOCK_N):
k = tl.load(K_block_ptr)
qk = tl.dot(q, k) * (sm_scale * RCP_LN2)
k_vq_cnt = tl.load(K_VQC_block_ptr)
mask = k_vq_cnt != 0.0
qk = tl.where(mask[None, :], qk, NEGINF)
m_i_new = tl.maximum(m_i, tl.max(qk, 1))
alpha = tl.math.exp2(m_i - m_i_new)
p = tl.math.exp2(qk - m_i_new[:, None])
acc *= alpha[:, None]
v_vq_index = tl.load(VVQI_block_ptr)
v_ptr = V_VQ + (v_vq_index[:, None] * stride_vvq_n + tl.arange(0,
BLOCK_HEADDIM)[None, :])
v = tl.load(v_ptr)
acc += tl.dot(p.to(v.dtype), v)
l_i = l_i * alpha + tl.sum(p.to(v.dtype) * k_vq_cnt[None, :], axis=1)
m_i = m_i_new
K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
K_VQC_block_ptr = tl.advance(K_VQC_block_ptr, (BLOCK_N,))
VVQI_block_ptr = tl.advance(VVQI_block_ptr, (BLOCK_N,))
return acc, l_i, m_i
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Blocked Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound",
"Memory-Bound"
]
} | [
"Apache"
] | https://github.com/LouChao98/vqtree/blob/27a53274df7a804bce27dffcce5f5be73f64b6f3/ops/triton_fused_vq_attn.py |
912d1872-d2e5-4a37-9f11-a8df42e90f66 | y_10.py | IntelLabs/EquiTriton | src/equitriton/sph_harm/direct/y_10.py | 1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c | 0 | @triton.jit
def tenth_order_bwd(coord_ptr: tl.tensor, coord_grad_ptr: tl.tensor,
sph_grad_ptr: tl.tensor, block_size: tl.constexpr, coord_numel: tl.
constexpr, output_numel: tl.constexpr, col_offset: tl.constexpr,
output_stride: tl.constexpr):
block_id = tl.program_id(0)
coord_stride = 3
coord_striding = tl.arange(0, block_size) * coord_stride
coord_row_offset = coord_striding + block_size * coord_stride * block_id
x = tl.load(coord_ptr + coord_row_offset, mask=coord_row_offset <
coord_numel)
y = tl.load(coord_ptr + coord_row_offset + 1, mask=coord_row_offset + 1 <
coord_numel)
z = tl.load(coord_ptr + coord_row_offset + 2, mask=coord_row_offset + 2 <
coord_numel)
output_striding = tl.arange(0, block_size) * output_stride
output_row_offset = (output_striding + block_size * output_stride *
block_id + col_offset)
g_0 = tl.load(sph_grad_ptr + output_row_offset, mask=output_row_offset <
output_numel)
g_1 = tl.load(sph_grad_ptr + output_row_offset + 1, mask=
output_row_offset + 1 < output_numel)
g_2 = tl.load(sph_grad_ptr + output_row_offset + 2, mask=
output_row_offset + 2 < output_numel)
g_3 = tl.load(sph_grad_ptr + output_row_offset + 3, mask=
output_row_offset + 3 < output_numel)
g_4 = tl.load(sph_grad_ptr + output_row_offset + 4, mask=
output_row_offset + 4 < output_numel)
g_5 = tl.load(sph_grad_ptr + output_row_offset + 5, mask=
output_row_offset + 5 < output_numel)
g_6 = tl.load(sph_grad_ptr + output_row_offset + 6, mask=
output_row_offset + 6 < output_numel)
g_7 = tl.load(sph_grad_ptr + output_row_offset + 7, mask=
output_row_offset + 7 < output_numel)
g_8 = tl.load(sph_grad_ptr + output_row_offset + 8, mask=
output_row_offset + 8 < output_numel)
g_9 = tl.load(sph_grad_ptr + output_row_offset + 9, mask=
output_row_offset + 9 < output_numel)
g_10 = tl.load(sph_grad_ptr + output_row_offset + 10, mask=
output_row_offset + 10 < output_numel)
g_11 = tl.load(sph_grad_ptr + output_row_offset + 11, mask=
output_row_offset + 11 < output_numel)
g_12 = tl.load(sph_grad_ptr + output_row_offset + 12, mask=
output_row_offset + 12 < output_numel)
g_13 = tl.load(sph_grad_ptr + output_row_offset + 13, mask=
output_row_offset + 13 < output_numel)
g_14 = tl.load(sph_grad_ptr + output_row_offset + 14, mask=
output_row_offset + 14 < output_numel)
g_15 = tl.load(sph_grad_ptr + output_row_offset + 15, mask=
output_row_offset + 15 < output_numel)
g_16 = tl.load(sph_grad_ptr + output_row_offset + 16, mask=
output_row_offset + 16 < output_numel)
g_17 = tl.load(sph_grad_ptr + output_row_offset + 17, mask=
output_row_offset + 17 < output_numel)
g_18 = tl.load(sph_grad_ptr + output_row_offset + 18, mask=
output_row_offset + 18 < output_numel)
g_19 = tl.load(sph_grad_ptr + output_row_offset + 19, mask=
output_row_offset + 19 < output_numel)
g_20 = tl.load(sph_grad_ptr + output_row_offset + 20, mask=
output_row_offset + 20 < output_numel)
CONST000 = 2.0
CONST002 = 4.0
CONST003 = 4.82870805793735
CONST004 = 6.0
CONST005 = 4.9743298563255
CONST006 = 8.0
CONST007 = 4.9743298563255
CONST008 = 10.5521471197994
CONST009 = 3.0
CONST010 = 5.0
CONST011 = 7.0
CONST012 = 13.264879616868
CONST014 = 12.1657520803952
CONST015 = 16.7271353825295
CONST016 = -2030.35546709287
CONST017 = 19.3148322317494
CONST018 = -6131.53904851919
CONST019 = 22.862985426232
CONST020 = 23.213539329519
CONST021 = 24.6216766128653
CONST022 = 17.5869118663323
CONST024 = 28.9722483476241
CONST025 = 33.9852909359329
CONST026 = 33.9852909359329
CONST027 = 35.5238206489124
CONST028 = 6180.7463141598
CONST029 = 38.6296644634988
CONST030 = 39.794638850604
CONST031 = 38.6296644634988
CONST032 = -2007.25624590353
CONST033 = -2007.25624590353
CONST034 = 45.8257569495584
CONST035 = 45.725970852464
CONST037 = 56.3871618715269
CONST038 = 56.2781179722634
CONST039 = -1989.33395633909
CONST040 = -1989.33395633909
CONST041 = 59.691958275906
CONST042 = 66.9085415301178
CONST043 = 69.640617988557
CONST044 = -8121.42186837148
CONST045 = 77.2593289269976
CONST046 = 78.6510608948335
CONST047 = -1969.73412902922
CONST048 = 77.3468749368712
CONST049 = -1969.73412902922
CONST050 = -9.65741611587469
CONST051 = 90.1358837481638
CONST053 = 94.9693240781945
CONST055 = 96.5741611587469
CONST057 = 98.486706451461
CONST058 = 100.362812295177
CONST059 = 101.517773354644
CONST060 = 106.571461946737
CONST061 = 106.571461946737
CONST062 = 109.491768723557
CONST063 = 109.491768723557
CONST064 = 112.774323743054
CONST065 = 112.774323743054
CONST067 = 2165.26701586663
CONST070 = 133.817083060236
CONST071 = 139.281235977114
CONST072 = 139.281235977114
CONST073 = 141.5719096107
CONST074 = 142.09528259565
CONST075 = 147.730059677192
CONST076 = 150.544218442765
CONST077 = 150.074981259369
CONST079 = 2202.22970505534
CONST080 = -3939.46825805844
CONST081 = -5968.00186901728
CONST082 = 176.592751833137
CONST083 = 176.178376404427
CONST085 = 185.708314636152
CONST087 = 196.973412902922
CONST089 = 225.548647486108
CONST090 = 225.548647486108
CONST091 = 4330.53403173327
CONST093 = 244.831037842559
CONST094 = -1804.38917988886
CONST095 = -1804.38917988886
CONST097 = 2317.77986780993
CONST098 = 278.562471954228
CONST100 = 284.190565191299
CONST101 = -1761.78376404427
CONST103 = -9946.66978169547
CONST104 = 9.948659712651
CONST108 = -7878.93651611688
CONST111 = 338.322971229162
CONST112 = 360.877835977772
CONST114 = -1671.37483172537
CONST116 = 2436.42656051144
CONST119 = 393.946825805844
CONST120 = -1648.19901710928
CONST121 = 401.451249180707
CONST122 = 406.071093418574
CONST123 = 412.04975427732
CONST125 = -1624.2843736743
CONST126 = 426.285847786949
CONST127 = 426.285847786948
CONST128 = 2486.66744542387
CONST130 = 451.097294972216
CONST131 = 451.097294972216
CONST132 = 451.097294972215
CONST133 = 6606.68911516602
CONST134 = 6606.68911516602
CONST135 = -1575.78730322338
CONST136 = -1575.78730322338
CONST137 = -3608.77835977772
CONST139 = -1545.18657853995
CONST140 = -1545.18657853995
CONST142 = 535.268332240943
CONST143 = 4635.55973561985
CONST144 = 541.428124558099
CONST145 = -3545.5214322526
CONST146 = 557.124943908456
CONST147 = -3523.56752808854
CONST148 = -5571.24943908456
CONST151 = 15.7883647328499
CONST153 = 2642.67564606641
CONST154 = 2642.67564606641
CONST155 = 2676.34166120471
CONST156 = 629.208487158668
CONST158 = 4727.36190967013
CONST159 = -1392.81235977114
CONST160 = -1390.66792068596
CONST162 = 663.111318779698
CONST163 = -3427.63452979582
CONST164 = -1378.81389032045
CONST165 = 676.645942458323
CONST167 = -1338.17083060236
CONST168 = -1338.17083060236
CONST169 = 721.755671955545
CONST171 = 2785.62471954228
CONST173 = 772.593289269975
CONST175 = 787.893651611688
CONST176 = 787.893651611688
CONST177 = 6.632439808434
CONST178 = 812.142186837148
CONST180 = -1218.21328025572
CONST181 = -1202.92611992591
CONST182 = -1202.92611992591
CONST183 = -3248.56874734859
CONST184 = -3248.56874734859
CONST185 = -5285.35129213281
CONST186 = -1181.84047741753
CONST190 = 2936.30627340712
CONST192 = 2954.60119354383
CONST193 = -1114.24988781691
CONST194 = -16.581099521085
CONST195 = -1101.11485252767
CONST196 = -1081.63060497797
CONST197 = 15.7302121789667
CONST199 = 984.86706451461
CONST202 = -1027.70719569249
CONST203 = -1021.9231747532
CONST204 = -3065.7695242596
CONST205 = -1015.17773354644
CONST206 = 3090.3731570799
CONST207 = -994.666978169547
CONST208 = -984.86706451461
CONST209 = -984.86706451461
CONST210 = -979.324151370235
CONST211 = 1070.53666448189
CONST212 = -979.324151370235
CONST213 = 3151.57460644675
CONST216 = -927.111947123971
CONST217 = -927.11194712397
CONST218 = -5.63871618715269
CONST219 = -2954.60119354383
CONST220 = -902.194589944431
CONST221 = -900.449887556215
CONST222 = -880.891882022136
CONST223 = -880.891882022136
CONST224 = -875.934149788456
CONST226 = -4944.59705132784
CONST228 = 3248.56874734859
CONST229 = -835.687415862684
CONST230 = 1218.21328025572
CONST231 = -824.099508554641
CONST232 = -824.863625092051
CONST233 = -824.863625092051
CONST234 = -812.142186837148
CONST235 = 5352.68332240943
CONST236 = -787.893651611688
CONST237 = -787.893651611688
CONST238 = -772.593289269976
CONST239 = -742.833258544608
CONST240 = -2785.62471954228
CONST241 = -734.07656835178
CONST242 = 1321.3378230332
CONST243 = 1321.3378230332
CONST244 = -706.371007332549
CONST245 = -696.40617988557
CONST246 = 1353.29188491665
CONST247 = -675.337415667161
CONST248 = -675.337415667161
CONST250 = 3427.63452979582
CONST251 = -669.085415301178
CONST252 = -669.085415301178
CONST253 = -669.085415301178
CONST255 = -663.111318779698
CONST256 = -2707.14062279049
CONST258 = 1392.81235977114
CONST259 = 1412.7420146651
CONST260 = -4727.36190967013
CONST261 = -2676.34166120471
CONST262 = -618.07463141598
CONST263 = -611.735236846792
CONST264 = -611.735236846792
CONST265 = 1443.51134391109
CONST266 = -590.920238708766
CONST267 = -10828.562491162
CONST268 = -580.101562026534
CONST269 = -2626.31217203896
CONST272 = 5571.24943908456
CONST273 = -12.8765548211663
CONST274 = -557.124943908456
CONST275 = -557.124943908456
CONST277 = -541.428124558099
CONST278 = -6685.49932690147
CONST279 = 7664.42381064899
CONST280 = -525.262434407792
CONST281 = 1532.8847621298
CONST283 = -497.333489084773
CONST284 = -497.333489084773
CONST285 = -492.433532257305
CONST286 = 1575.78730322338
CONST287 = 1575.78730322338
CONST288 = -463.555973561985
CONST289 = -450.224943778107
CONST290 = -450.224943778107
CONST291 = -450.224943778108
CONST292 = -437.967074894228
CONST293 = -2472.29852566392
CONST294 = 1624.2843736743
CONST295 = -2472.29852566392
CONST296 = -406.071093418574
CONST297 = -393.946825805844
CONST298 = -393.946825805844
CONST299 = -2436.42656051144
CONST300 = -386.296644634988
CONST301 = -386.296644634988
CONST302 = -4456.99955126765
CONST303 = -337.668707833581
CONST304 = -337.668707833581
CONST305 = -331.555659389849
CONST306 = -331.555659389849
CONST307 = -2363.68095483506
CONST309 = -309.03731570799
CONST310 = -4404.45941011068
CONST311 = -309.03731570799
CONST312 = -305.867618423396
CONST313 = -305.867618423396
CONST314 = -305.867618423396
CONST315 = -300.731529981477
CONST316 = 9946.66978169547
CONST318 = -290.050781013267
CONST319 = -284.190565191299
CONST320 = -278.562471954228
CONST321 = -278.562471954228
CONST322 = -2317.77986780993
CONST323 = -10505.2486881558
CONST324 = -251.683394863467
CONST325 = -251.683394863467
CONST326 = -246.216766128653
CONST327 = -244.831037842559
CONST328 = -2285.08968653055
CONST329 = -2285.08968653055
CONST330 = 3862.96644634988
CONST331 = -223.028471767059
CONST332 = -220.222970505534
CONST333 = -206.215906273013
CONST334 = -203.035546709287
CONST335 = -196.973412902922
CONST336 = -196.973412902922
CONST337 = -182.903883409856
CONST338 = -2228.49977563382
CONST340 = 16.4144510752435
CONST341 = 3939.46825805844
CONST342 = 3939.46825805844
CONST343 = -154.518657853995
CONST344 = -154.518657853995
CONST345 = -150.074981259369
CONST346 = -147.730059677191
CONST347 = -146.815313670356
CONST348 = -142.09528259565
CONST349 = -131.315608601948
CONST350 = -131.315608601948
CONST351 = -130.52285145597
CONST352 = -125.841697431734
CONST353 = -125.841697431734
CONST354 = -112.556235944527
CONST355 = -103.107953136506
CONST356 = -101.517773354644
CONST357 = 1949.9373036796
CONST358 = -98.486706451461
CONST359 = -98.486706451461
CONST360 = -2141.07332896377
CONST361 = -2141.07332896377
CONST362 = -92.854157318076
CONST363 = -88.2963759165686
CONST366 = -77.3468749368713
CONST367 = 8121.42186837148
CONST369 = -67.6645942458323
CONST372 = -59.691958275906
CONST373 = -49.2433532257305
CONST374 = -49.2433532257305
CONST375 = -45.1097294972216
CONST376 = -45.1097294972216
CONST377 = -42.2085884791976
CONST378 = -27.2034486491732
CONST379 = -24.6216766128653
CONST380 = -22.862985426232
CONST381 = -19.7354559160624
CONST383 = -17.5869118663323
CONST384 = -16.4144510752435
CONST385 = -16.0956935264578
CONST386 = -14.5025390506634
CONST388 = -16.581099521085
CONST389 = -15.7883647328499
CONST390 = -14.0695294930659
CONST391 = -11.2774323743054
CONST392 = -11.2774323743054
CONST393 = -13.264879616868
CONST394 = -6.632439808434
CONST395 = -5.63871618715269
CONST396 = -4.82870805793735
CONST397 = -3.21913870529156
CONST398 = -11.2774323743054
VAR05 = x * x * x * x * x
VAR06 = x * x * x * x
VAR07 = x * x * x
VAR08 = x * x
VAR01 = VAR05 * VAR06
VAR02 = VAR06 * VAR06
VAR03 = VAR06 * VAR07
VAR04 = VAR07 * VAR07
VAR14 = y * y * y * y * y
VAR15 = y * y * y * y
VAR16 = y * y * y
VAR17 = y * y
VAR10 = VAR14 * VAR15
VAR11 = VAR15 * VAR15
VAR12 = VAR15 * VAR16
VAR13 = VAR16 * VAR16
VAR23 = z * z * z * z * z
VAR24 = z * z * z * z
VAR25 = z * z * z
VAR26 = z * z
VAR19 = VAR23 * VAR24
VAR20 = VAR24 * VAR24
VAR21 = VAR24 * VAR25
VAR22 = VAR25 * VAR25
g_x = tl.load(coord_grad_ptr + coord_row_offset, mask=coord_row_offset <
coord_numel)
g_y = tl.load(coord_grad_ptr + coord_row_offset + 1, mask=
coord_row_offset + 1 < coord_numel)
g_z = tl.load(coord_grad_ptr + coord_row_offset + 2, mask=
coord_row_offset + 2 < coord_numel)
g_x += g_0 * (CONST093 * VAR02 * z + CONST210 * VAR08 * VAR21 +
CONST250 * VAR06 * VAR23 + CONST328 * VAR04 * VAR25 - CONST378 * VAR19
) + g_1 * y * (CONST062 * VAR20 + CONST063 * VAR02 + CONST204 *
VAR04 * VAR26 + CONST204 * VAR08 * VAR22 + CONST279 * VAR06 * VAR24
) + g_10 * (CONST000 * x * (CONST089 * VAR17 * VAR22 + CONST169 *
VAR13 * VAR26 + CONST220 * VAR15 * VAR24 + CONST355 * VAR11 +
CONST395 * VAR20) + CONST002 * VAR07 * (CONST111 * VAR17 * VAR24 +
CONST112 * VAR13 + CONST220 * VAR15 * VAR26 + CONST392 * VAR22) +
CONST004 * VAR05 * (CONST090 * VAR17 * VAR26 + CONST315 * VAR15 +
CONST392 * VAR24) + CONST006 * VAR03 * (CONST037 * VAR17 + CONST218 *
VAR26) + CONST391 * VAR01) + g_11 * (CONST070 * VAR21 * x * y +
VAR23 * (CONST121 * VAR07 * y + CONST168 * VAR16 * x) + VAR25 * (
CONST121 * VAR05 * y + CONST261 * VAR07 * VAR16 - CONST361 * VAR14 *
x) + z * (CONST070 * VAR03 * y + CONST167 * VAR05 * VAR16 +
CONST263 * VAR12 * x - CONST361 * VAR07 * VAR14)) + g_12 * (
CONST000 * x * (CONST003 * VAR20 - CONST301 * VAR15 * VAR24 +
CONST343 * VAR17 * VAR22 + CONST363 * VAR11) + CONST002 * VAR07 * (
CONST123 * VAR13 + CONST300 * VAR15 * VAR26 - CONST397 * VAR22) +
CONST004 * VAR05 * (CONST301 * VAR15 - CONST344 * VAR17 * VAR26 +
CONST397 * VAR24) + CONST006 * VAR03 * (CONST045 * VAR17 + CONST396 *
VAR26) + CONST385 * VAR01) + g_13 * (CONST221 * VAR12 * x * z +
VAR14 * (-CONST260 * VAR07 * z + CONST286 * VAR25 * x) + VAR16 * (
CONST080 * VAR07 * VAR25 + CONST145 * VAR05 * z + CONST297 * VAR23 *
x) + y * (-CONST237 * VAR05 * VAR25 - CONST297 * VAR07 * VAR23 -
CONST298 * VAR03 * z)) + g_14 * (CONST000 * x * (CONST005 * VAR20 -
CONST159 * VAR15 * VAR24 + CONST193 * VAR13 * VAR26 + CONST320 *
VAR17 * VAR22) + CONST002 * VAR07 * (CONST020 * VAR22 + CONST085 *
VAR13 + CONST245 * VAR17 * VAR24 + CONST258 * VAR15 * VAR26) +
CONST004 * VAR05 * (CONST020 * VAR24 + CONST320 * VAR15 + CONST320 *
VAR17 * VAR26) + CONST006 * VAR03 * (CONST007 * VAR26 + CONST043 *
VAR17) + CONST388 * VAR01) + g_15 * (VAR14 * (-CONST147 * VAR07 * z +
CONST147 * VAR25 * x) + VAR16 * (CONST153 * VAR23 * x + CONST190 *
VAR07 * VAR25 + CONST310 * VAR05 * z) + y * (CONST156 * VAR03 * z +
CONST222 * VAR07 * VAR23 + CONST324 * VAR21 * x)) + g_16 * (
CONST000 * x * (CONST047 * VAR15 * VAR24 + CONST175 * VAR17 * VAR22 +
CONST380 * VAR20) + CONST002 * VAR07 * (-CONST047 * VAR15 * VAR26 +
CONST379 * VAR22) + CONST004 * VAR05 * (CONST021 * VAR24 + CONST236 *
VAR17 * VAR26 + CONST349 * VAR15) + CONST006 * VAR03 * (CONST019 *
VAR26 + CONST038 * VAR17) + CONST383 * VAR01) + g_17 * (VAR16 * (
CONST183 * VAR23 * x + CONST184 * VAR05 * z - CONST267 * VAR07 *
VAR25) + y * (CONST178 * VAR03 * z + CONST234 * VAR07 * VAR23 -
CONST268 * VAR21 * x + CONST299 * VAR05 * VAR25)) + g_18 * (
CONST060 * VAR20 * x + CONST126 * VAR03 * VAR26 + CONST283 * VAR05 *
VAR24 + CONST305 * VAR07 * VAR22 + CONST381 * VAR01 + VAR17 * (
CONST039 * VAR22 * x + CONST081 * VAR05 * VAR26 + CONST316 * VAR07 *
VAR24 - CONST319 * VAR03)) + g_19 * y * (CONST018 * VAR05 * VAR25 -
CONST018 * VAR07 * VAR23 - CONST224 * VAR03 * z + CONST224 * VAR21 * x
) + g_2 * (CONST074 * VAR02 * z + CONST100 * VAR08 * VAR21 +
CONST255 * VAR04 * VAR25 + CONST389 * VAR19 + VAR17 * (CONST040 *
VAR04 * z + CONST081 * VAR08 * VAR23 - CONST103 * VAR06 * VAR25 -
CONST319 * VAR21)) + g_20 * (CONST163 * VAR05 * VAR24 - CONST212 *
VAR03 * VAR26 + CONST327 * VAR20 * x - CONST329 * VAR07 * VAR22 +
CONST378 * VAR01) + g_3 * (VAR16 * (CONST044 * VAR08 * VAR24 +
CONST144 * VAR22 + CONST277 * VAR04 + CONST367 * VAR06 * VAR26) + y *
(CONST016 * VAR04 * VAR26 - CONST205 * VAR06 * VAR24 + CONST230 *
VAR08 * VAR22 - CONST351 * VAR02 + CONST356 * VAR20)) + g_4 * (
CONST008 * VAR19 + CONST009 * VAR08 * (CONST175 * VAR17 * VAR23 +
CONST269 * VAR15 * VAR25 + CONST390 * VAR21) + CONST010 * VAR06 * (
CONST175 * VAR15 * z + CONST176 * VAR17 * VAR25 + CONST373 * VAR23) +
CONST011 * VAR04 * (CONST303 * VAR17 * z + CONST390 * VAR25) +
CONST053 * VAR02 * z + CONST175 * VAR15 * VAR23 + CONST304 * VAR17 *
VAR21) + g_5 * (VAR14 * (CONST185 * VAR08 * VAR26 - CONST222 *
VAR06 - CONST223 * VAR24) + VAR16 * (CONST079 * VAR08 * VAR24 +
CONST133 * VAR06 * VAR26 + CONST202 * VAR04 + CONST241 * VAR22) + y *
(CONST046 * VAR20 + CONST073 * VAR02 + CONST195 * VAR06 * VAR24 +
CONST222 * VAR04 * VAR26)) + g_6 * (CONST009 * VAR08 * (CONST098 *
VAR17 * VAR23 + CONST239 * VAR13 * z + CONST393 * VAR21) + CONST010 *
VAR06 * (-CONST193 * VAR15 * z + CONST320 * VAR17 * VAR25) +
CONST011 * VAR04 * (CONST012 * VAR25 + CONST321 * VAR17 * z) +
CONST041 * VAR02 * z + CONST098 * VAR17 * VAR21 + CONST193 * VAR15 *
VAR23 - CONST239 * VAR13 * VAR25 + CONST394 * VAR19) + g_7 * (VAR12 *
(CONST289 * VAR08 - CONST290 * VAR26) + VAR14 * (-CONST049 * VAR06 +
CONST186 * VAR24 + CONST307 * VAR08 * VAR26) + VAR16 * (CONST164 *
VAR04 + CONST192 * VAR08 * VAR24 + CONST199 * VAR06 * VAR26 -
CONST266 * VAR22) + y * (CONST075 * VAR02 + CONST285 * VAR06 *
VAR24 + CONST297 * VAR08 * VAR22 + CONST374 * VAR20)) + g_8 * (
CONST009 * VAR08 * (-CONST140 * VAR15 * VAR25 + CONST231 * VAR13 *
z - CONST273 * VAR21 + CONST288 * VAR17 * VAR23) + CONST010 * VAR06 *
(CONST017 * VAR23 + CONST173 * VAR15 * z + CONST288 * VAR17 * VAR25
) + CONST011 * VAR04 * (-CONST273 * VAR25 + CONST344 * VAR17 * z) +
CONST024 * VAR02 * z + CONST082 * VAR11 * z + CONST173 * VAR15 *
VAR23 + CONST231 * VAR13 * VAR25 + CONST344 * VAR17 * VAR21 -
CONST397 * VAR19) + g_9 * (CONST009 * VAR08 * (CONST042 * VAR22 * y +
CONST211 * VAR14 * VAR26 + CONST251 * VAR16 * VAR24 + CONST312 *
VAR12) + CONST010 * VAR06 * (CONST058 * VAR24 * y + CONST142 *
VAR14 + CONST252 * VAR16 * VAR26) + CONST011 * VAR04 * (CONST042 *
VAR26 * y + CONST331 * VAR16) + CONST015 * VAR20 * y + CONST025 *
VAR10 + CONST076 * VAR02 * y + CONST142 * VAR14 * VAR24 + CONST312 *
VAR12 * VAR26 + CONST331 * VAR16 * VAR22)
g_y += CONST000 * g_18 * y * (CONST027 * VAR02 + CONST027 * VAR20 +
CONST128 * VAR06 * VAR24 + CONST207 * VAR04 * VAR26 + CONST207 *
VAR08 * VAR22) + CONST000 * g_2 * y * (-CONST039 * VAR05 * VAR25 +
CONST039 * VAR07 * VAR23 + CONST319 * VAR03 * z - CONST319 * VAR21 * x
) + g_1 * (CONST014 * VAR01 + CONST062 * VAR20 * x + CONST203 *
VAR07 * VAR22 + CONST281 * VAR05 * VAR24 + CONST292 * VAR03 * VAR26
) + g_10 * (CONST034 * VAR10 + CONST064 * VAR20 * y + CONST065 *
VAR02 * y + CONST067 * VAR14 * VAR24 + CONST182 * VAR16 * VAR22 +
CONST233 * VAR12 * VAR26 + VAR04 * (CONST131 * VAR26 * y + CONST181 *
VAR16) + VAR06 * (CONST067 * VAR14 + CONST137 * VAR16 * VAR26 +
CONST165 * VAR24 * y) + VAR08 * (CONST091 * VAR14 * VAR26 +
CONST130 * VAR22 * y + CONST137 * VAR16 * VAR24 + CONST232 * VAR12)
) + g_11 * (CONST015 * VAR19 + VAR21 * (CONST042 * VAR08 + CONST253 *
VAR17) + VAR23 * (CONST033 * VAR08 * VAR17 + CONST058 * VAR06 +
CONST155 * VAR15) + VAR25 * (CONST032 * VAR06 * VAR17 + CONST042 *
VAR04 + CONST235 * VAR08 * VAR15 + CONST361 * VAR13) + z * (
CONST015 * VAR02 + CONST155 * VAR06 * VAR15 + CONST253 * VAR04 *
VAR17 - CONST312 * VAR11 + CONST360 * VAR08 * VAR13)) + g_12 * (-
CONST140 * VAR16 * VAR22 - CONST244 * VAR12 * VAR26 + CONST293 *
VAR14 * VAR24 + CONST343 * VAR20 * y - CONST344 * VAR02 * y + VAR04 *
(CONST140 * VAR16 - CONST311 * VAR26 * y) + VAR06 * (CONST139 *
VAR16 * VAR26 - CONST295 * VAR14) + VAR08 * (-CONST140 * VAR16 *
VAR24 + CONST244 * VAR12 + CONST309 * VAR22 * y)) + g_13 * (
CONST009 * VAR17 * (CONST208 * VAR06 * VAR25 + CONST266 * VAR04 * z +
CONST335 * VAR08 * VAR23 - CONST336 * VAR21) + CONST010 * VAR15 * (
CONST176 * VAR08 * VAR25 - CONST186 * VAR06 * z + CONST298 * VAR23) +
CONST011 * VAR13 * (CONST077 * VAR25 + CONST290 * VAR08 * z) -
CONST350 * VAR04 * VAR25 - CONST358 * VAR06 * VAR23 - CONST374 *
VAR02 * z + CONST384 * VAR19) + g_14 * (CONST071 * VAR02 * y +
CONST072 * VAR20 * y - CONST193 * VAR14 * VAR24 + CONST193 * VAR16 *
VAR22 + VAR04 * (CONST193 * VAR16 + CONST274 * VAR26 * y) + VAR06 *
(CONST159 * VAR24 * y - CONST193 * VAR14 + CONST272 * VAR16 * VAR26
) + VAR08 * (-CONST148 * VAR16 * VAR24 + CONST274 * VAR22 * y +
CONST278 * VAR14 * VAR26)) + g_15 * (CONST009 * VAR17 * (CONST241 *
VAR04 * z - CONST241 * VAR06 * VAR25 + CONST242 * VAR08 * VAR23 +
CONST347 * VAR21) + CONST010 * VAR15 * (CONST083 * VAR23 + CONST101 *
VAR08 * VAR25 - CONST223 * VAR06 * z) + CONST046 * VAR02 * z +
CONST197 * VAR19 + CONST332 * VAR06 * VAR23 + CONST352 * VAR08 * VAR21
) + g_16 * (-CONST108 * VAR06 * VAR16 * VAR26 - CONST280 * VAR16 *
VAR22 - CONST354 * VAR02 * y + CONST354 * VAR20 * y + VAR04 * (
CONST135 * VAR26 * y + CONST280 * VAR16) + VAR08 * (CONST108 *
VAR16 * VAR24 + CONST287 * VAR22 * y)) + g_17 * (CONST009 * VAR17 *
(CONST048 * VAR21 + CONST125 * VAR08 * VAR23 - CONST256 * VAR06 *
VAR25 + CONST277 * VAR04 * z) + CONST059 * VAR02 * z + CONST296 *
VAR04 * VAR25 - CONST318 * VAR08 * VAR21 + CONST334 * VAR06 * VAR23 +
CONST386 * VAR19) + g_19 * (CONST014 * VAR19 + CONST062 * VAR02 * z +
CONST203 * VAR04 * VAR25 + CONST281 * VAR06 * VAR23 + CONST292 *
VAR08 * VAR21) + g_3 * (CONST009 * VAR17 * (CONST144 * VAR22 * x +
CONST256 * VAR07 * VAR24 + CONST294 * VAR05 * VAR26 + CONST366 *
VAR03) + CONST122 * VAR07 * VAR22 + CONST318 * VAR03 * VAR26 -
CONST334 * VAR05 * VAR24 + CONST356 * VAR20 * x - CONST386 * VAR01
) + g_4 * (CONST248 * VAR03 * y * z + VAR05 * (CONST213 * VAR16 * z +
CONST286 * VAR25 * y) + VAR07 * (CONST287 * VAR23 * y + CONST323 *
VAR16 * VAR25) + x * (CONST213 * VAR16 * VAR23 + CONST247 * VAR21 * y)
) + g_5 * (CONST009 * VAR17 * (-CONST241 * VAR07 * VAR24 + CONST241 *
VAR22 * x + CONST243 * VAR05 * VAR26 + CONST347 * VAR03) + CONST010 *
VAR15 * (CONST083 * VAR05 + CONST101 * VAR07 * VAR26 - CONST223 *
VAR24 * x) + CONST046 * VAR20 * x + CONST197 * VAR01 + CONST332 *
VAR05 * VAR24 + CONST353 * VAR03 * VAR26) + g_6 * (CONST275 * VAR03 *
y * z + VAR05 * (CONST274 * VAR25 * y - CONST302 * VAR16 * z) +
VAR07 * (CONST146 * VAR23 * y + CONST302 * VAR14 * z) + x * (
CONST146 * VAR21 * y - CONST302 * VAR14 * VAR25 + CONST302 * VAR16 *
VAR23)) + g_7 * (CONST009 * VAR17 * (CONST087 * VAR05 * VAR26 -
CONST209 * VAR07 * VAR24 - CONST266 * VAR22 * x + CONST336 * VAR03) +
CONST010 * VAR15 * (CONST186 * VAR24 * x + CONST237 * VAR07 * VAR26 -
CONST298 * VAR05) + CONST011 * VAR13 * (-CONST290 * VAR26 * x +
CONST345 * VAR07) + CONST340 * VAR01 + CONST350 * VAR07 * VAR22 +
CONST358 * VAR05 * VAR24 + CONST374 * VAR20 * x) + g_8 * (CONST311 *
VAR03 * y * z + VAR05 * (CONST206 * VAR16 * z + CONST216 * VAR25 *
y) + VAR07 * (CONST028 * VAR16 * VAR25 + CONST216 * VAR23 * y +
CONST226 * VAR14 * z) + x * (CONST206 * VAR16 * VAR23 + CONST226 *
VAR14 * VAR25 + CONST259 * VAR12 * z + CONST311 * VAR21 * y)) + g_9 * (
CONST015 * VAR01 + VAR03 * (CONST042 * VAR26 + CONST253 * VAR17) +
VAR05 * (CONST033 * VAR17 * VAR26 + CONST058 * VAR24 + CONST155 *
VAR15) + VAR07 * (CONST032 * VAR17 * VAR24 + CONST042 * VAR22 +
CONST235 * VAR15 * VAR26 + CONST361 * VAR13) + x * (CONST015 *
VAR20 + CONST155 * VAR15 * VAR24 + CONST253 * VAR17 * VAR22 -
CONST314 * VAR11 + CONST361 * VAR13 * VAR26))
g_z += g_0 * (CONST093 * VAR20 * x + CONST210 * VAR03 * VAR26 +
CONST250 * VAR05 * VAR24 + CONST328 * VAR07 * VAR22 - CONST378 * VAR01
) + g_1 * y * (-CONST018 * VAR05 * VAR25 + CONST018 * VAR07 * VAR23 +
CONST224 * VAR03 * z - CONST224 * VAR21 * x) + g_10 * (CONST095 *
VAR15 * VAR23 + CONST132 * VAR17 * VAR21 + CONST265 * VAR13 * VAR25 +
CONST333 * VAR11 * z + CONST391 * VAR19 + CONST398 * VAR02 * z +
VAR04 * (CONST131 * VAR17 * z + CONST376 * VAR25) + VAR06 * (
CONST094 * VAR15 * z + CONST246 * VAR17 * VAR25 + CONST369 * VAR23) +
VAR08 * (CONST137 * VAR15 * VAR25 + CONST246 * VAR17 * VAR23 +
CONST265 * VAR13 * z + CONST375 * VAR21)) + g_11 * (CONST009 *
VAR26 * (CONST042 * VAR04 * y + CONST211 * VAR08 * VAR14 + CONST251 *
VAR06 * VAR16 + CONST313 * VAR12) + CONST010 * VAR24 * (CONST058 *
VAR06 * y + CONST142 * VAR14 + CONST252 * VAR08 * VAR16) + CONST011 *
VAR22 * (CONST042 * VAR08 * y + CONST331 * VAR16) + CONST015 *
VAR02 * y + CONST026 * VAR10 + CONST076 * VAR20 * y + CONST142 *
VAR06 * VAR14 + CONST314 * VAR08 * VAR12 + CONST331 * VAR04 * VAR16
) + g_12 * (CONST050 * VAR02 * z + CONST082 * VAR11 * z + CONST097 *
VAR15 * VAR23 + CONST120 * VAR13 * VAR25 + CONST262 * VAR17 * VAR21 -
CONST385 * VAR19 + VAR04 * (CONST273 * VAR25 - CONST311 * VAR17 * z
) + VAR06 * (CONST017 * VAR23 + CONST238 * VAR15 * z) + VAR08 * (
CONST029 * VAR21 - CONST140 * VAR15 * VAR25 + CONST217 * VAR17 * VAR23)
) + g_13 * (VAR12 * (CONST290 * VAR08 - CONST290 * VAR26) + VAR14 *
(CONST049 * VAR24 - CONST186 * VAR06 - CONST307 * VAR08 * VAR26) +
VAR16 * (-CONST164 * VAR22 + CONST209 * VAR08 * VAR24 + CONST219 *
VAR06 * VAR26 + CONST266 * VAR04) + y * (-CONST285 * VAR06 * VAR24 -
CONST297 * VAR04 * VAR26 + CONST346 * VAR20 - CONST374 * VAR02)
) + g_14 * (CONST104 * VAR02 * z + CONST114 * VAR15 * VAR23 +
CONST146 * VAR17 * VAR21 + CONST194 * VAR19 - CONST239 * VAR13 *
VAR25 + VAR04 * (CONST274 * VAR17 * z - CONST362 * VAR25) + VAR06 *
(CONST072 * VAR23 + CONST171 * VAR15 * z + CONST240 * VAR17 * VAR25
) + VAR08 * (CONST030 * VAR21 + CONST114 * VAR17 * VAR23 - CONST148 *
VAR15 * VAR25 + CONST338 * VAR13 * z)) + g_15 * (VAR14 * (CONST185 *
VAR08 * VAR26 - CONST222 * VAR24 - CONST223 * VAR06) + VAR16 * (
CONST079 * VAR06 * VAR26 + CONST134 * VAR08 * VAR24 + CONST202 *
VAR22 + CONST241 * VAR04) + y * (CONST046 * VAR02 + CONST073 *
VAR20 + CONST195 * VAR06 * VAR24 + CONST223 * VAR08 * VAR22)
) + g_16 * (CONST022 * VAR19 + CONST035 * VAR02 * z + CONST175 *
VAR15 * VAR23 + CONST291 * VAR17 * VAR21 + VAR04 * (CONST057 *
VAR25 + CONST135 * VAR17 * z) + VAR06 * (CONST341 * VAR15 * z +
CONST346 * VAR23) + VAR08 * (CONST108 * VAR15 * VAR25 + CONST158 *
VAR17 * VAR23 + CONST337 * VAR21)) + g_17 * (VAR16 * (-CONST044 *
VAR06 * VAR26 + CONST044 * VAR08 * VAR24 + CONST144 * VAR22 +
CONST277 * VAR04) + y * (-CONST016 * VAR08 * VAR22 + CONST059 *
VAR02 + CONST180 * VAR04 * VAR26 + CONST205 * VAR06 * VAR24 +
CONST351 * VAR20)) + g_18 * (CONST061 * VAR02 * z + CONST127 *
VAR08 * VAR21 + CONST284 * VAR06 * VAR23 + CONST306 * VAR04 * VAR25 +
CONST381 * VAR19 + VAR17 * (CONST039 * VAR04 * z + CONST081 * VAR08 *
VAR23 + CONST316 * VAR06 * VAR25 - CONST319 * VAR21)) + g_19 * y * (
CONST062 * VAR02 + CONST063 * VAR20 + CONST204 * VAR04 * VAR26 +
CONST204 * VAR08 * VAR22 + CONST279 * VAR06 * VAR24) + g_2 * (
CONST151 * VAR01 + CONST162 * VAR07 * VAR22 + CONST319 * VAR03 *
VAR26 + CONST348 * VAR20 * x + VAR17 * (-CONST040 * VAR22 * x -
CONST081 * VAR05 * VAR26 + CONST103 * VAR07 * VAR24 + CONST319 * VAR03)
) + g_20 * (-CONST163 * VAR06 * VAR23 + CONST212 * VAR08 * VAR21 -
CONST327 * VAR02 * z + CONST329 * VAR04 * VAR25 - CONST378 * VAR19
) + g_3 * (VAR16 * (-CONST183 * VAR23 * x + CONST228 * VAR05 * z +
CONST267 * VAR07 * VAR25) + y * (CONST116 * VAR07 * VAR23 -
CONST234 * VAR05 * VAR25 + CONST234 * VAR21 * x + CONST268 * VAR03 * z)
) + g_4 * (CONST008 * VAR01 + VAR03 * (CONST303 * VAR17 + CONST377 *
VAR26) + VAR05 * (CONST175 * VAR15 - CONST307 * VAR17 * VAR26 +
CONST326 * VAR24) + VAR07 * (CONST108 * VAR15 * VAR26 + CONST341 *
VAR17 * VAR24 + CONST359 * VAR22) + x * (CONST053 * VAR20 +
CONST307 * VAR17 * VAR22 + CONST341 * VAR15 * VAR24)) + g_5 * (
VAR14 * (CONST147 * VAR07 * z - CONST147 * VAR25 * x) + VAR16 * (
CONST154 * VAR05 * z + CONST190 * VAR07 * VAR25 + CONST310 * VAR23 *
x) + y * (CONST156 * VAR21 * x + CONST222 * VAR05 * VAR25 +
CONST325 * VAR03 * z)) + g_6 * (CONST177 * VAR01 + VAR03 * (
CONST030 * VAR26 + CONST321 * VAR17) + VAR05 * (-CONST193 * VAR15 +
CONST229 * VAR17 * VAR26) + VAR07 * (CONST239 * VAR13 + CONST258 *
VAR17 * VAR24 + CONST362 * VAR22) + x * (CONST148 * VAR15 * VAR24 -
CONST338 * VAR13 * VAR26 + CONST357 * VAR17 * VAR22 + CONST372 * VAR20)
) + g_7 * (-CONST221 * VAR12 * x * z + VAR14 * (CONST136 * VAR07 *
z + CONST260 * VAR25 * x) + VAR16 * (CONST119 * VAR05 * z -
CONST145 * VAR23 * x + CONST342 * VAR07 * VAR25) + y * (CONST237 *
VAR07 * VAR23 + CONST297 * VAR05 * VAR25 + CONST298 * VAR21 * x)
) + g_8 * (-CONST397 * VAR01 + VAR03 * (CONST031 * VAR26 + CONST344 *
VAR17) + VAR05 * (CONST055 * VAR24 + CONST160 * VAR17 * VAR26 +
CONST173 * VAR15) + VAR07 * (CONST051 * VAR22 + CONST143 * VAR15 *
VAR26 + CONST231 * VAR13 + CONST322 * VAR17 * VAR24) + x * (
CONST024 * VAR20 + CONST082 * VAR11 + CONST196 * VAR17 * VAR22 +
CONST295 * VAR13 * VAR26 + CONST330 * VAR15 * VAR24)) + g_9 * (
CONST070 * VAR03 * y * z + VAR05 * (CONST121 * VAR25 * y + CONST168 *
VAR16 * z) + VAR07 * (CONST121 * VAR23 * y + CONST261 * VAR16 *
VAR25 - CONST361 * VAR14 * z) + x * (CONST070 * VAR21 * y +
CONST167 * VAR16 * VAR23 + CONST264 * VAR12 * z - CONST361 * VAR14 *
VAR25))
tl.store(coord_grad_ptr + coord_row_offset, g_x, mask=coord_row_offset <
coord_numel)
tl.store(coord_grad_ptr + coord_row_offset + 1, g_y, mask=
coord_row_offset + 1 < coord_numel)
tl.store(coord_grad_ptr + coord_row_offset + 2, g_z, mask=
coord_row_offset + 2 < coord_numel)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/direct/y_10.py |
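A hedged launch sketch matching the signature above (the wrapper name and tensor layouts are assumptions): coords and coord_grad are [num_pts, 3], and sph_grad packs the 21 l=10 gradient columns starting at col_offset within rows of width output_stride:

```python
import torch
import triton

def tenth_order_bwd_launch(coords, coord_grad, sph_grad,
                           col_offset=0, block_size=128):
    # coords, coord_grad: [num_pts, 3]; sph_grad: [num_pts, output_stride].
    num_pts = coords.shape[0]
    output_stride = sph_grad.shape[1]
    grid = (triton.cdiv(num_pts, block_size),)  # one program per block of points
    tenth_order_bwd[grid](coords, coord_grad, sph_grad, block_size,
                          coords.numel(), sph_grad.numel(),
                          col_offset, output_stride)
    return coord_grad  # gradients are accumulated in place
```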
de16e6fe-5490-4908-ad00-f28cbf3e7e54 | dropout.py | dame-cell/Triformer | triformer/dropout.py | 0712537d576166b93fa09aa9509b2661b9ed8a68 | 0 | @triton.jit
def _seeded_dropout(x_ptr, output_ptr, n_elements, p, seed, BLOCK_SIZE: tl.
constexpr):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE * 4
offset = block_start + tl.arange(0, BLOCK_SIZE)
r0, r1, r2, r3 = tl.random.rand4x(seed, offset)
scale = 1.0 / (1.0 - p)
for i in tl.static_range(4):
curr_offset = offset + BLOCK_SIZE * i
mask = curr_offset < n_elements
x = tl.load(x_ptr + curr_offset, mask=mask)
r = tl.where(i == 0, r0, tl.where(i == 1, r1, tl.where(i == 2, r2, r3))
)
keep = r > p
output = tl.where(keep, x * scale, 0.0)
tl.store(output_ptr + curr_offset, output, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/dame-cell/Triformer/blob/0712537d576166b93fa09aa9509b2661b9ed8a68/triformer/dropout.py |
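A minimal sketch of the host wrapper (the name and default block size are assumptions). Each program covers `4 * BLOCK_SIZE` elements, one `BLOCK_SIZE` slice per unrolled iteration, so the grid shrinks by that factor:

```python
import torch
import triton

def seeded_dropout(x: torch.Tensor, p: float, seed: int, BLOCK_SIZE: int = 1024):
    out = torch.empty_like(x)
    n = x.numel()
    grid = (triton.cdiv(n, BLOCK_SIZE * 4),)    # 4 slices of work per program
    _seeded_dropout[grid](x, out, n, p, seed, BLOCK_SIZE=BLOCK_SIZE)
    return out
```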
732ba6cd-e311-45e1-a903-5a7a6189d148 | fused_kl_div.py | sustcsonglin/flash-linear-attention | fla/modules/fused_kl_div.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.jit
def elementwise_mul_kernel(x, g, N: tl.constexpr, B: tl.constexpr):
"""
This function multiplies each element of the tensor pointed to by x with the scalar value pointed to by g.
The multiplication is performed in place on the tensor pointed to by x.
Parameters:
x:
Pointer to the input tensor.
g:
Pointer to the gradient output value.
N (int):
The number of columns in the input tensor.
B (int):
The block size for Triton operations.
"""
i_x = tl.program_id(0).to(tl.int64)
o_x = i_x * B + tl.arange(0, B)
b_g = tl.load(g)
b_x = tl.load(x + o_x, mask=o_x < N)
tl.store(x + o_x, b_x * b_g, mask=o_x < N)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/modules/fused_kl_div.py |
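Following the docstring, a minimal in-place launch sketch (the wrapper name is assumed): one program scales each `B`-element chunk of the flat tensor by the scalar loaded from `g`:

```python
import torch
import triton

def elementwise_mul_(x: torch.Tensor, g: torch.Tensor, B: int = 1024):
    N = x.numel()
    grid = (triton.cdiv(N, B),)                 # one program per B-element chunk
    elementwise_mul_kernel[grid](x, g, N=N, B=B)
    return x                                    # modified in place
```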
1db7555b-32ef-4cc2-9217-8ecf0ada9c85 | cross_entropy_loss_kernels.py | BobMcDear/attorch | attorch/cross_entropy_loss_kernels.py | da06cb6236bb47195e33fe3986ed21c675ed94cc | 0 | @triton.autotune(configs=warps_kernel_configs(), key=['batch_dim', 'feat_dim'])
@triton.heuristics({'BLOCK_SIZE_BATCH': BLOCK_SIZE_BATCH_heuristic,
'BLOCK_SIZE_FEAT': lambda args: next_power_of_2(args['feat_dim'])})
@triton.jit
def cross_entropy_loss_backward_kernel(output_grad_pointer, target_pointer,
input_pointer, weight_pointer, sum_weights_pointer, input_grad_pointer,
batch_dim, feat_dim, input_batch_stride, input_feat_stride,
input_grad_batch_stride, input_grad_feat_stride, weighted: tl.constexpr,
BLOCK_SIZE_BATCH: tl.constexpr, BLOCK_SIZE_FEAT: tl.constexpr):
"""
Calculates the input gradient of cross entropy loss.
Args:
output_grad_pointer: Pointer to the loss's output gradients.
The output gradient must be a scalar.
target_pointer: Pointer to the target.
The target must be of shape [batch_dim].
input_pointer: Pointer to the input.
The input must be of shape [batch_dim, feat_dim].
weight_pointer: Pointer to an optional class weight vector.
The class weight vector, if provided, must be of shape [feat_dim].
sum_weights_pointer: Pointer to the sum of the class weights if the classes were weighted.
The sum of weights must be a scalar.
input_grad_pointer: Pointer to a container the input's gradients are written to.
The container must be of shape [batch_dim, feat_dim].
batch_dim: Batch dimension.
feat_dim: Dimensionality of the features.
input_batch_stride: Stride necessary to jump one element along the
input's batch dimension.
input_feat_stride: Stride necessary to jump one element along the
input's feature dimension.
input_grad_batch_stride: Stride necessary to jump one element along the
input gradient container's batch dimension.
input_grad_feat_stride: Stride necessary to jump one element along the
input gradient container's feature dimension.
weighted: Flag for weighting each class.
BLOCK_SIZE_BATCH: Block size across the batch dimension.
BLOCK_SIZE_FEAT: Block size across the feature dimension.
"""
batch_pid = tl.program_id(axis=0)
batch_offset = batch_pid * BLOCK_SIZE_BATCH + tl.arange(0, BLOCK_SIZE_BATCH
)
feat_offset = tl.arange(0, BLOCK_SIZE_FEAT)
batch_mask = batch_offset < batch_dim
feat_mask = feat_offset < feat_dim
input_pointer += input_batch_stride * batch_offset[:, None
] + input_feat_stride * feat_offset[None, :]
input_grad_pointer += input_grad_batch_stride * batch_offset[:, None
] + input_grad_feat_stride * feat_offset[None, :]
input = tl.load(input_pointer, mask=batch_mask[:, None] & feat_mask[
None, :], other=-float('inf')).to(tl.float32)
input -= tl.max(input, axis=1)[:, None]
numerator = tl.exp(input)
softmax = numerator / tl.sum(numerator, axis=1)[:, None]
output_grad = tl.load(output_grad_pointer).to(tl.float32)
target = tl.load(target_pointer + batch_offset, mask=batch_mask)
broadcasted_feat_offset = tl.broadcast_to(feat_offset[None, :], (
BLOCK_SIZE_BATCH, BLOCK_SIZE_FEAT))
broadcasted_target = tl.broadcast_to(target[:, None], (BLOCK_SIZE_BATCH,
BLOCK_SIZE_FEAT))
input_grad = output_grad * (softmax - (broadcasted_feat_offset ==
broadcasted_target))
if weighted:
weight = tl.load(weight_pointer + target, mask=batch_mask).to(tl.
float32)
sum_weights = tl.load(sum_weights_pointer)
input_grad *= weight[:, None] / sum_weights
else:
input_grad /= batch_dim
tl.store(input_grad_pointer, input_grad, mask=batch_mask[:, None] &
feat_mask[None, :])
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Softmax",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound",
"High Throughput",
"Batch-Oriented"
]
} | [
"MIT"
] | https://github.com/BobMcDear/attorch/blob/da06cb6236bb47195e33fe3986ed21c675ed94cc/attorch/cross_entropy_loss_kernels.py |
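A hedged sketch of the weighted-path launch for the kernel above (the wrapper name is assumed). BLOCK_SIZE_BATCH and BLOCK_SIZE_FEAT are supplied by the autotuner and heuristics, so the grid reads BLOCK_SIZE_BATCH through meta:

```python
import torch
import triton

def cross_entropy_backward(output_grad, target, inp, weight, sum_weights):
    # output_grad: scalar tensor; target: [batch_dim] int64; inp: [batch_dim, feat_dim];
    # weight: [feat_dim] class weights; sum_weights: scalar tensor.
    batch_dim, feat_dim = inp.shape
    input_grad = torch.empty_like(inp)
    grid = lambda meta: (triton.cdiv(batch_dim, meta['BLOCK_SIZE_BATCH']),)
    cross_entropy_loss_backward_kernel[grid](
        output_grad, target, inp, weight, sum_weights, input_grad,
        batch_dim, feat_dim, inp.stride(0), inp.stride(1),
        input_grad.stride(0), input_grad.stride(1),
        weighted=True)
    return input_grad
```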
f3a6e3a3-92be-4174-ade1-4a029fd7be9b | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/retention/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({'BV': BV}, num_warps=num_warps,
num_stages=num_stages) for BV in [32, 64, 128] for num_warps in [2, 4] for
num_stages in [2, 3, 4]], key=['BT'])
@triton.jit
def chunk_retention_bwd_kernel_dqkv(q, k, v, h, do, dh, dq, dk, dv, offsets,
indices, scale, B: tl.constexpr, T: tl.constexpr, H: tl.constexpr, K:
tl.constexpr, V: tl.constexpr, BT: tl.constexpr, BK: tl.constexpr, BV:
tl.constexpr, NT: tl.constexpr, USE_OFFSETS: tl.constexpr, HEAD_FIRST:
tl.constexpr):
i_k, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_b, i_h = i_bh // H, i_bh % H
if USE_OFFSETS:
i_tg = i_t
i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices +
i_t * 2 + 1).to(tl.int32)
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
all = T
T = eos - bos
NT = tl.cdiv(T, BT)
else:
NT = tl.cdiv(T, BT)
i_tg = i_b * NT + i_t
bos, eos = i_b * T, i_b * T + T
all = B * T
b_b = tl.math.log2(1 - tl.math.exp2(-5 - i_h * 1.0))
o_i = tl.arange(0, BT)
d_q, d_k = tl.math.exp2((o_i + 1) * b_b), tl.math.exp2((min(BT, T - i_t *
BT) - o_i - 1) * b_b)
d_q = (d_q * scale).to(d_q.dtype)
m_s = o_i[:, None] >= o_i[None, :]
d_s = tl.where(m_s, tl.math.exp2((o_i[:, None] - o_i[None, :]) * b_b), 0
) * scale
if HEAD_FIRST:
p_q = tl.make_block_ptr(q + i_bh * T * K, (K, T), (1, K), (i_k * BK,
i_t * BT), (BK, BT), (0, 1))
p_k = tl.make_block_ptr(k + i_bh * T * K, (T, K), (K, 1), (i_t * BT,
i_k * BK), (BT, BK), (1, 0))
else:
p_q = tl.make_block_ptr(q + (bos * H + i_h) * K, (K, T), (1, H * K),
(i_k * BK, i_t * BT), (BK, BT), (0, 1))
p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (T, K), (H * K, 1),
(i_t * BT, i_k * BK), (BT, BK), (1, 0))
b_q = tl.load(p_q, boundary_check=(0, 1))
b_k = tl.load(p_k, boundary_check=(0, 1))
b_s = tl.dot(b_k, b_q, allow_tf32=False) * tl.trans(d_s)
b_dq = tl.zeros([BT, BK], dtype=tl.float32)
b_dk = tl.zeros([BT, BK], dtype=tl.float32)
b_ds = tl.zeros([BT, BT], dtype=tl.float32)
for i_v in range(tl.cdiv(V, BV)):
if HEAD_FIRST:
p_v = tl.make_block_ptr(v + i_bh * T * V, (T, V), (V, 1), (i_t *
BT, i_v * BV), (BT, BV), (1, 0))
p_do = tl.make_block_ptr(do + i_bh * T * V, (T, V), (V, 1), (
i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_dv = tl.make_block_ptr(dv + (i_k * B * H + i_bh) * T * V, (T,
V), (V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_h = tl.make_block_ptr(h + (i_bh * NT + i_t) * K * V, (V, K),
(1, V), (i_v * BV, i_k * BK), (BV, BK), (0, 1))
p_dh = tl.make_block_ptr(dh + (i_bh * NT + i_t) * K * V, (K, V),
(V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
else:
p_v = tl.make_block_ptr(v + (bos * H + i_h) * V, (T, V), (H * V,
1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_do = tl.make_block_ptr(do + (bos * H + i_h) * V, (T, V), (H *
V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_dv = tl.make_block_ptr(dv + ((i_k * all + bos) * H + i_h) * V,
(T, V), (H * V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_h = tl.make_block_ptr(h + (i_tg * H + i_h) * K * V, (V, K), (
1, V), (i_v * BV, i_k * BK), (BV, BK), (0, 1))
p_dh = tl.make_block_ptr(dh + (i_tg * H + i_h) * K * V, (K, V),
(V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_do = tl.load(p_do, boundary_check=(0, 1))
b_h = tl.load(p_h, boundary_check=(0, 1))
b_dh = tl.load(p_dh, boundary_check=(0, 1))
b_ds += tl.dot(b_do, tl.trans(b_v), allow_tf32=False)
b_dq += tl.dot(b_do, b_h.to(b_do.dtype), allow_tf32=False)
b_dk += tl.dot(b_v, tl.trans(b_dh).to(b_v.dtype), allow_tf32=False)
b_dv = tl.dot(b_k, b_dh, allow_tf32=False) * d_k[:, None] + tl.dot(b_s
.to(b_q.dtype), b_do, allow_tf32=False)
tl.store(p_dv, b_dv.to(p_dv.dtype.element_ty), boundary_check=(0, 1))
b_ds = (b_ds * d_s).to(b_q.dtype)
b_dq = b_dq * d_q[:, None] + tl.dot(b_ds, b_k, allow_tf32=False)
b_dk = b_dk * d_k[:, None] + tl.trans(tl.dot(b_q, b_ds, allow_tf32=False))
if HEAD_FIRST:
p_dq = tl.make_block_ptr(dq + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
p_dk = tl.make_block_ptr(dk + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
else:
p_dq = tl.make_block_ptr(dq + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_dk = tl.make_block_ptr(dk + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
tl.store(p_dq, b_dq.to(p_dq.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_dk, b_dk.to(p_dk.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Backpropagation"
],
"Memory Access Pattern": [
"Tiled",
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/retention/chunk.py |
4cb7f10c-e876-4404-a8ee-8dfb7a2f3050 | sparse_copy.py | ServiceNow/Fast-LLM | fast_llm/functional/triton/sparse_copy.py | 8b46289079da67cba99628448a6b6083dac083cf | 0 | @triton.jit
def copy_sparse_to_dense_grad_score_kernel(input_ptr, grad_output_ptr,
grad_scores_ptr, sparse_rows_ptr, num_columns: tl.constexpr,
num_experts_per_token: tl.constexpr, block_size: tl.constexpr):
dense_row = tl.program_id(0)
top_index = tl.program_id(1)
sparse_row = tl.load(sparse_rows_ptr + dense_row *
num_experts_per_token + top_index)
grad_output_ptr += dense_row * num_columns
input_ptr += sparse_row * num_columns
offsets = tl.arange(0, block_size)
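    # Blockwise dot product of the expert input row with the dense grad row; tl.sum at the end reduces it to the score gradient.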
if num_columns % block_size == 0:
grad_scores = tl.load(input_ptr + offsets).to(tl.float32) * tl.load(
grad_output_ptr + offsets).to(tl.float32)
else:
mask = offsets < num_columns
grad_scores = tl.load(input_ptr + offsets, mask=mask).to(tl.float32
) * tl.load(grad_output_ptr + offsets, mask=mask).to(tl.float32)
for i in range(1, tl.cdiv(num_columns, block_size)):
offsets += block_size
if num_columns % block_size == 0:
grad_scores += tl.load(input_ptr + offsets).to(tl.float32
) * tl.load(grad_output_ptr + offsets).to(tl.float32)
else:
mask = offsets < num_columns
grad_scores += tl.load(input_ptr + offsets, mask=mask).to(tl.
float32) * tl.load(grad_output_ptr + offsets, mask=mask).to(tl
.float32)
tl.store(grad_scores_ptr + dense_row * num_experts_per_token +
top_index, tl.sum(grad_scores))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations",
"Backpropagation"
],
"Memory Access Pattern": [
"Tiled",
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Memory-Bound"
]
} | [
"Apache"
] | https://github.com/ServiceNow/Fast-LLM/blob/8b46289079da67cba99628448a6b6083dac083cf/fast_llm/functional/triton/sparse_copy.py |
0958dea2-dc46-4f8c-9895-f76364fbcc17 | parallel.py | sustcsonglin/flash-linear-attention | fla/ops/retention/parallel.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'NV': lambda args: triton.cdiv(args['V'], args['BV']),
'OUTPUT_ATTENTIONS': lambda args: args['attn'] is not None})
@triton.jit
def parallel_retention_fwd_kernel(q, k, v, o, attn, scale, B: tl.constexpr,
H: tl.constexpr, T: tl.constexpr, K: tl.constexpr, V: tl.constexpr, BT:
tl.constexpr, BS: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr, NV:
tl.constexpr, OUTPUT_ATTENTIONS: tl.constexpr):
i_kv, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_k, i_v = i_kv // NV, i_kv % NV
i_h = i_bh % H
b_b = tl.math.log2(1 - tl.math.exp2(-5 - i_h * 1.0))
o_k = tl.arange(0, BS)
d_h = tl.math.exp2((BS - o_k) * b_b)
p_q = tl.make_block_ptr(q + i_bh * T * K, (T, K), (K, 1), (i_t * BT,
i_k * BK), (BT, BK), (1, 0))
p_k = tl.make_block_ptr(k + i_bh * T * K, (K, T), (1, K), (i_k * BK, 0),
(BK, BS), (0, 1))
p_v = tl.make_block_ptr(v + i_bh * T * V, (T, V), (V, 1), (0, i_v * BV),
(BS, BV), (1, 0))
if OUTPUT_ATTENTIONS:
p_a = tl.make_block_ptr(attn + (i_k * B * H + i_bh) * T * T, (T, T),
(T, 1), (i_t * BT, 0), (BT, BS), (1, 0))
b_q = tl.load(p_q, boundary_check=(0, 1))
b_q = (b_q * scale).to(b_q.dtype)
b_o = tl.zeros([BT, BV], dtype=tl.float32)
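    # Off-diagonal pass: attend to key/value blocks strictly before this query block, rescaling b_o by the block decay each step.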
for i in range(0, i_t * BT, BS):
b_k = tl.load(p_k, boundary_check=(0, 1))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_s = tl.dot(b_q, b_k, allow_tf32=False) * d_h
if i > 0:
b_o = b_o * tl.math.exp2(b_b * BS)
b_o += tl.dot(b_s.to(b_v.dtype), b_v, allow_tf32=False)
p_k = tl.advance(p_k, (0, BS))
p_v = tl.advance(p_v, (BS, 0))
if OUTPUT_ATTENTIONS:
tl.store(p_a, b_s.to(p_a.dtype.element_ty), boundary_check=(0, 1))
p_a = tl.advance(p_a, (0, BS))
tl.debug_barrier()
o_q = tl.arange(0, BT)
d_q = tl.math.exp2(tl.arange(0, BT) * b_b)
b_o *= d_q[:, None]
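    # Diagonal pass: causal attention within the current block, with per-pair decay d_s.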
p_k = tl.make_block_ptr(k + i_bh * T * K, (K, T), (1, K), (i_k * BK,
i_t * BT), (BK, BS), (0, 1))
p_v = tl.make_block_ptr(v + i_bh * T * V, (T, V), (V, 1), (i_t * BT,
i_v * BV), (BS, BV), (1, 0))
if OUTPUT_ATTENTIONS:
p_a = tl.make_block_ptr(attn + (i_k * B * H + i_bh) * T * T, (T, T),
(T, 1), (i_t * BT, i_t * BT), (BT, BS), (1, 0))
for _ in range(i_t * BT, (i_t + 1) * BT, BS):
b_k = tl.load(p_k, boundary_check=(0, 1))
b_v = tl.load(p_v, boundary_check=(0, 1))
m_s = o_q[:, None] >= o_k[None, :]
d_s = tl.where(m_s, tl.math.exp2((o_q[:, None] - o_k[None, :]) *
b_b), 0)
b_s = tl.dot(b_q, b_k, allow_tf32=False) * d_s
b_o += tl.dot(b_s.to(b_q.dtype), b_v, allow_tf32=False)
if OUTPUT_ATTENTIONS:
tl.store(p_a, b_s.to(p_a.dtype.element_ty), boundary_check=(0, 1))
p_a = tl.advance(p_a, (0, BS))
p_k = tl.advance(p_k, (0, BS))
p_v = tl.advance(p_v, (BS, 0))
o_k += BS
p_o = tl.make_block_ptr(o + (i_bh + B * H * i_k) * T * V, (T, V), (V, 1
), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Tiled",
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/retention/parallel.py |
509c75fc-fcc7-4a43-b0ef-aa00f4263dba | slstm_fw.py | NX-AI/flashrnn | flashrnn/flashrnn/triton_fused/slstm_fw.py | 3fca666a81c8740af4878d7bc5e2a51900e4fe14 | 0 | @triton.autotune(configs, key=['siz_B', 'T', 'B', 'NH', 'DH'])
@triton.jit
def _forward_sequence_kernel(states_initial, Wx, R, b, states_all,
gates_all, T: tl.constexpr, NS: tl.constexpr, B: tl.constexpr, NH: tl.
constexpr, DH: tl.constexpr, NGI: tl.constexpr, NGR: tl.constexpr,
siz_B: tl.constexpr, OUTPUT_GATES: tl.constexpr, DTYPE: tl.constexpr=tl
.float32):
idx_b_NH, idx_b_B = tl.program_id(0), tl.program_id(1)
str_matWx_NH = T * NGI * B * DH
str_matWx_T = NGI * B * DH
str_matStatesAll_NH = (T + 1) * NS * B * DH
str_matStatesAll_T = NS * B * DH
str_matGatesAll_NH = T * NGI * B * DH
str_matGatesAll_T = NGI * B * DH
matHtrans_initial_ptr = tl.make_block_ptr(base=states_initial +
idx_b_NH * NS * B * DH + 0 * B * DH, shape=(B, DH), strides=(DH, 1),
offsets=(idx_b_B * siz_B, 0), block_shape=(siz_B, DH), order=(0, 1))
matHtrans = tl.load(matHtrans_initial_ptr).to(tl.float32)
matCtrans_initial_ptr = tl.make_block_ptr(base=states_initial +
idx_b_NH * NS * B * DH + 1 * B * DH, shape=(B, DH), strides=(DH, 1),
offsets=(idx_b_B * siz_B, 0), block_shape=(siz_B, DH), order=(0, 1))
matCtrans = tl.load(matCtrans_initial_ptr).to(tl.float32)
matNtrans_initial_ptr = tl.make_block_ptr(base=states_initial +
idx_b_NH * NS * B * DH + 2 * B * DH, shape=(B, DH), strides=(DH, 1),
offsets=(idx_b_B * siz_B, 0), block_shape=(siz_B, DH), order=(0, 1))
matNtrans = tl.load(matNtrans_initial_ptr).to(tl.float32)
matMtrans_initial_ptr = tl.make_block_ptr(base=states_initial +
idx_b_NH * NS * B * DH + 3 * B * DH, shape=(B, DH), strides=(DH, 1),
offsets=(idx_b_B * siz_B, 0), block_shape=(siz_B, DH), order=(0, 1))
matMtrans = tl.load(matMtrans_initial_ptr).to(tl.float32)
matHtrans_initial_store_ptr = tl.make_block_ptr(base=states_all +
idx_b_NH * str_matStatesAll_NH + 0 * str_matStatesAll_T + 0 * B *
DH, shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
block_shape=(siz_B, DH), order=(0, 1))
tl.store(matHtrans_initial_store_ptr, matHtrans.to(DTYPE))
matCtrans_initial_store_ptr = tl.make_block_ptr(base=states_all +
idx_b_NH * str_matStatesAll_NH + 0 * str_matStatesAll_T + 1 * B *
DH, shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
block_shape=(siz_B, DH), order=(0, 1))
tl.store(matCtrans_initial_store_ptr, matCtrans.to(DTYPE))
matNtrans_initial_store_ptr = tl.make_block_ptr(base=states_all +
idx_b_NH * str_matStatesAll_NH + 0 * str_matStatesAll_T + 2 * B *
DH, shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
block_shape=(siz_B, DH), order=(0, 1))
tl.store(matNtrans_initial_store_ptr, matNtrans.to(DTYPE))
matMtrans_initial_store_ptr = tl.make_block_ptr(base=states_all +
idx_b_NH * str_matStatesAll_NH + 0 * str_matStatesAll_T + 3 * B *
DH, shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
block_shape=(siz_B, DH), order=(0, 1))
tl.store(matMtrans_initial_store_ptr, matMtrans.to(DTYPE))
matRtrans_i_ptr = tl.make_block_ptr(base=R + idx_b_NH * DH * NGR * DH +
0 * DH * DH, shape=(DH, DH), strides=(DH, 1), offsets=(0, 0),
block_shape=(DH, DH), order=(0, 1))
matRtrans_i = tl.load(matRtrans_i_ptr)
matRtrans_f_ptr = tl.make_block_ptr(base=R + idx_b_NH * DH * NGR * DH +
1 * DH * DH, shape=(DH, DH), strides=(DH, 1), offsets=(0, 0),
block_shape=(DH, DH), order=(0, 1))
matRtrans_f = tl.load(matRtrans_f_ptr)
matRtrans_z_ptr = tl.make_block_ptr(base=R + idx_b_NH * DH * NGR * DH +
2 * DH * DH, shape=(DH, DH), strides=(DH, 1), offsets=(0, 0),
block_shape=(DH, DH), order=(0, 1))
matRtrans_z = tl.load(matRtrans_z_ptr)
matRtrans_o_ptr = tl.make_block_ptr(base=R + idx_b_NH * DH * NGR * DH +
3 * DH * DH, shape=(DH, DH), strides=(DH, 1), offsets=(0, 0),
block_shape=(DH, DH), order=(0, 1))
matRtrans_o = tl.load(matRtrans_o_ptr)
vecB_i_ptr = b + idx_b_NH * NGI * DH + 0 * DH + tl.arange(0, DH)
vecB_i = tl.load(vecB_i_ptr)
vecB_f_ptr = b + idx_b_NH * NGI * DH + 1 * DH + tl.arange(0, DH)
vecB_f = tl.load(vecB_f_ptr)
vecB_z_ptr = b + idx_b_NH * NGI * DH + 2 * DH + tl.arange(0, DH)
vecB_z = tl.load(vecB_z_ptr)
vecB_o_ptr = b + idx_b_NH * NGI * DH + 3 * DH + tl.arange(0, DH)
vecB_o = tl.load(vecB_o_ptr)
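    # Time loop: gate pre-activations are Wx[t] + h @ R + b; states (h, c, n, m) are updated and stored per step.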
for idx_t in range(T):
matIxtrans_ptr = tl.make_block_ptr(base=Wx + idx_b_NH *
str_matWx_NH + idx_t * str_matWx_T + 0 * B * DH, shape=(B, DH),
strides=(DH, 1), offsets=(idx_b_B * siz_B, 0), block_shape=(
siz_B, DH), order=(0, 1))
matIxtrans = tl.load(matIxtrans_ptr)
matFxtrans_ptr = tl.make_block_ptr(base=Wx + idx_b_NH *
str_matWx_NH + idx_t * str_matWx_T + 1 * B * DH, shape=(B, DH),
strides=(DH, 1), offsets=(idx_b_B * siz_B, 0), block_shape=(
siz_B, DH), order=(0, 1))
matFxtrans = tl.load(matFxtrans_ptr)
matZxtrans_ptr = tl.make_block_ptr(base=Wx + idx_b_NH *
str_matWx_NH + idx_t * str_matWx_T + 2 * B * DH, shape=(B, DH),
strides=(DH, 1), offsets=(idx_b_B * siz_B, 0), block_shape=(
siz_B, DH), order=(0, 1))
matZxtrans = tl.load(matZxtrans_ptr)
matOxtrans_ptr = tl.make_block_ptr(base=Wx + idx_b_NH *
str_matWx_NH + idx_t * str_matWx_T + 3 * B * DH, shape=(B, DH),
strides=(DH, 1), offsets=(idx_b_B * siz_B, 0), block_shape=(
siz_B, DH), order=(0, 1))
matOxtrans = tl.load(matOxtrans_ptr)
matRhtrans_i = tl.dot(matHtrans.to(DTYPE), matRtrans_i)
matRhtrans_f = tl.dot(matHtrans.to(DTYPE), matRtrans_f)
matRhtrans_z = tl.dot(matHtrans.to(DTYPE), matRtrans_z)
matRhtrans_o = tl.dot(matHtrans.to(DTYPE), matRtrans_o)
matIbar = matIxtrans + matRhtrans_i + vecB_i[None, :]
matFbar = matFxtrans + matRhtrans_f + vecB_f[None, :]
matZbar = matZxtrans + matRhtrans_z + vecB_z[None, :]
matObar = matOxtrans + matRhtrans_o + vecB_o[None, :]
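        # Log-space stabilization: m tracks a running max so the exponentials of the i and f gates stay bounded.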
matLogFplusM = matMtrans + tl.log(tl.sigmoid(matFbar))
matMtrans_next = tl.where(matNtrans == 0.0, matIbar, tl.maximum(
matIbar, matLogFplusM))
matI = tl.exp(matIbar - matMtrans_next)
matF = tl.exp(matLogFplusM - matMtrans_next)
matZ = triton_tanh(matZbar)
matO = tl.sigmoid(matObar)
matCtrans_next = matF * matCtrans + matI * matZ
matNtrans_next = tl.maximum(matF * matNtrans + matI, 1.0)
matHtrans_next = matO * (matCtrans_next / matNtrans_next)
matHtrans_next_ptr = tl.make_block_ptr(base=states_all + idx_b_NH *
str_matStatesAll_NH + (idx_t + 1) * str_matStatesAll_T + 0 * B *
DH, shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0
), block_shape=(siz_B, DH), order=(0, 1))
tl.store(matHtrans_next_ptr, matHtrans_next.to(DTYPE))
matCtrans_next_ptr = tl.make_block_ptr(base=states_all + idx_b_NH *
str_matStatesAll_NH + (idx_t + 1) * str_matStatesAll_T + 1 * B *
DH, shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0
), block_shape=(siz_B, DH), order=(0, 1))
tl.store(matCtrans_next_ptr, matCtrans_next.to(DTYPE))
matNtrans_next_ptr = tl.make_block_ptr(base=states_all + idx_b_NH *
str_matStatesAll_NH + (idx_t + 1) * str_matStatesAll_T + 2 * B *
DH, shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0
), block_shape=(siz_B, DH), order=(0, 1))
tl.store(matNtrans_next_ptr, matNtrans_next.to(DTYPE))
matMtrans_next_ptr = tl.make_block_ptr(base=states_all + idx_b_NH *
str_matStatesAll_NH + (idx_t + 1) * str_matStatesAll_T + 3 * B *
DH, shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0
), block_shape=(siz_B, DH), order=(0, 1))
tl.store(matMtrans_next_ptr, matMtrans_next.to(DTYPE))
if OUTPUT_GATES:
matGatesItrans_ptr = tl.make_block_ptr(base=gates_all +
idx_b_NH * str_matGatesAll_NH + idx_t * str_matGatesAll_T +
0 * B * DH, shape=(B, DH), strides=(DH, 1), offsets=(
idx_b_B * siz_B, 0), block_shape=(siz_B, DH), order=(0, 1))
tl.store(matGatesItrans_ptr, matIbar.to(DTYPE))
matGatesFtrans_ptr = tl.make_block_ptr(base=gates_all +
idx_b_NH * str_matGatesAll_NH + idx_t * str_matGatesAll_T +
1 * B * DH, shape=(B, DH), strides=(DH, 1), offsets=(
idx_b_B * siz_B, 0), block_shape=(siz_B, DH), order=(0, 1))
tl.store(matGatesFtrans_ptr, matFbar.to(DTYPE))
matGatesZtrans_ptr = tl.make_block_ptr(base=gates_all +
idx_b_NH * str_matGatesAll_NH + idx_t * str_matGatesAll_T +
2 * B * DH, shape=(B, DH), strides=(DH, 1), offsets=(
idx_b_B * siz_B, 0), block_shape=(siz_B, DH), order=(0, 1))
tl.store(matGatesZtrans_ptr, matZ.to(DTYPE))
matGatesOtrans_ptr = tl.make_block_ptr(base=gates_all +
idx_b_NH * str_matGatesAll_NH + idx_t * str_matGatesAll_T +
3 * B * DH, shape=(B, DH), strides=(DH, 1), offsets=(
idx_b_B * siz_B, 0), block_shape=(siz_B, DH), order=(0, 1))
tl.store(matGatesOtrans_ptr, matO.to(DTYPE))
matCtrans = matCtrans_next
matHtrans = matHtrans_next
matNtrans = matNtrans_next
matMtrans = matMtrans_next
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Recurrent Neural Networks",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Tiled",
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT",
"BSD"
] | https://github.com/NX-AI/flashrnn/blob/3fca666a81c8740af4878d7bc5e2a51900e4fe14/flashrnn/flashrnn/triton_fused/slstm_fw.py |
8404cde9-7803-4d09-a284-c110eb62f90a | mlp.py | ServiceNow/Fast-LLM | fast_llm/functional/triton/mlp.py | 8b46289079da67cba99628448a6b6083dac083cf | 0 | @triton.jit
def triton_mlp_activation_forward_kernel(input_ptr, output_ptr, gated: tl.
constexpr, activation_type: tl.constexpr, n_cols: tl.constexpr,
block_size: tl.constexpr):
row_idx = tl.program_id(0).to(tl.int64)
columns = tl.program_id(1) * block_size + tl.arange(0, block_size)
output_offsets = n_cols * row_idx + columns
input_offsets = 2 * n_cols * row_idx + columns if gated else output_offsets
input_ptr = input_ptr + input_offsets
mask = columns < n_cols
input_ = tl.load(input_ptr, mask=mask).to(tl.float32)
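    # Tanh approximation of GELU below; 0.79788456 ~= sqrt(2 / pi).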
if activation_type == _TritonActivationType.gelu.value:
tanh_input = 0.79788456 * input_ * (1 + 0.044715 * input_ * input_)
tanh = 1 - 2 / (1 + tl.exp(2 * tanh_input))
out = input_ * 0.5 * (1.0 + tanh)
elif activation_type == _TritonActivationType.silu.value:
out = input_ / (1 + tl.exp(-input_))
elif activation_type == _TritonActivationType.relu.value:
out = tl.where(input_ > 0, input_, 0)
    elif activation_type == _TritonActivationType.squared_relu.value:
relu_out = tl.where(input_ > 0, input_, 0)
out = relu_out * relu_out
else:
raise NotImplementedError()
if gated:
other = tl.load(input_ptr + n_cols, mask=mask)
out = out * other
tl.store(output_ptr + output_offsets, out, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Activation Functions",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Tiled",
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"Apache"
] | https://github.com/ServiceNow/Fast-LLM/blob/8b46289079da67cba99628448a6b6083dac083cf/fast_llm/functional/triton/mlp.py |
83293d43-8c29-4602-b2d3-8e55f706ba6a | normalization.py | ai-compiler-study/triton-kernels | triton_kernels/kernels/normalization.py | 2308e5e9d965059fe2d19b4d535debac4970b69e | 0 | @triton.jit
def _rms_norm_fwd(X, Y, W, Rstd, N, eps, BLOCK_M: tl.constexpr, BLOCK_N: tl
.constexpr):
row_offset = tl.program_id(0) * BLOCK_M
row_index = row_offset + tl.arange(0, BLOCK_M)[:, None]
col_index = tl.arange(0, BLOCK_N)[None, :]
col_mask = col_index < N
x = tl.load(X + N * row_index + col_index, col_mask, other=0.0)
w = tl.load(W + col_index, col_mask, eviction_policy='evict_last',
other=0.0)
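    # RMS norm: y = x * rsqrt(mean(x^2) + eps) * w.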
xx = x * x
xx = tl.broadcast_to(xx, [BLOCK_M, BLOCK_N])
mean = tl.sum(xx, axis=1)[:, None] / N
rstd = tl.rsqrt(mean + eps)
y = x * rstd * w
tl.store(Rstd + row_index, rstd)
tl.store(Y + N * row_index + col_index, y, col_mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Tiled",
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/ai-compiler-study/triton-kernels/blob/2308e5e9d965059fe2d19b4d535debac4970b69e/triton_kernels/kernels/normalization.py |
87e4c097-d2ca-4b41-ab22-ac0e29843a3c | RzLinearForward.py | apd10/RzLinear | python/rz_linear/impl/RzLinearForward.py | eb56657b2de0a97f398f88af421b0fbcbc5469c9 | 0 | @triton.jit
def rz_linear_forward_kernel_notune(a_ptr, b_ptr, c_ptr, init_factor, M, N,
K, H, stride_am, stride_ak, stride_cm, stride_cn, allow_tf32: tl.
constexpr, R7: int, R6: int, R5: int, R4: int, R3: int, R2: int, R1:
int, R0: int, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE: tl.constexpr):
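    # Non-autotuned entry point: delegates all work to rz_linear_forward_core.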
rz_linear_forward_core(a_ptr=a_ptr, b_ptr=b_ptr, c_ptr=c_ptr,
init_factor=init_factor, M=M, N=N, K=K, H=H, stride_am=stride_am,
stride_ak=stride_ak, stride_cm=stride_cm, stride_cn=stride_cn,
allow_tf32=allow_tf32, R7=R7, R6=R6, R5=R5, R4=R4, R3=R3, R2=R2, R1
=R1, R0=R0, BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N,
BLOCK_SIZE_K=BLOCK_SIZE_K, GROUP_SIZE=GROUP_SIZE)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled",
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/apd10/RzLinear/blob/eb56657b2de0a97f398f88af421b0fbcbc5469c9/python/rz_linear/impl/RzLinearForward.py |
7971a4d5-d72d-49a0-8810-4b44d3e7eab4 | real_rnn_tie_input_gate.py | berlino/seq_icl | src/models/sequence/rnn/scan_triton/real_rnn_tie_input_gate.py | 9b9223d15348b5a415fb453ed988ed5f7ab9fbdc | 0 | @triton.jit
def fwd_sequential_scan(v, f1, hidden, B, L, C, BLOCK_M: tl.constexpr):
offset_b = tl.program_id(0)
if offset_b >= B:
return
offset_n = tl.program_id(1)
ptr = tl.arange(0, BLOCK_M) + offset_b * L * C + offset_n * BLOCK_M
h1 = tl.zeros([BLOCK_M], dtype=tl.float32)
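    # Leaky-integrator recurrence: h = decay * h + (1 - decay) * x, factored as (h - x) * decay + x.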
for _ in range(L):
x0 = tl.load(v + ptr).to(tl.float32)
decay1 = tl.load(f1 + ptr).to(tl.float32)
h1 = (h1 - x0) * decay1 + x0
tl.store(hidden + ptr, h1.to(hidden.dtype.element_ty))
ptr += C
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Recurrent Neural Networks",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Tiled",
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/berlino/seq_icl/blob/9b9223d15348b5a415fb453ed988ed5f7ab9fbdc/src/models/sequence/rnn/scan_triton/real_rnn_tie_input_gate.py |
88e83a8a-262f-4bec-a1fa-c5f9b43f17d6 | relu.py | daemyung/practice-triton | relu.py | 27f727726f1507c8380a1c11751d851c7c4a07ce | 0 | @staticmethod
@triton.jit
def forward(output_ptr, input_ptr, size, block_size: tl.constexpr):
pid = tl.program_id(0)
offset = pid * block_size
input_block_ptr = tl.make_block_ptr(input_ptr, shape=(size,), strides=(
1,), offsets=(offset,), block_shape=(block_size,), order=(0,))
output_block_ptr = tl.make_block_ptr(output_ptr, shape=(size,), strides
=(1,), offsets=(offset,), block_shape=(block_size,), order=(0,))
input = tl.load(input_block_ptr, boundary_check=(0,))
condition = input >= 0
output = tl.where(condition, input, 0)
tl.store(output_block_ptr, output, boundary_check=(0,))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Activation Functions",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Low Latency"
]
} | [
"MIT"
] | https://github.com/daemyung/practice-triton/blob/27f727726f1507c8380a1c11751d851c7c4a07ce/relu.py |
88873c2a-b635-4908-bbff-feff8674e931 | rotary_embedding.py | AlibabaPAI/FLASHNN | flashnn/triton_kernels/rotary_embedding.py | 528a9301587f5fb135b25d973a87ba0a40a703a7 | 0 | @triton.jit
def _rotary_embedding_kernel(q_rot_ptr, k_rot_ptr, q_ptr, k_ptr, cos_ptr,
sin_ptr, seq_len, batch_size, num_heads, num_kv, hidden_size, q_strides,
q_strideb, q_strideh, q_strided, k_strides, k_strideb, k_stridekv,
k_strided, seq_offset, BLOCK_SIZE_SEQ: tl.constexpr, BLOCK_SIZE_BH: tl.
constexpr, BLOCK_SIZE_D: tl.constexpr):
pid = tl.program_id(axis=0)
num_bh_blocks = tl.cdiv(batch_size * num_heads, BLOCK_SIZE_BH)
num_d_blocks = tl.cdiv(hidden_size // 2, BLOCK_SIZE_D)
bh_id = pid % num_bh_blocks
d_id = pid // num_bh_blocks % num_d_blocks
seq_block_id = pid // num_bh_blocks // num_d_blocks
seq_offs = seq_offset + seq_block_id * BLOCK_SIZE_SEQ + tl.arange(0,
BLOCK_SIZE_SEQ)
bh_offs = bh_id * BLOCK_SIZE_BH + tl.arange(0, BLOCK_SIZE_BH)
q_common_offs = seq_offs[:, None, None] * q_strides + bh_offs[None, :, None
] * q_strideh
k_common_offs = seq_offs[:, None, None] * k_strides + bh_offs[None, :, None
] // (num_heads // num_kv) * k_stridekv
q_base_offs, qo_base_offs = (q_ptr + q_common_offs, q_rot_ptr +
q_common_offs)
k_base_offs, ko_base_offs = (k_ptr + k_common_offs, k_rot_ptr +
k_common_offs)
c_base_offs = cos_ptr + seq_offs[:, None] * hidden_size
s_base_offs = sin_ptr + seq_offs[:, None] * hidden_size
hidden_block_range = tl.arange(0, BLOCK_SIZE_D)
hidden_offs_l = d_id * BLOCK_SIZE_D + hidden_block_range
hidden_offs_r = hidden_size // 2 + hidden_offs_l
mask_l, mask_r = (hidden_offs_l < hidden_size // 2, hidden_offs_r <
hidden_size)
mask_bh = bh_offs < batch_size * num_heads
mask_seq = seq_offs < seq_len
mask_bh_seq = mask_bh[None, :, None] & mask_seq[:, None, None]
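    # Rotate-half RoPE: each left-half element (offs_l) pairs with its partner at offs_r = offs_l + hidden_size // 2.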
q_l, k_l = tl.load(q_base_offs + hidden_offs_l[None, None, :] *
q_strided, mask=mask_l[None, None, :] & mask_bh_seq, other=0), tl.load(
k_base_offs + hidden_offs_l[None, None, :] * k_strided, mask=mask_l
[None, None, :] & mask_bh_seq, other=0)
q_r, k_r = tl.load(q_base_offs + hidden_offs_r[None, None, :] *
q_strided, mask=mask_r[None, None, :] & mask_bh_seq, other=0), tl.load(
k_base_offs + hidden_offs_r[None, None, :] * k_strided, mask=mask_r
[None, None, :] & mask_bh_seq, other=0)
cos_l, cos_r = tl.load(c_base_offs + hidden_offs_l[None, :], mask=
mask_l[None, :], other=0)[:, None, :], tl.load(c_base_offs +
hidden_offs_r[None, :], mask=mask_r[None, :], other=0)[:, None, :]
sin_l, sin_r = tl.load(s_base_offs + hidden_offs_l[None, :], mask=
mask_l[None, :], other=0)[:, None, :], tl.load(s_base_offs +
hidden_offs_r[None, :], mask=mask_r[None, :], other=0)[:, None, :]
qo_l = q_l * cos_l - q_r * sin_l
tl.store(qo_base_offs + hidden_offs_l, qo_l, mask=mask_l[None, None, :] &
mask_bh_seq)
qo_r = q_r * cos_r + q_l * sin_r
tl.store(qo_base_offs + hidden_offs_r, qo_r, mask=mask_r[None, None, :] &
mask_bh_seq)
ko_l = k_l * cos_l - k_r * sin_l
tl.store(ko_base_offs + hidden_offs_l, ko_l, mask=mask_l[None, None, :] &
mask_bh_seq)
ko_r = k_r * cos_r + k_l * sin_r
tl.store(ko_base_offs + hidden_offs_r, ko_r, mask=mask_r[None, None, :] &
mask_bh_seq)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"Apache"
] | https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/rotary_embedding.py |
1629adb1-4300-4648-8f28-5969b18864d5 | block_ptr.py | daemyung/practice-triton | block_ptr.py | 27f727726f1507c8380a1c11751d851c7c4a07ce | 0 | @triton.jit
def add_kernel(x_ptr, y_ptr, z_ptr, size, block_size: tl.constexpr):
offset = tl.program_id(0) * block_size
x_block_ptr = tl.make_block_ptr(x_ptr, shape=(size,), strides=(1,),
offsets=(offset,), block_shape=(block_size,), order=(0,))
y_block_ptr = tl.make_block_ptr(y_ptr, shape=(size,), strides=(1,),
offsets=(offset,), block_shape=(block_size,), order=(0,))
x = tl.load(x_block_ptr, boundary_check=(0,))
y = tl.load(y_block_ptr, boundary_check=(0,))
z = x + y
z_block_ptr = tl.make_block_ptr(z_ptr, shape=(size,), strides=(1,),
offsets=(offset,), block_shape=(block_size,), order=(0,))
tl.store(z_block_ptr, z, boundary_check=(0,))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/daemyung/practice-triton/blob/27f727726f1507c8380a1c11751d851c7c4a07ce/block_ptr.py |
df24c070-c472-4437-97d2-8959ecc59778 | cross_entropy.py | dame-cell/Triformer | triformer/cross_entropy.py | 0712537d576166b93fa09aa9509b2661b9ed8a68 | 0 | @triton.jit
def cross_entropy_fwd_bwd_kernel(output_loss_ptr, output_logit_grad_ptr,
input_logit_ptr, input_targ_ptr, input_divisor_ptr, output_loss_stride,
output_logit_grad_stride, input_logit_stride, input_targ_stride, n_cols,
ignore_index, BLOCK_SIZE: tl.constexpr):
row_idx = tl.program_id(0)
logit_grad_row_start_ptr = (output_logit_grad_ptr + row_idx *
output_logit_grad_stride)
logit_row_start_ptr = input_logit_ptr + row_idx * input_logit_stride
targ_ptr = input_targ_ptr + row_idx * input_targ_stride
loss_ptr = output_loss_ptr + row_idx * output_loss_stride
col_offsets = tl.arange(0, BLOCK_SIZE)
logit_row_ptrs = logit_row_start_ptr + col_offsets
logit_grad_row_ptrs = logit_grad_row_start_ptr + col_offsets
logit_row = tl.load(logit_row_ptrs, mask=col_offsets < n_cols, other=
float('-Inf'))
targ = tl.load(targ_ptr)
divisor = tl.load(input_divisor_ptr)
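    # Numerically stable log-softmax: shift the row by its max before exponentiating.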
logit_row = logit_row - tl.max(logit_row, axis=0)
exp_logit_row = tl.exp(logit_row)
sum_exp_logit_row = tl.sum(exp_logit_row, axis=0)
log_sum_exp_logit_row = tl.log(sum_exp_logit_row)
logit_gt_logit = tl.sum(tl.where(targ == col_offsets, logit_row, 0.0))
loss = log_sum_exp_logit_row - logit_gt_logit
loss = loss / divisor
loss = tl.where(targ == ignore_index, 0.0, loss)
tl.store(loss_ptr, loss)
targ_one_hot = tl.where(targ == col_offsets, 1.0, 0.0)
grad = exp_logit_row / sum_exp_logit_row - targ_one_hot
grad = grad / divisor
grad = tl.where(targ == ignore_index, 0.0, grad)
tl.store(logit_grad_row_ptrs, grad, mask=col_offsets < n_cols)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Softmax",
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/dame-cell/Triformer/blob/0712537d576166b93fa09aa9509b2661b9ed8a68/triformer/cross_entropy.py |
d1bbad34-33d0-441b-b7e1-fbac372c752f | chunk_h_split.py | sustcsonglin/flash-linear-attention | fla/ops/common/chunk_h_split.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'STORE_INITIAL_STATE_GRADIENT': lambda args: args['dh0'
] is not None, 'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({'BK': BK, 'BV': BV}, num_warps=
num_warps, num_stages=num_stages) for BK in [32, 64] for BV in [32, 64] for
num_warps in [2, 4, 8] for num_stages in [2, 3, 4]], key=['BT', 'USE_G',
'USE_GK', 'USE_GV'])
@triton.jit
def chunk_bwd_kernel_dh_reduction(g, gk, gv, dhs, dhr, dh0, offsets,
split_offsets, T: tl.constexpr, S: tl.constexpr, H: tl.constexpr, HQ:
tl.constexpr, K: tl.constexpr, V: tl.constexpr, BT: tl.constexpr, BK:
tl.constexpr, BV: tl.constexpr, NG: tl.constexpr, USE_G: tl.constexpr,
USE_GK: tl.constexpr, USE_GV: tl.constexpr,
STORE_INITIAL_STATE_GRADIENT: tl.constexpr, USE_OFFSETS: tl.constexpr,
HEAD_FIRST: tl.constexpr):
i_k, i_v, i_nh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_n, i_hq = i_nh // HQ, i_nh % HQ
i_ng, i_h = i_nh // NG, i_hq // NG
if USE_OFFSETS:
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
T = eos - bos
NS = tl.cdiv(T, S)
boh = tl.load(split_offsets + i_n).to(tl.int32)
else:
bos, eos = i_n * T, i_n * T + T
NS = tl.cdiv(T, S)
boh = i_n * NS
b_dh = tl.zeros([BK, BV], dtype=tl.float32)
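    # Scan the splits in reverse, carrying the state gradient b_dh backward and applying each chunk's decay gates (g/gk/gv).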
for i_s in range(NS - 2, -1, -1):
p_dhs = tl.make_block_ptr(dhs + ((boh + i_s + 1) * H + i_h) * K * V,
(K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
p_dhr = tl.make_block_ptr(dhr + ((boh + i_s) * H + i_h) * K * V, (K,
V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
b_dh += tl.load(p_dhs, boundary_check=(0, 1)).to(tl.float32)
tl.store(p_dhr, b_dh.to(p_dhr.dtype.element_ty), boundary_check=(0, 1))
for i_t in range(tl.cdiv(min(i_s * S + S, T), BT) - 1, tl.cdiv(i_s *
S, BT) - 1, -1):
last_idx = min(i_t * BT + BT, T) - 1
if USE_G:
if HEAD_FIRST:
b_g_last = tl.load(g + i_ng * T + last_idx)
else:
b_g_last = tl.load(g + (bos + last_idx) * H + i_h)
b_dh *= tl.exp(b_g_last)
if USE_GK:
if HEAD_FIRST:
p_gk_last = gk + (i_ng * T + last_idx
) * K + i_k * BK + tl.arange(0, BK)
else:
p_gk_last = gk + (bos + last_idx
) * H * K + i_h * K + i_k * BK + tl.arange(0, BK)
p_gk_last = tl.max_contiguous(tl.multiple_of(p_gk_last, BK), BK
)
b_gk_last = tl.load(p_gk_last, mask=i_k * BK + tl.arange(0,
BK) < K, other=0.0)
b_dh *= tl.exp(b_gk_last)[:, None]
if USE_GV:
if HEAD_FIRST:
p_gv_last = gv + (i_ng * T + last_idx
) * V + i_v * BV + tl.arange(0, BV)
else:
p_gv_last = gv + (bos + last_idx
) * H * V + i_h * V + i_v * BV + tl.arange(0, BV)
p_gv_last = tl.max_contiguous(tl.multiple_of(p_gv_last, BV), BV
)
b_gv_last = tl.load(p_gv_last, mask=i_v * BV + tl.arange(0,
BV) < V, other=0.0)
b_dh *= tl.exp(b_gv_last)[None, :]
if NS > 1:
if STORE_INITIAL_STATE_GRADIENT:
p_dhs = tl.make_block_ptr(dhs + (boh * H + i_h) * K * V, (K, V),
(V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
p_dh0 = tl.make_block_ptr(dh0 + i_nh * K * V, (K, V), (V, 1), (
i_k * BK, i_v * BV), (BK, BV), (1, 0))
b_dh += tl.load(p_dhs, boundary_check=(0, 1)).to(tl.float32)
tl.store(p_dh0, b_dh.to(p_dh0.dtype.element_ty), boundary_check
=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/common/chunk_h_split.py |
12456573-9f7c-4e65-b821-93e96495dd34 | quant_triton.py | CompendiumLabs/ziggy | ziggy/backends/quant_triton.py | bd12fe50ca3475743f62ae26d4c184108e441e03 | 0 | @triton.jit
def clamp(x, a, b):
return tl.maximum(a, tl.minimum(b, x))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/CompendiumLabs/ziggy/blob/bd12fe50ca3475743f62ae26d4c184108e441e03/ziggy/backends/quant_triton.py |
434a7a78-d4a9-4a24-a028-0bbe017c400b | masks.py | drisspg/transformer_nuggets | transformer_nuggets/flash/masks.py | a4c66bbeebaa479ad8b6ed82d7efbafa41b17260 | 0 | @triton.jit
def score_modification(score, offs_m, start_n, offs_n, off_hz, num_heads, q,
k, mask_block_ptr, BIAS_CHOICE: tl.constexpr, DEBUG_MASK: tl.constexpr,
IS_CAUSAL: tl.constexpr, MATMUL_PRECISION: tl.constexpr=tl.float16):
batch = off_hz // num_heads
head = off_hz % num_heads
seq_len_q = offs_m[:, None]
seq_len_kv = start_n + offs_n[None, :]
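    # Compile-time dispatch on the bias mode; each branch adds its positional bias or mask to the raw scores.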
if BIAS_CHOICE == BiasMode.rel_pos.value:
score = rel_attention_triton(score, batch, head, seq_len_q, seq_len_kv)
elif BIAS_CHOICE == BiasMode.alibi.value:
score = alibi_attention_triton(score, batch, head, seq_len_q,
seq_len_kv, num_heads)
elif BIAS_CHOICE == BiasMode.inverse_causal.value:
score = inverse_causal_mask_triton(score, batch, head, seq_len_q,
seq_len_kv)
elif BIAS_CHOICE == BiasMode.causal.value:
score = causal_mask_triton(score, batch, head, seq_len_q, seq_len_kv)
    if DEBUG_MASK and BIAS_CHOICE != BiasMode.none.value:
mask = score - tl.dot(q.to(MATMUL_PRECISION), k.to(MATMUL_PRECISION))
tl.store(mask_block_ptr, mask)
return score
| {
"Data Type": [
"fp16"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"BSD"
] | https://github.com/drisspg/transformer_nuggets/blob/a4c66bbeebaa479ad8b6ed82d7efbafa41b17260/transformer_nuggets/flash/masks.py |
049b3fe1-f26d-423e-844f-bef45e5755b6 | masks.py | drisspg/transformer_nuggets | transformer_nuggets/flash/masks.py | a4c66bbeebaa479ad8b6ed82d7efbafa41b17260 | 0 | @triton.jit
def rel_attention_triton(score, batch, head, seq_len_q, seq_len_kv):
bias = seq_len_kv - seq_len_q
score = score + bias
return score
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"BSD"
] | https://github.com/drisspg/transformer_nuggets/blob/a4c66bbeebaa479ad8b6ed82d7efbafa41b17260/transformer_nuggets/flash/masks.py |
5ff91ea0-293b-4837-826f-44c0fe4e319c | triton_kernels.py | vkuzo/pytorch_scripts | reduction_hack/triton_kernels.py | 15322fe2a72fbebe1b9d9e38978cb6476da3bf70 | 0 | @triton.autotune(configs=[triton.Config({'BLOCK_SIZE': 512}), triton.Config
({'BLOCK_SIZE': 1024}), triton.Config({'BLOCK_SIZE': 2048}), triton.
Config({'BLOCK_SIZE': 8192}), triton.Config({'BLOCK_SIZE': 16384})],
key=['n_elements'])
@triton.jit
def max_with_atomics_kernel(in_ptr0, out_ptr, n_elements, BLOCK_SIZE:
'tl.constexpr'):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
x = tl.load(in_ptr0 + offsets, mask=mask)
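    # Reduce this block to one scalar max, then fold it into the global result with a single atomic.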
x_max = tl.max(x)
tl.atomic_max(out_ptr, x_max)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Top-K Selection"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/vkuzo/pytorch_scripts/blob/15322fe2a72fbebe1b9d9e38978cb6476da3bf70/reduction_hack/triton_kernels.py |
c2fc4d8e-023b-46eb-8021-911d529c03b8 | fused_recurrent.py | sustcsonglin/flash-linear-attention | fla/ops/delta_rule/fused_recurrent.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_INITIAL_STATE': lambda args: args['h0'] is not
None, 'STORE_FINAL_STATE': lambda args: args['ht'] is not None,
'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.jit
def fused_recurrent_delta_rule_fwd_kernel(q, k, v, u, beta, o, h0, ht,
offsets, scale, B: tl.constexpr, T: tl.constexpr, H: tl.constexpr, K:
tl.constexpr, V: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr,
USE_INITIAL_STATE: tl.constexpr, STORE_FINAL_STATE: tl.constexpr,
IS_BETA_HEADWISE: tl.constexpr, USE_OFFSETS: tl.constexpr, HEAD_FIRST:
tl.constexpr):
i_v, i_k, i_nh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_n, i_h = i_nh // H, i_nh % H
if USE_OFFSETS:
bos, eos = tl.load(offsets + i_n).to(tl.int64), tl.load(offsets +
i_n + 1).to(tl.int64)
all = T
T = eos - bos
else:
bos, eos = i_n * T, i_n * T + T
all = B * T
if HEAD_FIRST:
p_q = q + i_nh * T * K + i_k * BK + tl.arange(0, BK)
p_k = k + i_nh * T * K + i_k * BK + tl.arange(0, BK)
p_v = v + i_nh * T * V + i_v * BV + tl.arange(0, BV)
p_u = u + i_nh * T * V + i_v * BV + tl.arange(0, BV)
if IS_BETA_HEADWISE:
p_beta = beta + i_nh * T * V + i_v * BV + tl.arange(0, BV)
else:
p_beta = beta + i_nh * T
p_o = o + (i_k * B * H + i_nh) * T * V + i_v * BV + tl.arange(0, BV)
else:
p_q = q + (bos * H + i_h) * K + i_k * BK + tl.arange(0, BK)
p_k = k + (bos * H + i_h) * K + i_k * BK + tl.arange(0, BK)
p_v = v + (bos * H + i_h) * V + i_v * BV + tl.arange(0, BV)
p_u = u + (bos * H + i_h) * V + i_v * BV + tl.arange(0, BV)
if IS_BETA_HEADWISE:
p_beta = beta + (bos * H + i_h) * V + i_v * BV + tl.arange(0, BV)
else:
p_beta = beta + bos * H + i_h
p_o = o + ((i_k * all + bos) * H + i_h) * V + i_v * BV + tl.arange(
0, BV)
mask_k = i_k * BK + tl.arange(0, BK) < K
mask_v = i_v * BV + tl.arange(0, BV) < V
mask_h = mask_k[None, :] & mask_v[:, None]
b_h = tl.zeros([BV, BK], dtype=tl.float32)
if USE_INITIAL_STATE:
p_h0 = h0 + i_nh * K * V + (i_k * BK + tl.arange(0, BK)[None, :]
) * V + (i_v * BV + tl.arange(0, BV)[:, None])
b_h += tl.load(p_h0, mask=mask_h, other=0).to(tl.float32)
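    # Delta rule recurrence: subtract the state's prediction of v, scale the correction by beta, then rank-1 update the state h.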
for _ in range(0, T):
b_k = tl.load(p_k, mask=mask_k, other=0).to(tl.float32)
b_v = tl.load(p_v, mask=mask_v, other=0).to(tl.float32)
b_q = tl.load(p_q, mask=mask_k, other=0).to(tl.float32) * scale
b_v_minus = tl.sum(b_h * b_k[None, :], axis=1)
b_v -= b_v_minus
if IS_BETA_HEADWISE:
b_beta = tl.load(p_beta, mask=mask_v, other=0).to(tl.float32)
else:
b_beta = tl.load(p_beta).to(tl.float32)
tl.store(p_u, b_v.to(p_v.dtype.element_ty), mask=mask_v)
b_v *= b_beta
b_h += b_k[None, :] * b_v[:, None]
b_o = b_h * b_q[None, :]
b_o = tl.sum(b_o, axis=1)
tl.store(p_o, b_o.to(p_o.dtype.element_ty), mask=mask_v)
p_q += K if HEAD_FIRST else H * K
p_k += K if HEAD_FIRST else H * K
p_o += V if HEAD_FIRST else H * V
p_v += V if HEAD_FIRST else H * V
p_u += V if HEAD_FIRST else H * V
p_beta += (1 if HEAD_FIRST else H) * (V if IS_BETA_HEADWISE else 1)
if STORE_FINAL_STATE:
p_ht = ht + i_nh * K * V + (i_k * BK + tl.arange(0, BK)[None, :]
) * V + (i_v * BV + tl.arange(0, BV)[:, None])
tl.store(p_ht, b_h.to(p_ht.dtype.element_ty), mask=mask_h)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Recurrent Neural Networks"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Latency Sensitive"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/delta_rule/fused_recurrent.py |
6222b5ee-b5c3-4653-81db-38b9198dc016 | rms_norm_dquant.py | AlibabaPAI/FLASHNN | flashnn/triton_kernels/rms_norm_dquant.py | 528a9301587f5fb135b25d973a87ba0a40a703a7 | 0 | @triton.jit
def _rms_norm_dquant_kernel(X, Y, W, scale, stride, N, eps, BLOCK_SIZE: tl.
constexpr):
row = tl.program_id(0)
Y += row * stride
X += row * stride
_var = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
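    # Three passes over the row: (1) sum of squares for the RMS statistic, (2) max |norm| to pick the int8 scale, (3) quantize and store.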
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
x = tl.load(X + cols, mask=cols < N, other=0.0).to(tl.float32)
_var += x * x
var = tl.sum(_var, axis=0) / N
rstd = 1 / tl.sqrt(var + eps)
_max_x = 0.0
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
mask = cols < N
x = tl.load(X + cols, mask=mask, other=0.0).to(tl.float32)
w = tl.load(W + cols, mask=mask)
norm = x * rstd * w
_max_x = tl.maximum(_max_x, tl.max(tl.abs(norm), axis=0))
scale_x = _max_x / 127.0
tl.store(scale + row, scale_x)
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
mask = cols < N
x = tl.load(X + cols, mask=mask, other=0.0).to(tl.float32)
w = tl.load(W + cols, mask=mask)
norm = x * rstd * w
norm = norm / scale_x
norm = tl.where(norm > 0, norm + 0.5, norm - 0.5)
tl.store(Y + cols, norm.to(tl.int8), mask=mask)
| {
"Data Type": [
"fp32",
"int8"
],
"Functionality": [
"Normalization",
"Quantization"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Memory-Bound"
]
} | [
"Apache"
] | https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/rms_norm_dquant.py |
cfcb2dbd-a045-4be5-8067-8ff20faf4034 | seqlen_utils.py | Kitsunetic/kitsu | kitsu/nn/seqlen_utils.py | 826967a493c89753ac2cf1e28b52b79998fc9076 | 0 | @triton.jit
def seqlen_to_batch_index_kernel(seqlen_ptr, idx_ptr, BLK: tl.constexpr):
pid = tl.program_id(0)
i = tl.load(seqlen_ptr + pid)
j = tl.load(seqlen_ptr + pid + 1)
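    # Sequence pid owns output rows [i, j): write its batch index into each slot.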
idx = tl.arange(0, BLK)
tl.store(idx_ptr + i + idx, pid, mask=idx < j - i)
| {
"Data Type": [],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/Kitsunetic/kitsu/blob/826967a493c89753ac2cf1e28b52b79998fc9076/kitsu/nn/seqlen_utils.py |
272fdc2b-cbf6-4942-9981-90ca27a426e6 | fused_recurrent.py | sustcsonglin/flash-linear-attention | fla/ops/delta_rule/fused_recurrent.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_INITIAL_STATE': lambda args: args['h0'] is not
None, 'USE_FINAL_STATE_GRADIENT': lambda args: args['dht'] is not None,
'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.jit
def fused_recurrent_delta_rule_bwd_kernel(q, k, v, beta, h0, dh0, dht, do,
dq, dk, dv, db, offsets, scale, B: tl.constexpr, T: tl.constexpr, H: tl
.constexpr, K: tl.constexpr, V: tl.constexpr, BK: tl.constexpr, BV: tl.
constexpr, NK: tl.constexpr, IS_BETA_HEADWISE: tl.constexpr,
USE_INITIAL_STATE: tl.constexpr, USE_FINAL_STATE_GRADIENT: tl.constexpr,
USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl.constexpr):
i_v, i_k, i_nh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_n, i_h = i_nh // H, i_nh % H
if USE_OFFSETS:
bos, eos = tl.load(offsets + i_n).to(tl.int64), tl.load(offsets +
i_n + 1).to(tl.int64)
all = T
T = eos - bos
else:
bos, eos = i_n * T, i_n * T + T
all = B * T
mask_k = i_k * BK + tl.arange(0, BK) < K
mask_v = i_v * BV + tl.arange(0, BV) < V
if HEAD_FIRST:
p_q = q + i_nh * T * K + i_k * BK + tl.arange(0, BK) + (T - 1) * K
p_k = k + i_nh * T * K + i_k * BK + tl.arange(0, BK) + (T - 1) * K
p_v = v + i_nh * T * V + i_v * BV + tl.arange(0, BV) + (T - 1) * V
p_do = do + i_nh * T * V + i_v * BV + tl.arange(0, BV) + (T - 1) * V
p_dk = dk + (i_v * B * H + i_nh) * T * K + i_k * BK + tl.arange(0, BK
) + (T - 1) * K
p_dv = dv + (i_k * B * H + i_nh) * T * V + i_v * BV + tl.arange(0, BV
) + (T - 1) * V
if IS_BETA_HEADWISE:
p_beta = beta + i_nh * T * V + i_v * BV + tl.arange(0, BV) + (T - 1
) * V
p_dbeta = db + (i_v * NK * B * H + i_k * B * H + i_nh
) * T * V + tl.arange(0, BV) + (T - 1) * V
else:
p_beta = beta + i_nh * T + T - 1
p_dbeta = db + (i_v * B * H + i_nh) * T + T - 1
else:
p_q = q + (bos * H + i_h) * K + i_k * BK + tl.arange(0, BK) + (T - 1
) * H * K
p_k = k + (bos * H + i_h) * K + i_k * BK + tl.arange(0, BK) + (T - 1
) * H * K
p_v = v + (bos * H + i_h) * V + i_v * BV + tl.arange(0, BV) + (T - 1
) * H * V
p_do = do + (bos * H + i_h) * V + i_v * BV + tl.arange(0, BV) + (T - 1
) * H * V
p_dk = dk + ((i_v * all + bos) * H + i_h) * K + i_k * BK + tl.arange(
0, BK) + (T - 1) * H * K
p_dv = dv + ((i_k * all + bos) * H + i_h) * V + i_v * BV + tl.arange(
0, BV) + (T - 1) * H * V
if IS_BETA_HEADWISE:
p_beta = beta + (bos + T - 1
) * H * V + i_h * V + i_v * BV + tl.arange(0, BV)
p_dbeta = db + ((i_v * NK + i_k) * all + bos + T - 1
) * H * V + i_h * V + tl.arange(0, BV)
else:
p_beta = beta + (bos + T - 1) * H + i_h
p_dbeta = db + (i_v * all + bos + T - 1) * H + i_h
b_dh = tl.zeros([BK, BV], dtype=tl.float32)
if USE_FINAL_STATE_GRADIENT:
p_ht = dht + i_nh * K * V + (i_k * BK + tl.arange(0, BK)[:, None]
) * V + (i_v * BV + tl.arange(0, BV)[None, :])
b_dh += tl.load(p_ht, mask=mask_k[:, None] & mask_v[None, :], other=0
).to(tl.float32)
for _ in range(T):
b_q = tl.load(p_q, mask=mask_k, other=0).to(tl.float32) * scale
b_k = tl.load(p_k, mask=mask_k, other=0).to(tl.float32)
b_v = tl.load(p_v, mask=mask_v, other=0).to(tl.float32)
b_do = tl.load(p_do, mask=mask_v, other=0).to(tl.float32)
if IS_BETA_HEADWISE:
b_beta = tl.load(p_beta, mask=mask_v, other=0).to(tl.float32)
else:
b_beta = tl.load(p_beta).to(tl.float32)
b_dh += b_q[:, None] * b_do[None, :]
b_dk = tl.sum(b_dh * (b_v * b_beta)[None, :], axis=1)
b_dv = tl.sum(b_dh * b_k[:, None], axis=0)
b_db = b_dv * b_v if IS_BETA_HEADWISE else tl.sum(b_dv * b_v)
b_dv = b_dv * b_beta
tl.store(p_dk, b_dk.to(p_dk.dtype.element_ty), mask=mask_k)
tl.store(p_dv, b_dv.to(p_dv.dtype.element_ty), mask=mask_v)
if IS_BETA_HEADWISE:
tl.store(p_dbeta, b_db.to(p_dbeta.dtype.element_ty), mask=mask_v)
else:
tl.store(p_dbeta, b_db.to(p_dbeta.dtype.element_ty))
b_dh -= b_k[:, None] * b_dv[None, :]
p_q -= K if HEAD_FIRST else H * K
p_k -= K if HEAD_FIRST else H * K
p_v -= V if HEAD_FIRST else H * V
p_do -= V if HEAD_FIRST else H * V
p_dk -= K if HEAD_FIRST else H * K
p_dv -= V if HEAD_FIRST else H * V
p_dbeta -= (1 if HEAD_FIRST else H) * (V if IS_BETA_HEADWISE else 1)
p_beta -= (1 if HEAD_FIRST else H) * (V if IS_BETA_HEADWISE else 1)
if USE_INITIAL_STATE:
p_dh0 = dh0 + i_nh * K * V + (i_k * BK + tl.arange(0, BK)[:, None]
) * V + (i_v * BV + tl.arange(0, BV)[None, :])
tl.store(p_dh0, b_dh.to(p_dh0.dtype.element_ty), mask=mask_k[:,
None] & mask_v[None, :])
tl.debug_barrier()
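    # Second pass (forward in time): replay the recurrence to correct dk against the running state h and to accumulate dq.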
b_h = tl.zeros([BK, BV], dtype=tl.float32)
if HEAD_FIRST:
p_q = q + i_nh * T * K + i_k * BK + tl.arange(0, BK)
p_k = k + i_nh * T * K + i_k * BK + tl.arange(0, BK)
p_v = v + i_nh * T * V + i_v * BV + tl.arange(0, BV)
if IS_BETA_HEADWISE:
p_beta = beta + i_nh * T * V + i_v * BV + tl.arange(0, BV)
else:
p_beta = beta + i_nh * T
p_do = do + i_nh * T * V + i_v * BV + tl.arange(0, BV)
p_dq = dq + (i_v * B * H + i_nh) * T * K + i_k * BK + tl.arange(0, BK)
p_dk = dk + (i_v * B * H + i_nh) * T * K + i_k * BK + tl.arange(0, BK)
p_dv = dv + (i_k * B * H + i_nh) * T * V + i_v * BV + tl.arange(0, BV)
else:
p_q = q + (bos * H + i_h) * K + i_k * BK + tl.arange(0, BK)
p_k = k + (bos * H + i_h) * K + i_k * BK + tl.arange(0, BK)
p_v = v + (bos * H + i_h) * V + i_v * BV + tl.arange(0, BV)
if IS_BETA_HEADWISE:
p_beta = beta + (bos * H + i_h) * V + i_v * BV + tl.arange(0, BV)
else:
p_beta = beta + bos * H + i_h
p_do = do + (bos * H + i_h) * V + i_v * BV + tl.arange(0, BV)
p_dq = dq + ((i_v * all + bos) * H + i_h) * K + i_k * BK + tl.arange(
0, BK)
p_dk = dk + ((i_v * all + bos) * H + i_h) * K + i_k * BK + tl.arange(
0, BK)
p_dv = dv + ((i_k * all + bos) * H + i_h) * V + i_v * BV + tl.arange(
0, BV)
if USE_INITIAL_STATE:
mask_h = mask_k[:, None] & mask_v[None, :]
p_h0 = h0 + i_nh * K * V + (i_k * BK + tl.arange(0, BK)[:, None]
) * V + (i_v * BV + tl.arange(0, BV)[None, :])
b_h += tl.load(p_h0, mask=mask_h, other=0).to(tl.float32)
for _ in range(0, T):
b_dk = tl.load(p_dk, mask=mask_k, other=0).to(tl.float32)
b_dv = tl.load(p_dv, mask=mask_v, other=0).to(tl.float32)
b_dk -= tl.sum(b_dv[None, :] * b_h, axis=1)
tl.store(p_dk, b_dk.to(p_dk.dtype.element_ty), mask=mask_k)
b_k = tl.load(p_k, mask=mask_k, other=0).to(tl.float32)
b_v = tl.load(p_v, mask=mask_v, other=0).to(tl.float32)
b_do = tl.load(p_do, mask=mask_v, other=0).to(tl.float32)
if IS_BETA_HEADWISE:
b_beta = tl.load(p_beta, mask=mask_v, other=0).to(tl.float32)
else:
b_beta = tl.load(p_beta).to(tl.float32)
b_v *= b_beta
b_h += b_k[:, None] * b_v[None, :]
b_dq = b_h * b_do[None, :]
d_q = tl.sum(b_dq, axis=1) * scale
tl.store(p_dq, d_q.to(p_dq.dtype.element_ty), mask=mask_k)
p_k += K if HEAD_FIRST else H * K
p_v += V if HEAD_FIRST else H * V
p_do += V if HEAD_FIRST else H * V
p_dq += K if HEAD_FIRST else H * K
p_dk += K if HEAD_FIRST else H * K
p_dv += V if HEAD_FIRST else H * V
p_beta += (1 if HEAD_FIRST else H) * (V if IS_BETA_HEADWISE else 1)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Recurrent Neural Networks"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/delta_rule/fused_recurrent.py |
6737d4e9-b985-4927-a44a-82cdfbceeae6 | fused_cross_entropy.py | sustcsonglin/flash-linear-attention | fla/modules/fused_cross_entropy.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'HAS_SMOOTHING': lambda args: args['label_smoothing'] >
0.0})
@triton.jit
def cross_entropy_bwd_kernel(dlogits_ptr, dloss_ptr, logits_ptr, lse_ptr,
labels_ptr, label_smoothing, logit_scale, lse_square_scale,
ignore_index, total_classes, class_start_idx, n_cols, logits_row_stride,
dlogits_row_stride, dloss_row_stride, BLOCK_SIZE: tl.constexpr,
HAS_SMOOTHING: tl.constexpr):
row_idx = tl.program_id(0)
col_block_idx = tl.program_id(1)
logits_ptr = logits_ptr + row_idx * logits_row_stride.to(tl.int64)
dlogits_ptr = dlogits_ptr + row_idx * dlogits_row_stride.to(tl.int64)
col_offsets = col_block_idx * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
label_idx = tl.load(labels_ptr + row_idx)
if label_idx != ignore_index:
dloss = tl.load(dloss_ptr + row_idx * dloss_row_stride)
else:
dloss = 0.0
logits = tl.load(logits_ptr + col_offsets, mask=col_offsets < n_cols,
other=-float('inf')).to(tl.float32) * logit_scale
lse = tl.load(lse_ptr + row_idx)
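    # Recover softmax probabilities from the saved log-sum-exp; the following line adds the lse^2 regularizer term.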
probs = tl.exp(logits - lse)
probs += 2.0 * lse_square_scale * lse * probs
label_idx -= class_start_idx
if HAS_SMOOTHING:
smooth_negative = label_smoothing / total_classes
probs = tl.where(col_offsets == label_idx, probs - (1 -
label_smoothing), probs) - smooth_negative
else:
probs = tl.where(col_offsets == label_idx, probs - 1.0, probs)
tl.store(dlogits_ptr + col_offsets, dloss * logit_scale * probs, mask=
col_offsets < n_cols)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Softmax"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/modules/fused_cross_entropy.py |
b912e62b-afb4-4422-890d-6fb1182bfd6e | cumsum.py | sustcsonglin/flash-linear-attention | fla/ops/utils/cumsum.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({'BT': BT}, num_warps=num_warps) for
BT in [16, 32, 64] for num_warps in [2, 4, 8]], key=['S'])
@triton.jit
def chunk_global_cumsum_vector_kernel(s, z, offsets, T: tl.constexpr, H: tl
.constexpr, S: tl.constexpr, BT: tl.constexpr, BS: tl.constexpr,
HEAD_FIRST: tl.constexpr, USE_OFFSETS: tl.constexpr):
i_s, i_bh = tl.program_id(0), tl.program_id(1)
i_b, i_h = i_bh // H, i_bh % H
if USE_OFFSETS:
bos, eos = tl.load(offsets + i_b).to(tl.int32), tl.load(offsets +
i_b + 1).to(tl.int32)
else:
bos, eos = i_b * T, i_b * T + T
T = eos - bos
o_i = tl.arange(0, BT)
m_s = tl.where(o_i[:, None] >= o_i[None, :], 1.0, 0.0)
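    # m_s is a lower-triangular ones mask, so tl.dot(m_s, b_s) gives an inclusive prefix sum within the block.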
b_z = tl.zeros([BS], dtype=tl.float32)
for i_t in range(tl.cdiv(T, BT)):
if HEAD_FIRST:
p_s = tl.make_block_ptr(s + i_bh * T * S, (T, S), (S, 1), (i_t *
BT, i_s * BS), (BT, BS), (1, 0))
p_z = tl.make_block_ptr(z + i_bh * T * S, (T, S), (S, 1), (i_t *
BT, i_s * BS), (BT, BS), (1, 0))
else:
p_s = tl.make_block_ptr(s + (bos * H + i_h) * S, (T, S), (H * S,
1), (i_t * BT, i_s * BS), (BT, BS), (1, 0))
p_z = tl.make_block_ptr(z + (bos * H + i_h) * S, (T, S), (H * S,
1), (i_t * BT, i_s * BS), (BT, BS), (1, 0))
b_s = tl.load(p_s, boundary_check=(0, 1)).to(tl.float32)
b_c = b_z[None, :] + tl.dot(m_s, b_s, allow_tf32=False)
tl.store(p_z, b_c.to(p_z.dtype.element_ty), boundary_check=(0, 1))
        b_z += tl.sum(b_s, 0)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access",
"Blocked Access"
],
"Parallelization Strategy": [
"Cooperative Groups"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/utils/cumsum.py |
6a835470-58ab-445d-b53c-4296e4297b79 | triton_kernel.py | yann-Choho/projet_PPML | notebooks/triton_kernel.py | 9274e0561443b01f029ee6e0737f922f71d2da39 | 0 | @triton.autotune(configs=get_autotune_config(), key=['n_elements'])
@triton.jit
def triple_mul_kernel(A_ptr, B_ptr, C_ptr, output_ptr, n_elements,
BLOCK_SIZE: tl.constexpr):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
A = tl.load(A_ptr + offsets, mask=mask)
B = tl.load(B_ptr + offsets, mask=mask)
C = tl.load(C_ptr + offsets, mask=mask)
output = A * B * C
tl.store(output_ptr + offsets, output, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/yann-Choho/projet_PPML/blob/9274e0561443b01f029ee6e0737f922f71d2da39/notebooks/triton_kernel.py |
1162e2ac-eec4-4b6a-ac17-f648e706e5b4 | heuristics.py | daemyung/practice-triton | heuristics.py | 27f727726f1507c8380a1c11751d851c7c4a07ce | 0 | @triton.heuristics({'boundary_check': lambda args: args['x_size'] % args[
'block_size']})
@triton.jit
def add_kernel(x_ptr, y_ptr, z_ptr, size, block_size: tl.constexpr,
boundary_check: tl.constexpr):
offset = tl.program_id(0) * block_size
x_block_ptr = tl.make_block_ptr(x_ptr, shape=(size,), strides=(1,),
offsets=(offset,), block_shape=(block_size,), order=(0,))
y_block_ptr = tl.make_block_ptr(y_ptr, shape=(size,), strides=(1,),
offsets=(offset,), block_shape=(block_size,), order=(0,))
if boundary_check:
x = tl.load(x_block_ptr, boundary_check=(0,))
y = tl.load(y_block_ptr, boundary_check=(0,))
else:
x = tl.load(x_block_ptr)
y = tl.load(y_block_ptr)
z = x + y
z_block_ptr = tl.make_block_ptr(z_ptr, shape=(size,), strides=(1,),
offsets=(offset,), block_shape=(block_size,), order=(0,))
if boundary_check:
tl.store(z_block_ptr, z, boundary_check=(0,))
else:
tl.store(z_block_ptr, z)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Low Latency"
]
} | [
"MIT"
] | https://github.com/daemyung/practice-triton/blob/27f727726f1507c8380a1c11751d851c7c4a07ce/heuristics.py |
79923ee4-c50b-4734-a499-56f92bffc35b | sized_tuned_bwd.py | ROCm/aotriton | tritonsrc/sized_tuned_bwd.py | 016f733e8ff746450e066f78bed68709ccd93e60 | 0 | @triton.autotune(configs=TRITON_CONFIG_LIST_BWD_SIZED, key=['BLOCK_DMODEL',
'max_seqlen_q', 'max_seqlen_k'])
@triton.jit
def sized_tuned_bwd_kernel_dk_dv(Q, K, V, B, sm_scale, Out, DO, DK, DV, L,
D, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh,
stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn,
stride_bz, stride_bh, stride_bm, stride_bn, stride_oz, stride_oh,
stride_om, stride_ok, stride_dkz, stride_dkh, stride_dkn, stride_dkk,
stride_dvz, stride_dvh, stride_dvk, stride_dvn, cu_seqlens_q,
cu_seqlens_k, num_seqlens, max_seqlen_q, max_seqlen_k, head_dim,
dropout_p, philox_seed, philox_offset_base, BLOCK_M: tl.constexpr,
BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr, CAUSAL: tl.constexpr,
ENABLE_DROPOUT: tl.constexpr, PADDED_HEAD: tl.constexpr, BIAS_TYPE: tl.
constexpr):
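    # Autotuned thin wrapper: forwards every argument unchanged to the shared
    # bare_bwd_kernel_dk_dv implementation.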
bare_bwd_kernel_dk_dv(Q, K, V, B, sm_scale, Out, DO, DK, DV, L, D,
stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh,
stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn,
stride_bz, stride_bh, stride_bm, stride_bn, stride_oz, stride_oh,
stride_om, stride_ok, stride_dkz, stride_dkh, stride_dkn,
stride_dkk, stride_dvz, stride_dvh, stride_dvk, stride_dvn,
cu_seqlens_q, cu_seqlens_k, num_seqlens, max_seqlen_q, max_seqlen_k,
head_dim, dropout_p, philox_seed, philox_offset_base, BLOCK_M,
BLOCK_DMODEL, BLOCK_N, CAUSAL, ENABLE_DROPOUT, PADDED_HEAD=
PADDED_HEAD, BIAS_TYPE=BIAS_TYPE)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/ROCm/aotriton/blob/016f733e8ff746450e066f78bed68709ccd93e60/tritonsrc/sized_tuned_bwd.py |
eef87b29-d8f2-420d-a9a1-7f119fb0bc3c | wy_fast.py | sustcsonglin/flash-linear-attention | fla/ops/gated_delta_rule/wy_fast.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({}, num_warps=num_warps) for
num_warps in [1, 2, 4]], key=['BT', 'BK', 'BV'])
@triton.jit
def bwd_prepare_wy_repr_kernel(k, v, beta, g, Aw, Au, dw, du, dk, dv, dbeta,
dg, offsets, indices, T: tl.constexpr, H: tl.constexpr, K: tl.constexpr,
V: tl.constexpr, BT: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr,
HEAD_FIRST: tl.constexpr, USE_OFFSETS: tl.constexpr):
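    # Backward of the WY representation for the gated delta rule: accumulate dA
    # from dw (over K blocks) and du (over V blocks), push it through the
    # strictly lower-triangular A factors, then emit dk, dv, dg and dbeta.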
i_t, i_bh = tl.program_id(0), tl.program_id(1)
i_b, i_h = i_bh // H, i_bh % H
if USE_OFFSETS:
i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices +
i_t * 2 + 1).to(tl.int32)
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
T = eos - bos
else:
bos, eos = i_b * T, i_b * T + T
b_dbeta = tl.zeros([BT], dtype=tl.float32)
b_dA = tl.zeros([BT, BT], dtype=tl.float32)
if HEAD_FIRST:
p_beta = tl.make_block_ptr(beta + i_bh * T, (T,), (1,), (i_t * BT,),
(BT,), (0,))
p_A = tl.make_block_ptr(Aw + i_bh * T * BT, (BT, T), (1, BT), (0,
i_t * BT), (BT, BT), (0, 1))
else:
p_beta = tl.make_block_ptr(beta + (bos * H + i_h), (T,), (H,), (i_t *
BT,), (BT,), (0,))
p_A = tl.make_block_ptr(Aw + (bos * H + i_h) * BT, (BT, T), (1, H *
BT), (0, i_t * BT), (BT, BT), (0, 1))
b_A = tl.load(p_A, boundary_check=(0, 1))
b_beta = tl.load(p_beta, boundary_check=(0,))
for i_k in range(tl.cdiv(K, BK)):
if HEAD_FIRST:
p_k = tl.make_block_ptr(k + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
p_dk = tl.make_block_ptr(dk + i_bh * T * K, (T, K), (K, 1), (
i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_dw = tl.make_block_ptr(dw + i_bh * T * K, (T, K), (K, 1), (
i_t * BT, i_k * BK), (BT, BK), (1, 0))
else:
p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_dk = tl.make_block_ptr(dk + (bos * H + i_h) * K, (T, K), (H *
K, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_dw = tl.make_block_ptr(dw + (bos * H + i_h) * K, (T, K), (H *
K, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
b_k = tl.load(p_k, boundary_check=(0, 1))
b_k_beta = (b_k * b_beta[:, None]).to(b_k.dtype)
b_dw = tl.load(p_dw, boundary_check=(0, 1))
b_dA += tl.dot(b_dw, tl.trans(b_k_beta), allow_tf32=False)
b_dk_beta = tl.dot(b_A, b_dw, allow_tf32=False)
b_dk = b_dk_beta * b_beta[:, None]
b_dbeta += tl.sum(b_dk_beta * b_k, 1)
tl.store(p_dk, b_dk.to(p_dk.dtype.element_ty), boundary_check=(0, 1))
b_dA = tl.where(tl.arange(0, BT)[:, None] > tl.arange(0, BT)[None, :],
b_dA, 0)
b_dA = tl.dot(b_dA.to(b_A.dtype), b_A)
b_dA = tl.dot(b_A, b_dA.to(b_A.dtype))
b_dA = tl.where(tl.arange(0, BT)[:, None] > tl.arange(0, BT)[None, :],
-b_dA, 0).to(k.dtype.element_ty)
if HEAD_FIRST:
p_A = tl.make_block_ptr(Au + i_bh * T * BT, (BT, T), (1, BT), (0,
i_t * BT), (BT, BT), (0, 1))
else:
p_A = tl.make_block_ptr(Au + (bos * H + i_h) * BT, (BT, T), (1, H *
BT), (0, i_t * BT), (BT, BT), (0, 1))
b_A = tl.load(p_A, boundary_check=(0, 1))
b_dA2 = tl.zeros([BT, BT], dtype=tl.float32)
for i_v in range(tl.cdiv(V, BV)):
if HEAD_FIRST:
p_v = tl.make_block_ptr(v + i_bh * T * V, (T, V), (V, 1), (i_t *
BT, i_v * BV), (BT, BV), (1, 0))
p_dv = tl.make_block_ptr(dv + i_bh * T * V, (T, V), (V, 1), (
i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_du = tl.make_block_ptr(du + i_bh * T * V, (T, V), (V, 1), (
i_t * BT, i_v * BV), (BT, BV), (1, 0))
else:
p_v = tl.make_block_ptr(v + (bos * H + i_h) * V, (T, V), (H * V,
1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_dv = tl.make_block_ptr(dv + (bos * H + i_h) * V, (T, V), (H *
V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_du = tl.make_block_ptr(du + (bos * H + i_h) * V, (T, V), (H *
V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_v_beta = (b_v * b_beta[:, None]).to(b_v.dtype)
b_du = tl.load(p_du, boundary_check=(0, 1))
b_dA2 += tl.dot(b_du, tl.trans(b_v_beta), allow_tf32=False)
b_dv_beta = tl.dot(b_A, b_du, allow_tf32=False)
b_dv = b_dv_beta * b_beta[:, None]
b_dbeta += tl.sum(b_dv_beta * b_v, 1)
tl.store(p_dv, b_dv.to(p_dv.dtype.element_ty), boundary_check=(0, 1))
b_dA2 = tl.where(tl.arange(0, BT)[:, None] > tl.arange(0, BT)[None, :],
b_dA2, 0)
b_dA2 = tl.dot(b_dA2.to(b_A.dtype), b_A)
b_dA2 = tl.dot(b_A, b_dA2.to(b_A.dtype))
b_dA2 = tl.where(tl.arange(0, BT)[:, None] > tl.arange(0, BT)[None, :],
-b_dA2, 0).to(k.dtype.element_ty)
if HEAD_FIRST:
p_g = tl.make_block_ptr(g + i_bh * T, (T,), (1,), (i_t * BT,), (BT,
), (0,))
else:
p_g = tl.make_block_ptr(g + (bos * H + i_h), (T,), (H,), (i_t * BT,
), (BT,), (0,))
b_g = tl.load(p_g, boundary_check=(0,))
b_dA += b_dA2 * tl.where(tl.arange(0, BT)[:, None] > tl.arange(0, BT)[
None, :], tl.exp(b_g[:, None] - b_g[None, :]), 0)
b_dA = b_dA.to(k.dtype.element_ty)
b_A = tl.zeros([BT, BT], dtype=tl.float32)
for i_k in range(tl.cdiv(K, BK)):
if HEAD_FIRST:
p_k = tl.make_block_ptr(k + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
p_dk = tl.make_block_ptr(dk + i_bh * T * K, (T, K), (K, 1), (
i_t * BT, i_k * BK), (BT, BK), (1, 0))
else:
p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_dk = tl.make_block_ptr(dk + (bos * H + i_h) * K, (T, K), (H *
K, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
b_k = tl.load(p_k, boundary_check=(0, 1))
b_dk = tl.load(p_dk, boundary_check=(0, 1))
b_k_beta = (b_k * b_beta[:, None]).to(b_k.dtype)
b_A += tl.dot(b_k_beta, tl.trans(b_k))
b_dk_beta = tl.dot(b_dA, b_k, allow_tf32=False)
b_dbeta += tl.sum(b_dk_beta * b_k, 1)
b_dk += tl.dot(tl.trans(b_dA), b_k_beta, allow_tf32=False)
b_dk += b_dk_beta * b_beta[:, None]
tl.store(p_dk, b_dk.to(p_dk.dtype.element_ty), boundary_check=(0, 1))
b_A = b_A * tl.where(tl.arange(0, BT)[:, None] > tl.arange(0, BT)[None,
:], tl.exp(b_g[:, None] - b_g[None, :]), 0)
b_A *= b_dA2
b_dg = tl.sum(b_A, axis=1) - tl.sum(b_A, axis=0)
if HEAD_FIRST:
p_dg = tl.make_block_ptr(dg + i_bh * T, (T,), (1,), (i_t * BT,), (
BT,), (0,))
p_dbeta = tl.make_block_ptr(dbeta + i_bh * T, (T,), (1,), (i_t * BT
,), (BT,), (0,))
else:
p_dg = tl.make_block_ptr(dg + (bos * H + i_h), (T,), (H,), (i_t *
BT,), (BT,), (0,))
p_dbeta = tl.make_block_ptr(dbeta + (bos * H + i_h), (T,), (H,), (
i_t * BT,), (BT,), (0,))
tl.store(p_dg, b_dg.to(p_dg.dtype.element_ty), boundary_check=(0,))
tl.store(p_dbeta, b_dbeta.to(p_dbeta.dtype.element_ty), boundary_check=(0,)
)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gated_delta_rule/wy_fast.py |
bf07f58b-4dc1-4f3e-8074-7f90a37688c0 | naive_associative_rnn_scan.py | TushaarGVS/linear-rnn | linear_rnn/triton/naive_associative_rnn_scan.py | 48320589b73154484be7d09a144923a2b9e56b85 | 0 | @triton.jit
def _naive_associative_rnn_scan_bwd_kernel():
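    # Placeholder in the source: the backward associative scan is not implemented.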
pass
| {
"Data Type": [],
"Functionality": [
"Backpropagation",
"Recurrent Neural Networks"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"Apache"
] | https://github.com/TushaarGVS/linear-rnn/blob/48320589b73154484be7d09a144923a2b9e56b85/linear_rnn/triton/naive_associative_rnn_scan.py |
6008f8eb-f05d-46b8-ba50-eaf48490ed4d | rand_init.py | gmgu/study-triton | 6_random/rand_init.py | 3a9a24fd3f1de3e7465535ffe72f6deac8a419bd | 0 | @triton.jit
def rand_init(out_ptr, n, seed, BLOCK_SIZE: tl.constexpr):
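    # Fill out[0:n] with uniform randoms in [0, 1); tl.rand is counter-based on
    # (seed, offset), so the result is reproducible for a fixed seed.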
pid = tl.program_id(axis=0)
offsets = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
mask = offsets < n
random = tl.rand(seed, offsets)
tl.store(out_ptr + offsets, random, mask=mask)
| {
"Data Type": [],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Memory-Bound"
]
} | [
"Apache"
] | https://github.com/gmgu/study-triton/blob/3a9a24fd3f1de3e7465535ffe72f6deac8a419bd/6_random/rand_init.py |
af1c833d-4eeb-4313-a034-3b8bf3198b9d | main_triton.py | dwgan/GraphMST | main_triton.py | 4d65ed0f108d339e3e4cfff25085a39adc6a48a2 | 0 | @triton.jit
def union_kernel(parent, rank, u, v, BLOCK_SIZE: tl.constexpr):
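    # Union-by-rank step of a union-find sketch: find both roots, take the
    # lane-0 result, and attach the lower-rank root under the higher-rank one
    # (rank ties bump the winner's rank).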
root_u = tl.zeros((BLOCK_SIZE,), dtype=tl.int32)
root_v = tl.zeros((BLOCK_SIZE,), dtype=tl.int32)
find_kernel(parent, u, root_u, BLOCK_SIZE=BLOCK_SIZE)
find_kernel(parent, v, root_v, BLOCK_SIZE=BLOCK_SIZE)
tl.syncwarp()
root_u = root_u[0]
root_v = root_v[0]
if root_u != root_v:
ru_rank = tl.load(rank + root_u)
rv_rank = tl.load(rank + root_v)
        if ru_rank > rv_rank:
            tl.store(parent + root_v, root_u)
        elif ru_rank < rv_rank:
            tl.store(parent + root_u, root_v)
        else:
            tl.store(parent + root_v, root_u)
            tl.atomic_add(rank + root_u, 1)
| {
"Data Type": [],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Shared Memory Intensive"
],
"Parallelization Strategy": [
"Cooperative Groups"
],
"Performance Objective": [
"Memory-Bound"
]
} | [
"MIT"
] | https://github.com/dwgan/GraphMST/blob/4d65ed0f108d339e3e4cfff25085a39adc6a48a2/main_triton.py |
faf1b309-5072-4c15-ba21-d9fb5786455c | 06-fused-attention.py | triton-lang/triton | python/tutorials/06-fused-attention.py | a2b398e0bb1b120f31cf386d6ae3261c3ab84207 | 0 | @triton.jit
def _attn_bwd(Q, K, V, sm_scale, DO, DQ, DK, DV, M, D, stride_z, stride_h,
stride_tok, stride_d, H, N_CTX, BLOCK_M1: tl.constexpr, BLOCK_N1: tl.
constexpr, BLOCK_M2: tl.constexpr, BLOCK_N2: tl.constexpr,
BLK_SLICE_FACTOR: tl.constexpr, HEAD_DIM: tl.constexpr):
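    # Fused attention backward: phase one fixes a key/value block and sweeps
    # query blocks to build dK/dV (masked diagonal blocks first); phase two
    # fixes a query block and sweeps key blocks to build dQ. M holds the
    # base-2 softmax log-sum-exp terms, D the row-wise dots of O and dO.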
LN2: tl.constexpr = 0.6931471824645996
bhid = tl.program_id(2)
off_chz = (bhid * N_CTX).to(tl.int64)
adj = (stride_h * (bhid % H) + stride_z * (bhid // H)).to(tl.int64)
pid = tl.program_id(0)
Q += adj
K += adj
V += adj
DO += adj
DQ += adj
DK += adj
DV += adj
M += off_chz
D += off_chz
offs_k = tl.arange(0, HEAD_DIM)
start_n = pid * BLOCK_N1
start_m = start_n
MASK_BLOCK_M1: tl.constexpr = BLOCK_M1 // BLK_SLICE_FACTOR
offs_n = start_n + tl.arange(0, BLOCK_N1)
dv = tl.zeros([BLOCK_N1, HEAD_DIM], dtype=tl.float32)
dk = tl.zeros([BLOCK_N1, HEAD_DIM], dtype=tl.float32)
k = tl.load(K + offs_n[:, None] * stride_tok + offs_k[None, :] * stride_d)
v = tl.load(V + offs_n[:, None] * stride_tok + offs_k[None, :] * stride_d)
num_steps = BLOCK_N1 // MASK_BLOCK_M1
dk, dv = _attn_bwd_dkdv(dk, dv, Q, k, v, sm_scale, DO, M, D, stride_tok,
stride_d, H, N_CTX, MASK_BLOCK_M1, BLOCK_N1, HEAD_DIM, start_n,
start_m, num_steps, MASK=True)
start_m += num_steps * MASK_BLOCK_M1
num_steps = (N_CTX - start_m) // BLOCK_M1
dk, dv = _attn_bwd_dkdv(dk, dv, Q, k, v, sm_scale, DO, M, D, stride_tok,
stride_d, H, N_CTX, BLOCK_M1, BLOCK_N1, HEAD_DIM, start_n, start_m,
num_steps, MASK=False)
dv_ptrs = DV + offs_n[:, None] * stride_tok + offs_k[None, :] * stride_d
tl.store(dv_ptrs, dv)
dk *= sm_scale
dk_ptrs = DK + offs_n[:, None] * stride_tok + offs_k[None, :] * stride_d
tl.store(dk_ptrs, dk)
start_m = pid * BLOCK_M2
end_n = start_m + BLOCK_M2
MASK_BLOCK_N2: tl.constexpr = BLOCK_N2 // BLK_SLICE_FACTOR
offs_m = start_m + tl.arange(0, BLOCK_M2)
q = tl.load(Q + offs_m[:, None] * stride_tok + offs_k[None, :] * stride_d)
dq = tl.zeros([BLOCK_M2, HEAD_DIM], dtype=tl.float32)
do = tl.load(DO + offs_m[:, None] * stride_tok + offs_k[None, :] * stride_d
)
m = tl.load(M + offs_m)
m = m[:, None]
num_steps = BLOCK_M2 // MASK_BLOCK_N2
dq = _attn_bwd_dq(dq, q, K, V, do, m, D, stride_tok, stride_d, H, N_CTX,
BLOCK_M2, MASK_BLOCK_N2, HEAD_DIM, start_m, end_n - num_steps *
MASK_BLOCK_N2, num_steps, MASK=True)
end_n -= num_steps * MASK_BLOCK_N2
num_steps = end_n // BLOCK_N2
dq = _attn_bwd_dq(dq, q, K, V, do, m, D, stride_tok, stride_d, H, N_CTX,
BLOCK_M2, BLOCK_N2, HEAD_DIM, start_m, end_n - num_steps * BLOCK_N2,
num_steps, MASK=False)
dq_ptrs = DQ + offs_m[:, None] * stride_tok + offs_k[None, :] * stride_d
dq *= LN2
tl.store(dq_ptrs, dq)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Backpropagation"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/triton-lang/triton/blob/a2b398e0bb1b120f31cf386d6ae3261c3ab84207/python/tutorials/06-fused-attention.py |
21eb0b3f-9ba8-480f-bb96-a5685bc5fff5 | snake.py | falkaer/multi-scale-music | snake.py | a7794ddfb3bbd95b70acf3fe72a08d8a1d47564d | 0 | @triton.autotune(configs=[triton.Config({}, num_warps=4), triton.Config({},
num_warps=8), triton.Config({}, num_warps=16)], reset_to_zero=['DYDA',
'DYDC'], key=['C'])
@triton.jit
def _snake_bwd_triton(X, OUT, ALPHA, CR, GRAD, DYDX, DYDA, DYDC, X_stride1,
X_stride2, X_stride3, OUT_stride1, OUT_stride2, OUT_stride3,
GRAD_stride1, GRAD_stride2, GRAD_stride3, DYDX_stride1, DYDX_stride2,
DYDX_stride3, DYDA_stride, DYDC_stride, ALPHA_stride, CR_stride, C, N,
CORR: tl.constexpr, X_NEEDS_GRAD: tl.constexpr, ALPHA_NEEDS_GRAD: tl.
constexpr, CR_NEEDS_GRAD: tl.constexpr, BLOCK_SIZE: tl.constexpr):
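    # Backward of the snake activation y = x + sin(alpha * x)^2 / alpha: since
    # dy/dx = 1 + sin(2 * alpha * x), dydx below is (sin2ax + 1) * grad (divided
    # by the correction cr when CORR); per-channel alpha/cr gradients are
    # reduced across programs with atomic adds.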
pid = tl.program_id(0)
batch_idx = pid // C
channel_idx = pid % C
block_start = tl.program_id(1) * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
GRAD = GRAD + batch_idx * GRAD_stride1 + channel_idx * GRAD_stride2
grad = tl.load(GRAD + offsets * GRAD_stride3, mask=offsets < N, other=0)
if CORR:
cr = tl.load(CR + channel_idx * CR_stride)
if ALPHA_NEEDS_GRAD | CR_NEEDS_GRAD:
OUT = OUT + batch_idx * OUT_stride1 + channel_idx * OUT_stride2
out = tl.load(OUT + offsets * OUT_stride3, mask=offsets < N, other=0)
outgrad = tl.sum(out * grad, axis=0)
if X_NEEDS_GRAD | ALPHA_NEEDS_GRAD:
X = X + batch_idx * X_stride1 + channel_idx * X_stride2
x = tl.load(X + offsets * X_stride3, mask=offsets < N, other=0)
alpha = tl.load(ALPHA + channel_idx * ALPHA_stride)
        sin2ax = tl.sin((2 * alpha * x).to(tl.float32)).to(x.dtype)
dydx = (sin2ax + 1) * grad
if CORR:
dydx = dydx / cr
if X_NEEDS_GRAD:
DYDX = DYDX + batch_idx * DYDX_stride1 + channel_idx * DYDX_stride2
tl.store(DYDX + offsets * DYDX_stride3, dydx, mask=offsets < N)
if ALPHA_NEEDS_GRAD:
dyda = (tl.sum(x * dydx, axis=0) - outgrad) / alpha
tl.atomic_add(DYDA + channel_idx * DYDA_stride, dyda)
if CR_NEEDS_GRAD:
dydc = -outgrad / cr
tl.atomic_add(DYDC + channel_idx * DYDC_stride, dydc)
| {
"Data Type": [],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [
"Shared Memory Intensive"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Memory-Bound"
]
} | [
"MIT"
] | https://github.com/falkaer/multi-scale-music/blob/a7794ddfb3bbd95b70acf3fe72a08d8a1d47564d/snake.py |
b98029f7-6d1d-4960-8e62-6b7ea5e45c0e | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/abc/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.jit
def chunk_abc_bwd_kernel_dh(q, z, do, dh, s_k_h, s_k_t, s_k_d, s_v_h, s_v_t,
s_v_d, s_h_h, s_h_t, s_h_d, scale, T: tl.constexpr, K: tl.constexpr, V:
tl.constexpr, BT: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr, NT:
tl.constexpr, NORMK: tl.constexpr):
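    # Reverse-time chunk recurrence: b_dh carries the running dH state for one
    # (K-block, V-block) pair; the normalizer z rescales q (if NORMK) or do
    # (otherwise) via exp of differences against the carried normalizer b_zp.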
i_k, i_v, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
b_dh = tl.zeros([BK, BV], dtype=tl.float32)
b_zp = tl.full([BK if NORMK else BV], float('inf'), dtype=tl.float32)
for i_t in range(NT - 1, -1, -1):
i_p = tl.maximum(i_t * BT - 1, 0)
p_q = tl.make_block_ptr(q + i_bh * s_k_h, (K, T), (s_k_d, s_k_t), (
i_k * BK, i_t * BT), (BK, BT), (0, 1))
p_do = tl.make_block_ptr(do + i_bh * s_v_h, (T, V), (s_v_t, s_v_d),
(i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_dh = tl.make_block_ptr(dh + i_bh * s_h_h + i_t * K * V, (K, V), (
s_h_t, s_h_d), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
b_q = tl.load(p_q, boundary_check=(0, 1))
b_q = (b_q * scale).to(b_q.dtype)
b_do = tl.load(p_do, boundary_check=(0, 1))
tl.store(p_dh, b_dh.to(p_dh.dtype.element_ty), boundary_check=(0, 1))
if NORMK:
p_z = tl.make_block_ptr(z + i_bh * s_k_h, (K, T), (s_k_d, s_k_t
), (i_k * BK, i_t * BT), (BK, BT), (0, 1))
p_zc = tl.make_block_ptr(z + i_bh * s_k_h, (T * K,), (s_k_d,),
(i_p * K + i_k * BK,), (BK,), (0,))
b_zc = tl.load(p_zc, boundary_check=(0,))
b_r, b_zp = tl.exp(b_zc - b_zp), b_zc
b_z = tl.load(p_z, boundary_check=(0, 1))
b_q = (b_q * tl.exp(b_zc[:, None] - b_z)).to(b_q.dtype)
b_dh = b_dh * b_r[:, None]
else:
p_z = tl.make_block_ptr(z + i_bh * s_v_h, (T, V), (s_v_t, s_v_d
), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_zc = tl.make_block_ptr(z + i_bh * s_v_h, (T * V,), (s_v_d,),
(i_p * V + i_v * BV,), (BV,), (0,))
b_zc = tl.load(p_zc, boundary_check=(0,))
b_r, b_zp = tl.exp(b_zc - b_zp), b_zc
b_z = tl.load(p_z, boundary_check=(0,))
b_do = (b_do * tl.exp(b_zc[None, :] - b_z)).to(b_do.dtype)
b_dh = b_dh * b_r[None, :]
b_dh += tl.dot(b_q, b_do, allow_tf32=False)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Blocked Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/abc/chunk.py |
4ff74bb4-943c-4c66-937e-646014c4ff27 | math.py | BobMcDear/attorch | attorch/math.py | da06cb6236bb47195e33fe3986ed21c675ed94cc | 0 | @triton.jit
def softmax(input, log: tl.constexpr):
"""
Normalizes the input using softmax along the last dimension.
Args:
input: Input to normalize.
The input must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
log: Flag for indicating if the log of softmax should be taken.
Returns:
Input normalized by softmax.
"""
input = input.to(tl.float32)
input = input - tl.max(input, axis=1)[:, None]
numerator = tl.exp(input)
denominator = tl.sum(numerator, axis=1)[:, None]
if log:
output = input - tl.log(denominator)
else:
output = numerator / denominator
return output
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Softmax"
],
"Memory Access Pattern": [
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/BobMcDear/attorch/blob/da06cb6236bb47195e33fe3986ed21c675ed94cc/attorch/math.py |
7dd66f4e-95eb-48ec-8b08-1f9ced106147 | parallel.py | sustcsonglin/flash-linear-attention | fla/ops/based/parallel.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.jit
def _parallel_based_bwd_dq(i_bh, i_c, i_k, i_v, i_h, q, k, v, do, dz, dq,
s_k_h, s_k_t, s_k_d, s_v_h, s_v_t, s_v_d, B, H, T, scale, BTL: tl.
constexpr, BTS: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr, K: tl
.constexpr, V: tl.constexpr):
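    # dQ for "based" linear attention: the first loop accumulates over fully
    # visible past chunks, the second applies the causal mask m_s within the
    # current chunk; the dz term is added once (on the i_v == 0 split).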
p_do = tl.make_block_ptr(do + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), (
i_c * BTL, i_v * BV), (BTL, BV), (1, 0))
p_q = tl.make_block_ptr(q + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), (i_c *
BTL, i_k * BK), (BTL, BK), (1, 0))
b_q = tl.load(p_q, boundary_check=(0, 1))
b_do = tl.load(p_do, boundary_check=(0, 1)).to(b_q.dtype)
b_q = (b_q * scale).to(b_q.dtype)
b_dq = tl.zeros([BTL, BK], dtype=tl.float32)
p_k = tl.make_block_ptr(k + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), (0,
i_k * BK), (BTS, BK), (1, 0))
p_v = tl.make_block_ptr(v + i_bh * s_v_h, (V, T), (s_v_d, s_v_t), (i_v *
BV, 0), (BV, BTS), (0, 1))
p_dz = dz + i_bh * T + i_c * BTL + tl.arange(0, BTL)
b_dz = tl.load(p_dz, mask=i_c * BTL + tl.arange(0, BTL) < T)
for _ in range(0, i_c * BTL, BTS):
b_k = tl.load(p_k, boundary_check=(0, 1))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_ds = tl.dot(b_do, b_v, allow_tf32=False)
        if i_v == 0:
            b_ds += b_dz[:, None]
b_s = tl.dot(b_q, tl.trans(b_k), allow_tf32=False)
b_dq += tl.dot((b_ds * (1 + b_s)).to(b_v.dtype), b_k, allow_tf32=False)
p_k = tl.advance(p_k, (BTS, 0))
p_v = tl.advance(p_v, (0, BTS))
b_dq *= scale
o_q = tl.arange(0, BTL)
o_k = tl.arange(0, BTS)
p_k = tl.make_block_ptr(k + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), (i_c *
BTL, i_k * BK), (BTS, BK), (1, 0))
p_v = tl.make_block_ptr(v + i_bh * s_v_h, (V, T), (s_v_d, s_v_t), (i_v *
BV, i_c * BTL), (BV, BTS), (0, 1))
for _ in range(i_c * BTL, (i_c + 1) * BTL, BTS):
b_k = tl.load(p_k, boundary_check=(0, 1))
b_v = tl.load(p_v, boundary_check=(0, 1))
m_s = o_q[:, None] >= o_k[None, :]
b_ds = tl.dot(b_do, b_v, allow_tf32=False)
        if i_v == 0:
            b_ds += b_dz[:, None]
b_ds = tl.where(m_s, b_ds, 0) * scale
b_s = tl.dot(b_q, tl.trans(b_k), allow_tf32=False)
b_s = tl.where(m_s, b_s, 0)
b_dq += tl.dot((b_ds + b_ds * b_s).to(b_k.dtype), b_k, allow_tf32=False
)
p_k = tl.advance(p_k, (BTS, 0))
p_v = tl.advance(p_v, (0, BTS))
o_k += BTS
p_dq = tl.make_block_ptr(dq + (i_bh + B * H * i_v) * s_k_h, (T, K), (
s_k_t, s_k_d), (i_c * BTL, i_k * BK), (BTL, BK), (1, 0))
tl.store(p_dq, b_dq.to(p_dq.dtype.element_ty), boundary_check=(0, 1))
return
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/based/parallel.py |
35bf1229-189c-4ad2-b417-b760322afbf3 | positional_embedding.py | sjjeong94/ai_compiler_study | aicom/positional_embedding.py | e87284aab74acab704e2d192190be446e328e1c6 | 0 | @triton.jit
def rope_bw(dx_ptr, f_ptr, dt_ptr, dx_s_stride, f_s_stride, dt_s_stride, d,
d2, BLOCK_SIZE: tl.constexpr):
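    # RoPE backward: rotate the incoming gradient dx by the angles in f to get
    # dt over the first d2 features; the remaining d - d2 features are copied
    # through unchanged in BLOCK_SIZE chunks.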
s_idx = tl.program_id(0)
bh_idx = tl.program_id(1)
dx_start_ptr = dx_ptr + s_idx * dx_s_stride
f_start_ptr = f_ptr + s_idx * f_s_stride
dt_start_ptr = dt_ptr + s_idx * dt_s_stride
d2_half = d2 // 2
col_offsets = tl.arange(0, BLOCK_SIZE)
mask = col_offsets < d2_half
f0_ptrs = f_start_ptr + col_offsets
f1_ptrs = f_start_ptr + col_offsets + d2_half
f0 = tl.load(f0_ptrs, mask=mask, other=0.0)
cos0 = tl.cos(f0)
sin0 = tl.sin(f0)
f1 = tl.load(f1_ptrs, mask=mask, other=0.0)
cos1 = tl.cos(f1)
sin1 = tl.sin(f1)
dx0_ptrs = dx_start_ptr + bh_idx * d + col_offsets
dx1_ptrs = dx_start_ptr + bh_idx * d + col_offsets + d2_half
dx0 = tl.load(dx0_ptrs, mask=mask, other=0.0)
dx1 = tl.load(dx1_ptrs, mask=mask, other=0.0)
dt0 = dx0 * cos0 + dx1 * sin1
dt1 = dx1 * cos1 - dx0 * sin0
dt0_ptrs = dt_start_ptr + bh_idx * d + col_offsets
dt1_ptrs = dt_start_ptr + bh_idx * d + col_offsets + d2_half
tl.store(dt0_ptrs, dt0, mask=mask)
tl.store(dt1_ptrs, dt1, mask=mask)
if d2 < d:
remainder = d - d2
q, r = remainder // BLOCK_SIZE, remainder % BLOCK_SIZE
for i in range(q):
dx2_ptrs = (dx_start_ptr + bh_idx * d + col_offsets + d2 +
BLOCK_SIZE * i)
dt2_ptrs = (dt_start_ptr + bh_idx * d + col_offsets + d2 +
BLOCK_SIZE * i)
dx2 = tl.load(dx2_ptrs)
tl.store(dt2_ptrs, dx2)
if r > 0:
dx2_ptrs = (dx_start_ptr + bh_idx * d + col_offsets + d2 +
BLOCK_SIZE * q)
dt2_ptrs = (dt_start_ptr + bh_idx * d + col_offsets + d2 +
BLOCK_SIZE * q)
mask = col_offsets < r
dx2 = tl.load(dx2_ptrs, mask=mask, other=0.0)
tl.store(dt2_ptrs, dx2, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sjjeong94/ai_compiler_study/blob/e87284aab74acab704e2d192190be446e328e1c6/aicom/positional_embedding.py |
f0a324aa-5bec-4ba9-8dee-d1c6f875aff3 | triton_jagged_tensor_ops.py | pytorch/FBGEMM | fbgemm_gpu/fbgemm_gpu/triton/jagged/triton_jagged_tensor_ops.py | fe980ab54a6e28818d81c8694b6564e7f804418b | 0 | @triton.jit
def triton_jagged_to_dense(jagged_value_ptr, jagged_offsets_ptr,
jagged_value_row_stride, output_dense_ptr, dense_indices_ptr,
dense_col_stride, dense_row_stride, dense_matrix_stride, JAGGED_DIM: tl
.constexpr, thread_block_row_size: tl.constexpr, thread_block_col_size:
tl.constexpr, operation_function: tl.constexpr, operation_dense) -> None:
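    # Copy one jagged row range [begin, end) into its dense destination in
    # (thread_block_row_size x thread_block_col_size) tiles, optionally fusing
    # an elementwise add/mul with an existing dense operand.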
pid = tl.program_id(0)
begin = tl.load(jagged_offsets_ptr + pid)
end = tl.load(jagged_offsets_ptr + (pid + 1))
jagged_value_ptr += begin * jagged_value_row_stride
if JAGGED_DIM > 2:
dense_indice = tl.load(dense_indices_ptr + pid)
if dense_indice == -1:
return
output_dense_ptr += dense_indice
if operation_function is not None:
operation_dense += dense_indice
else:
output_dense_ptr += pid * dense_matrix_stride
if operation_function is not None:
operation_dense += pid * dense_matrix_stride
offset_row = tl.arange(0, thread_block_row_size)
N = tl.minimum(dense_row_stride, jagged_value_row_stride)
M = tl.minimum(dense_matrix_stride // dense_row_stride, end - begin)
for _i in range(begin, end, thread_block_row_size):
offset_col = tl.arange(0, thread_block_col_size)
block_offset = offset_row[:, None] * dense_row_stride + offset_col[
None, :] * dense_col_stride
for _j in range(0, N, thread_block_col_size):
mask = (offset_row[:, None] < M) & (offset_col[None, :] < N)
jagged_val = tl.load(jagged_value_ptr + block_offset, mask=mask,
other=0)
if operation_function is not None:
val1 = jagged_val
val2 = tl.load(operation_dense + block_offset, mask=mask,
other=0)
if operation_function == 'add':
jagged_val = tensor_elementwise_add(val1, val2)
else:
jagged_val = tensor_elementwise_mul(val1, val2)
tl.store(output_dense_ptr + block_offset, jagged_val, mask=mask)
offset_col += thread_block_col_size
block_offset += thread_block_col_size
offset_row += thread_block_row_size
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access",
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"BSD",
"MIT"
] | https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/triton/jagged/triton_jagged_tensor_ops.py |
7373edff-5262-4dfe-8b20-5d8d947fc6be | triton_chunk.py | NX-AI/xlstm-jax | xlstm_jax/models/xlstm_pytorch/blocks/mlstm/backend/triton_chunk.py | 6615e620ba4ecdbe4fd9cc4e9a5a313b133e84a7 | 0 | @triton.jit
def chunk_mlstm_fwd_kernel_C(k, v, C, n, m, i, f, initial_C, initial_n,
initial_m, final_C, final_n, final_m, s_qk_h, s_qk_t, s_qk_d, s_vh_h,
s_vh_t, s_vh_d, s_C_h, s_C_t, s_n_h, H: tl.constexpr, T: tl.constexpr,
K: tl.constexpr, V: tl.constexpr, BT: tl.constexpr, BK: tl.constexpr,
BV: tl.constexpr, NT: tl.constexpr, USE_INITIAL_STATE: tl.constexpr,
STORE_FINAL_STATE: tl.constexpr):
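    # Chunkwise forward recurrence of the mLSTM memory: store the incoming
    # (C, n, m) state for each time chunk, then advance it with exp2-stabilized
    # input/forget gates (b_m tracks the running max for numerical stability).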
i_k, i_v, i_bC = tl.program_id(0), tl.program_id(1), tl.program_id(2)
if USE_INITIAL_STATE:
p_C0 = tl.make_block_ptr(initial_C + i_bC * K * V, (K, V), (V, 1),
(i_k * BK, i_v * BV), (BK, BV), (1, 0))
p_n0 = tl.make_block_ptr(initial_n + i_bC * K, (K,), (1,), (i_k *
BK,), (BK,), (0,))
p_m0 = initial_m
b_C = tl.load(p_C0, boundary_check=(0, 1)).to(tl.float32)
b_n = tl.load(p_n0, boundary_check=(0,)).to(tl.float32)
b_m = tl.load(p_m0).to(tl.float32)
else:
b_C = tl.zeros([BK, BV], dtype=tl.float32)
b_n = tl.zeros([BK], dtype=tl.float32)
b_m = 0.0
b_m_next = 0.0
for i_t in range(NT):
p_k = tl.make_block_ptr(k + i_bC * s_qk_h, (K, T), (s_qk_d, s_qk_t),
(i_k * BK, i_t * BT), (BK, BT), (0, 1))
p_v = tl.make_block_ptr(v + i_bC * s_vh_h, (T, V), (s_vh_t, s_vh_d),
(i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_C = tl.make_block_ptr(C + i_bC * s_C_h + i_t * K * V, (K, V), (
s_C_t, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
p_n = tl.make_block_ptr(n + i_bC * s_n_h + i_t * K, (K,), (1,), (
i_k * BK,), (BK,), (0,))
tl.store(p_C, b_C.to(p_C.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_n, b_n.to(p_n.dtype.element_ty), boundary_check=(0,))
tl.store(m + i_bC * (NT + 1) + i_t, b_m)
b_k = tl.load(p_k, boundary_check=(0, 1))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_f_last = tl.load(f + i_bC * T + i_t * BT + BT - 1)
b_f = tl.load(f + i_bC * T + i_t * BT + tl.arange(0, BT))
b_i = tl.load(i + i_bC * T + i_t * BT + tl.arange(0, BT))
b_g = b_i + b_f_last - b_f
        b_m_next = tl.max(b_g)
b_m_next = tl.maximum(b_f_last + b_m, b_m_next)
b_C *= tl.math.exp2(b_f_last - b_m_next + b_m)
b_n *= tl.math.exp2(b_f_last - b_m_next + b_m)
b_C += tl.dot(b_k, (b_v * tl.math.exp2(b_g - b_m_next)[:, None]).to
(b_k.dtype), allow_tf32=False)
b_n += tl.sum(b_k * tl.math.exp2(b_g - b_m_next), axis=1)
b_m = b_m_next
if STORE_FINAL_STATE:
p_Ct = tl.make_block_ptr(final_C + i_bC * K * V, (K, V), (V, 1), (
i_k * BK, i_v * BV), (BK, BV), (1, 0))
p_n = tl.make_block_ptr(final_n + i_bC * K, (K,), (1,), (i_k * BK,),
(BK,), (0,))
tl.store(p_Ct, b_C.to(p_Ct.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_n, b_n.to(p_n.dtype.element_ty), boundary_check=(0,))
tl.store(final_m + i_bC, b_m)
tl.store(m + i_bC * (NT + 1) + NT, b_m)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Recurrent Neural Networks"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"Low Latency"
]
} | [
"Apache",
"BSD"
] | https://github.com/NX-AI/xlstm-jax/blob/6615e620ba4ecdbe4fd9cc4e9a5a313b133e84a7/xlstm_jax/models/xlstm_pytorch/blocks/mlstm/backend/triton_chunk.py |
0d15dee4-56b7-4d6d-bb33-39e4b5938078 | fp8_gemm.py | pytorch/FBGEMM | fbgemm_gpu/experimental/gemm/triton_gemm/fp8_gemm.py | fe980ab54a6e28818d81c8694b6564e7f804418b | 0 | @triton.autotune(configs=MATMUL_CONFIGS, key=['m_key', 'n_key', 'k_key'],
prune_configs_by={'early_config_prune': early_config_prune,
'perf_model': estimate_matmul_time, 'top_k': 10})
@triton.heuristics({'EVEN_K': lambda args: args['K'] % (args['BLOCK_K'] *
args['SPLIT_K']) == 0})
@triton.jit
def _kernel_matmul_fp8_block_slowacc(A, B, C, M, N, K, m_key, n_key, k_key,
A_scale, B_scale, scale_block_m: tl.constexpr, scale_block_n: tl.
constexpr, scale_block_k: tl.constexpr, stride_am, stride_ak, stride_bn,
stride_bk, stride_cm, stride_cn, stride_scale_am, stride_scale_ak,
stride_scale_bn, stride_scale_bk, dot_out_dtype: tl.constexpr,
allow_tf32: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr,
BLOCK_K: tl.constexpr, GROUP_M: tl.constexpr, SPLIT_K: tl.constexpr,
EVEN_K: tl.constexpr, AB_DTYPE: tl.constexpr) -> None:
"""Matmul kernel of [M, K] @ [N, K] with block-wise scales
Performs swizzled matmul in [BLOCK_M, BLOCK_K] with [BLOCK_K, BLOCK_N] tiles and
A and B scaled by a scaling factor per [scale_block_m, scale_block_k] and
[scale_block_n, scale_block_k] tiles
respectively.
Todo:
* Support scale_block_{mnk} < BLOCK{MNK} for each dim.
Args:
A (TensorWrapper): [M, K] input tensor.
B (TensorWrapper): [N, K] input tensor.
C (TensorWrapper): [M, N] output tensor.
M (int): M dimension of input tensor.
N (int): N dimension of input tensor.
K (int): K dimension of input tensor.
m_key (int): Autotuning key for M dimension of input tensor.
n_key (int): Autotuning key for N dimension of input tensor.
k_key (int): Autotuning key for K dimension of input tensor.
A_scale (TensorWrapper): [cdiv(M, scale_block_m), cdiv(K, scale_block_k)] reciprocal scale tensor per block. A * A_scale = original A
B_scale (TensorWrapper): [cdiv(N, scale_block_n), cdiv(K, scale_block_k)] reciprocal scale tensor per block. B * B_scale = original B
scale_block_m (int): Block size for M dimension of A_scale.
scale_block_n (int): Block size for N dimension of B_scale.
scale_block_k (int): Block size for K dimension of A_scale and B_scale.
stride_am (int): Stride of M dimension of A.
stride_ak (int): Stride of K dimension of A.
stride_bn (int): Stride of N dimension of B.
stride_bk (int): Stride of K dimension of B.
stride_cm (int): Stride of M dimension of C.
stride_cn (int): Stride of N dimension of C.
stride_scale_am (int): Stride of M dimension of A_scale.
stride_scale_ak (int): Stride of K dimension of A_scale.
stride_scale_bn (int): Stride of N dimension of B_scale.
stride_scale_bk (int): Stride of K dimension of B_scale.
dot_out_dtype (torch.dtype): Output type of tensor core.
allow_tf32 (bool): Whether to use TF32 for tensor core.
BLOCK_M (int): Block size for M dimension.
BLOCK_N (int): Block size for N dimension.
BLOCK_K (int): Block size for K dimension.
GROUP_M (int): Number of groups for M dimension swizzle.
SPLIT_K (int): Number of SM's to launch per row.
EVEN_K (bool): Whether K is evenly divisible by BLOCK_K * SPLIT_K.
AB_DTYPE (bool): Whether to cast A and B to C.dtype before tensor core.
"""
assert BLOCK_M < scale_block_m
assert BLOCK_N < scale_block_n
assert BLOCK_K < scale_block_k
pid = tl.program_id(0)
pid_z = tl.program_id(1)
grid_m = tl.cdiv(M, BLOCK_M)
grid_n = tl.cdiv(N, BLOCK_N)
width = GROUP_M * grid_n
group_id = pid // width
group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
pid_m = group_id * GROUP_M + pid % group_size
pid_n = pid % width // group_size
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
rk = pid_z * BLOCK_K + tl.arange(0, BLOCK_K)
A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn)
acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=dot_out_dtype)
scale_m = pid_m * BLOCK_M // scale_block_m
scale_n = pid_n * BLOCK_N // scale_block_n
_0 = tl.zeros((1, 1), dtype=C.dtype.element_ty)
for k in range(0, tl.cdiv(K, BLOCK_K * SPLIT_K)):
pid_k = k * SPLIT_K + pid_z
scale_k = pid_k * BLOCK_K // scale_block_k
a_scale = tl.load(A_scale + scale_m * stride_scale_am + scale_k *
stride_scale_ak)
b_scale = tl.load(B_scale + scale_n * stride_scale_bn + scale_k *
stride_scale_bk)
scale = a_scale * b_scale
if EVEN_K:
a = tl.load(A)
b = tl.load(B)
else:
k_remaining = K - k * (BLOCK_K * SPLIT_K)
a = tl.load(A, mask=rk[None, :] < k_remaining, other=_0)
b = tl.load(B, mask=rk[:, None] < k_remaining, other=_0)
if AB_DTYPE:
a = a.to(C.dtype.element_ty)
b = b.to(C.dtype.element_ty)
acc += tl.dot(a, b, out_dtype=dot_out_dtype, allow_tf32=allow_tf32
) * scale
A += BLOCK_K * SPLIT_K * stride_ak
B += BLOCK_K * SPLIT_K * stride_bk
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
acc = acc.to(C.dtype.element_ty)
c = C + (rm[:, None] * stride_cm + rn[None, :] * stride_cn)
mask = (rm < M)[:, None] & (rn < N)[None, :]
if SPLIT_K == 1:
tl.store(c, acc, mask=mask)
else:
tl.atomic_add(c, acc, mask=mask)
| {
"Data Type": [],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Blocked Access",
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput"
]
} | [
"BSD",
"MIT"
] | https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/experimental/gemm/triton_gemm/fp8_gemm.py |
9c7bbe01-fd64-4864-80d0-e77e220fab0a | fused_chunk.py | sustcsonglin/flash-linear-attention | fla/ops/delta_rule/fused_chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.autotune(configs=[triton.Config({}, num_warps=1), triton.Config({},
num_warps=2), triton.Config({}, num_warps=4), triton.Config({},
num_warps=8)], key=['BT', 'BK', 'BV'])
@triton.jit
def fused_chunk_delta_rule_bwd_kernel(q, k, v, d, dht, dh0, do, dq, dk, dv,
dd, initial_state, s_k_h, s_k_t, s_k_d, s_v_h, s_v_t, s_v_d, B, H, T,
scale, BT: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr, DK: tl.
constexpr, DV: tl.constexpr, USE_INITIAL_STATE: tl.constexpr, USE_DHT:
tl.constexpr, USE_DHO: tl.constexpr, CHECK: tl.constexpr):
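    # Two-pass backward for the fused-chunk delta rule: a reverse sweep carries
    # the state gradient b_dh to produce dk/dv, then a forward sweep rebuilds
    # the state b_h to produce dq and the correction gradient dd.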
i_v, i_k, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
o_i = tl.arange(0, BT)
b_dh = tl.zeros([BK, BV], dtype=tl.float32)
if USE_DHT:
p_dht = tl.make_block_ptr(dht + i_bh * DK * DV, (DK, DV), (DV, 1),
(i_k * BK, i_v * BV), (BK, BV), (1, 0))
b_dh += tl.load(p_dht, boundary_check=(0, 1)).to(tl.float32)
m_s = o_i[:, None] <= o_i[None, :]
for i in range(tl.cdiv(T, BT) - 1, -1, -1):
p_q = tl.make_block_ptr(q + i_bh * s_k_h, (DK, T), (s_k_d, s_k_t),
(i_k * BK, i * BT), (BK, BT), (0, 1))
p_d = tl.make_block_ptr(d + i_bh * s_k_h, (DK, T), (s_k_d, s_k_t),
(i_k * BK, i * BT), (BK, BT), (0, 1))
p_k = tl.make_block_ptr(k + i_bh * s_k_h, (T, DK), (s_k_t, s_k_d),
(i * BT, i_k * BK), (BT, BK), (1, 0))
p_v = tl.make_block_ptr(v + i_bh * s_v_h, (T, DV), (s_v_t, s_v_d),
(i * BT, i_v * BV), (BT, BV), (1, 0))
p_do = tl.make_block_ptr(do + i_bh * s_v_h, (T, DV), (s_v_t, s_v_d),
(i * BT, i_v * BV), (BT, BV), (1, 0))
p_dk = tl.make_block_ptr(dk + (i_bh + i_v * B * H) * s_k_h, (T, DK),
(s_k_t, s_k_d), (i * BT, i_k * BK), (BT, BK), (1, 0))
p_dv = tl.make_block_ptr(dv + (i_bh + i_k * B * H) * s_v_h, (T, DV),
(s_v_t, s_v_d), (i * BT, i_v * BV), (BT, BV), (1, 0))
b_q = tl.load(p_q, boundary_check=(0, 1))
b_q = (b_q * scale).to(b_q.dtype)
b_k = tl.load(p_k, boundary_check=(0, 1))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_do = tl.load(p_do, boundary_check=(0, 1))
b_ds = tl.dot(b_v, tl.trans(b_do), allow_tf32=False)
b_ds = tl.where(m_s, b_ds, 0).to(b_q.dtype)
b_s = tl.dot(b_k, b_q, allow_tf32=False)
b_s = tl.where(m_s, b_s, 0).to(b_q.dtype)
b_dk = tl.dot(b_ds, tl.trans(b_q), allow_tf32=False)
b_dv = tl.dot(b_s, b_do, allow_tf32=False)
b_d = tl.load(p_d, boundary_check=(0, 1))
b_dk += tl.dot(b_v, tl.trans(b_dh).to(b_v.dtype), allow_tf32=False)
b_dv += tl.dot(b_k, b_dh.to(b_k.dtype), allow_tf32=False)
b_dh += tl.dot(b_q, b_do, allow_tf32=False)
b_dh -= tl.dot(b_d, b_dv.to(b_d.dtype), allow_tf32=False)
tl.store(p_dk, b_dk.to(p_dk.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_dv, b_dv.to(p_dv.dtype.element_ty), boundary_check=(0, 1))
if USE_DHO:
p_dh0 = tl.make_block_ptr(dh0 + i_bh * DK * DV, (DK, DV), (DV, 1),
(i_k * BK, i_v * BV), (BK, BV), (1, 0))
tl.store(p_dh0, b_dh.to(p_dh0.dtype.element_ty), boundary_check=(0, 1))
b_h = None
tl.debug_barrier()
m_s = o_i[:, None] >= o_i[None, :]
b_h = tl.zeros([BV, BK], dtype=tl.float32)
if USE_INITIAL_STATE:
p_h = tl.make_block_ptr(initial_state + i_bh * DK * DV, (DV, DK), (
1, DV), (i_v * BV, i_k * BK), (BV, BK), (0, 1))
b_h += tl.load(p_h, boundary_check=(0, 1)).to(tl.float32)
NT = tl.cdiv(T, BT)
for i in range(0, NT):
p_dv = tl.make_block_ptr(dv + i_bh * s_v_h, (T, DV), (s_v_t, s_v_d),
(i * BT, i_v * BV), (BT, BV), (1, 0))
b_dv = tl.load(p_dv, boundary_check=(0, 1))
b_dd = tl.dot(b_dv.to(k.dtype.element_ty), b_h.to(k.dtype.
element_ty), allow_tf32=False)
p_dd = tl.make_block_ptr(dd + (i_bh + i_v * B * H) * s_k_h, (T, DK),
(s_k_t, s_k_d), (i * BT, i_k * BK), (BT, BK), (1, 0))
tl.store(p_dd, -b_dd.to(p_dd.dtype.element_ty), boundary_check=(0, 1))
p_k = tl.make_block_ptr(k + i_bh * s_k_h, (T, DK), (s_k_t, s_k_d),
(i * BT, i_k * BK), (BT, BK), (1, 0))
p_v = tl.make_block_ptr(v + i_bh * s_v_h, (DV, T), (s_v_d, s_v_t),
(i_v * BV, i * BT), (BV, BT), (0, 1))
p_do = tl.make_block_ptr(do + i_bh * s_v_h, (T, DV), (s_v_t, s_v_d),
(i * BT, i_v * BV), (BT, BV), (1, 0))
p_dq = tl.make_block_ptr(dq + (i_bh + i_v * B * H) * s_k_h, (T, DK),
(s_k_t, s_k_d), (i * BT, i_k * BK), (BT, BK), (1, 0))
b_k = tl.load(p_k, boundary_check=(0, 1))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_do = tl.load(p_do, boundary_check=(0, 1))
b_ds = tl.dot(b_do, b_v, allow_tf32=False)
b_ds = tl.where(m_s, b_ds, 0)
b_dq = tl.dot(b_ds.to(b_k.dtype), b_k, allow_tf32=False)
if CHECK and i == 0:
b_dq += tl.dot(b_do, b_h.to(b_do.dtype), allow_tf32=False)
b_h = b_h + tl.dot(b_v, b_k, allow_tf32=False)
else:
b_dq += tl.dot(b_do, b_h.to(b_do.dtype), allow_tf32=False)
b_h = b_h + tl.dot(b_v, b_k, allow_tf32=False)
b_dq *= scale
tl.store(p_dq, b_dq.to(p_dq.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Blocked Access",
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/delta_rule/fused_chunk.py |
16c05989-05bd-492f-89cf-86a814ede982 | mhmoe.py | dtadpole/triton-playground | mhmoe.py | 2d317976722d63080133b1bf88b1f0cdec98f831 | 0 | @triton.jit
def d_silu(x, o):
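    # Derivative of SiLU given pre-activation x and activation o = x * sigmoid(x):
    # d/dx silu(x) = sigmoid(x) + o * (1 - sigmoid(x)).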
sig = tl.sigmoid(x)
return sig + o * (1 - sig)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Activation Functions",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/dtadpole/triton-playground/blob/2d317976722d63080133b1bf88b1f0cdec98f831/mhmoe.py |
fd1f4159-2938-4d88-91f3-e1646ab75398 | math.py | BobMcDear/attorch | attorch/math.py | da06cb6236bb47195e33fe3986ed21c675ed94cc | 0 | @triton.jit
def calc_mean_and_inv_std(input, last_dim, eps, last_dim_mask: tl.constexpr):
"""
Calculates the mean and inverse standard deviation of the input
along the last dimension.
Args:
input: Input whose mean and inverse standard deviation are calculated.
The input must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
last_dim: Size of the last dimension of input.
eps: Epsilon added in the square root in the denominator
to avoid division by zero.
last_dim_mask: Mask for the last dimension indicating
which elements should be included in the calculations.
The mask must be of shape [BLOCK_SIZE2].
Returns:
Mean and inverse standard deviation of the input.
"""
input = input.to(tl.float32)
mean = tl.sum(input, axis=1) / last_dim
diff = tl.where(last_dim_mask[None, :], input - mean[:, None], 0)
inv_std = tl.rsqrt(tl.sum(diff * diff, axis=1) / last_dim + eps)
return mean, inv_std
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization"
],
"Memory Access Pattern": [
"Blocked Access",
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/BobMcDear/attorch/blob/da06cb6236bb47195e33fe3986ed21c675ed94cc/attorch/math.py |
dc7321dc-32fb-4fed-9ba5-5273d19cb153 | kernels.py | pytorch-labs/tritonbench | tritonbench/operators/jagged_sum/kernels.py | 3a5dccb159834968567a2e45e561dc1aeaa8f8a8 | 0 | @triton.autotune(configs=[triton.Config({'BLOCK_SIZE_RAGGED': b_r,
'BLOCK_SIZE_M': b_m}, num_warps=w, num_stages=s) for b_r, b_m, w, s in
itertools.product(BLOCK_SIZES, BLOCK_SIZES, NUM_WARPS, NUM_STAGES)],
key=['M'])
@triton.jit
def triton_jagged_sum_kernel_variable_length_loop_sum_then_buffer(
input_ptr_values, input_ptr_offsets, output_ptr, M, BLOCK_SIZE_RAGGED:
tl.constexpr, BLOCK_SIZE_M: tl.constexpr):
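    # One program per (sequence, column-block): sum BLOCK_SIZE_RAGGED rows at a
    # time into a float32 buffer, storing the BLOCK_SIZE_M column slice once.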
pid = tl.program_id(axis=0)
pid_b = pid // tl.cdiv(M, BLOCK_SIZE_M)
pid_m = pid % tl.cdiv(M, BLOCK_SIZE_M)
buffer = tl.zeros((1, BLOCK_SIZE_M), dtype=tl.float32)
block_start_m = pid_m * BLOCK_SIZE_M
offsets_m = block_start_m + tl.arange(0, BLOCK_SIZE_M)
mask_m = offsets_m < M
ragged_start, ragged_end = tl.load(input_ptr_offsets + pid_b), tl.load(
input_ptr_offsets + (pid_b + 1))
for block_start_ragged in range(ragged_start, ragged_end, BLOCK_SIZE_RAGGED
):
offsets_ragged = block_start_ragged + tl.arange(0, BLOCK_SIZE_RAGGED)
mask_ragged = offsets_ragged < ragged_end
idxs = offsets_ragged[:, None] * M + offsets_m
mask = mask_ragged[:, None] & mask_m
input = tl.load(input_ptr_values + idxs, mask=mask, other=0)
buffer += tl.sum(input, axis=0)
buffer_view = buffer.reshape((BLOCK_SIZE_M,))
output_offsets = offsets_m + pid_b * M
output_mask = output_offsets < M * (pid_b + 1)
tl.store(output_ptr + output_offsets, buffer_view, mask=output_mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Blocked Access",
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Memory-Bound"
]
} | [
"BSD"
] | https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/operators/jagged_sum/kernels.py |
e1d634ae-75e9-4158-a173-e373bc727999 | triton_fused_attn_rerope.py | LouChao98/vqtree | ops/triton_fused_attn_rerope.py | 27a53274df7a804bce27dffcce5f5be73f64b6f3 | 0 | @triton.heuristics({'EVEN_M': lambda args: args['seqlen_q'] % args[
'BLOCK_M'] == 0, 'EVEN_N': lambda args: args['seqlen_k'] % args[
'BLOCK_N'] == 0})
@triton.jit
def _fwd_kernel(Q1, Q2, K1, K2, V, sm_scale, Out, stride_qz, stride_qh,
stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk,
stride_vz, stride_vh, stride_vk, stride_vn, stride_oz, stride_oh,
stride_om, stride_on, seqlen_q, seqlen_k, BLOCK_M: tl.constexpr,
BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr, IS_CAUSAL: tl.
constexpr, WINDOW: tl.constexpr, EVEN_M: tl.constexpr, EVEN_N: tl.constexpr
):
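    # Fused attention with two score paths (ReRoPE-style): (q1, k1) is used
    # inside the |i - j| < WINDOW band, (q2, k2) outside it; blocks straddling
    # the boundary compute both and select per element with tl.where.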
start_m = tl.program_id(0)
off_hz = tl.program_id(1)
q_offset = off_hz * stride_qh
kv_offset = off_hz * stride_kh
Q1_block_ptr = tl.make_block_ptr(base=Q1 + q_offset, shape=(seqlen_q,
BLOCK_DMODEL), strides=(stride_qm, stride_qk), offsets=(start_m *
BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0))
Q2_block_ptr = tl.make_block_ptr(base=Q2 + q_offset, shape=(seqlen_q,
BLOCK_DMODEL), strides=(stride_qm, stride_qk), offsets=(start_m *
BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0))
K1_block_ptr = tl.make_block_ptr(base=K1 + kv_offset, shape=(
BLOCK_DMODEL, seqlen_k), strides=(stride_kk, stride_kn), offsets=(0,
0), block_shape=(BLOCK_DMODEL, BLOCK_N), order=(0, 1))
K2_block_ptr = tl.make_block_ptr(base=K2 + kv_offset, shape=(
BLOCK_DMODEL, seqlen_k), strides=(stride_kk, stride_kn), offsets=(0,
0), block_shape=(BLOCK_DMODEL, BLOCK_N), order=(0, 1))
V_block_ptr = tl.make_block_ptr(base=V + kv_offset, shape=(seqlen_k,
BLOCK_DMODEL), strides=(stride_vk, stride_vn), offsets=(0, 0),
block_shape=(BLOCK_N, BLOCK_DMODEL), order=(1, 0))
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float('inf')
l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
qk_scale = sm_scale * 1.44269504
if EVEN_M:
q1 = tl.load(Q1_block_ptr)
q2 = tl.load(Q2_block_ptr)
else:
q1 = tl.load(Q1_block_ptr, boundary_check=(0,), padding_option='zero')
q2 = tl.load(Q2_block_ptr, boundary_check=(0,), padding_option='zero')
lo = 0
hi = tl.minimum((start_m + 1) * BLOCK_M, seqlen_k
) if IS_CAUSAL else seqlen_k
for start_n in range(lo, hi, BLOCK_N):
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
if IS_CAUSAL:
qk = tl.where(offs_m[:, None] >= start_n + offs_n[None, :], qk,
float('-inf'))
if not EVEN_N:
qk = tl.where(start_n + offs_n[None, :] < seqlen_k, qk, float(
'-inf'))
if start_n <= start_m * BLOCK_M - WINDOW - BLOCK_N or start_n >= (
start_m + 1) * BLOCK_M + WINDOW:
if EVEN_N & EVEN_M:
k2 = tl.load(K2_block_ptr)
else:
k2 = tl.load(K2_block_ptr, boundary_check=(1,),
padding_option='zero')
qk += tl.dot(q2, k2)
elif start_n > (start_m + 1
) * BLOCK_M - WINDOW and start_n < start_m * BLOCK_M + WINDOW - BLOCK_N:
if EVEN_N & EVEN_M:
k1 = tl.load(K1_block_ptr)
else:
k1 = tl.load(K1_block_ptr, boundary_check=(1,),
padding_option='zero')
qk += tl.dot(q1, k1)
else:
if EVEN_N & EVEN_M:
k1 = tl.load(K1_block_ptr)
k2 = tl.load(K2_block_ptr)
else:
k1 = tl.load(K1_block_ptr, boundary_check=(1,),
padding_option='zero')
k2 = tl.load(K2_block_ptr, boundary_check=(1,),
padding_option='zero')
qk1 = tl.dot(q1, k1)
qk2 = tl.dot(q2, k2)
qk += tl.where(tl.abs(offs_m[:, None] - (start_n + offs_n[None,
:])) < WINDOW, qk1, qk2)
qk *= qk_scale
m_i_new = tl.maximum(m_i, tl.max(qk, 1))
alpha = tl.math.exp2(m_i - m_i_new)
p = tl.math.exp2(qk - m_i_new[:, None])
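        # acc_scale = l_i * 0 + alpha is arithmetically just alpha; the extra
        # term is a known workaround kept from the original kernel.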
acc_scale = l_i * 0 + alpha
acc *= acc_scale[:, None]
        if EVEN_N & EVEN_M:
            v = tl.load(V_block_ptr)
        else:
            v = tl.load(V_block_ptr, boundary_check=(0,), padding_option='zero'
                )
acc += tl.dot(p.to(v.dtype), v)
l_i = l_i * alpha + tl.sum(p, 1)
m_i = m_i_new
K1_block_ptr = tl.advance(K1_block_ptr, (0, BLOCK_N))
K2_block_ptr = tl.advance(K2_block_ptr, (0, BLOCK_N))
V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
acc = acc / l_i[:, None]
O_block_ptr = tl.make_block_ptr(base=Out + q_offset, shape=(seqlen_q,
BLOCK_DMODEL), strides=(stride_om, stride_on), offsets=(start_m *
BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0))
offs_d = tl.arange(0, BLOCK_DMODEL)
out_ptrs = Out + q_offset + (offs_m[:, None] * stride_om + offs_d[None, :])
if EVEN_M:
tl.store(out_ptrs, acc)
else:
tl.store(out_ptrs, acc, mask=offs_m[:, None] < seqlen_q)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Softmax"
],
"Memory Access Pattern": [
"Blocked Access",
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"Apache"
] | https://github.com/LouChao98/vqtree/blob/27a53274df7a804bce27dffcce5f5be73f64b6f3/ops/triton_fused_attn_rerope.py |
77311a94-8d98-4965-a46c-7c601a7fd275 | kernels.py | pytorch-labs/tritonbench | tritonbench/operators/jagged_sum/kernels.py | 3a5dccb159834968567a2e45e561dc1aeaa8f8a8 | 0 | @triton.autotune(configs=[triton.Config({'BLOCK_SIZE_RAGGED': b_r,
'BLOCK_SIZE_M': b_m}, num_warps=w, num_stages=s) for b_r, b_m, w, s in
itertools.product(BLOCK_SIZES, BLOCK_SIZES, NUM_WARPS, NUM_STAGES)],
key=['M'])
@triton.jit
def triton_jagged_sum_kernel_simple_fused_sum_then_buffer(input_ptr_values,
input_ptr_offsets, output_ptr, M, MAX_SEQLEN, BLOCK_SIZE_RAGGED: tl.
constexpr, BLOCK_SIZE_M: tl.constexpr):
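    # Same reduction as the variable-length variant, but with a fixed
    # MAX_SEQLEN trip count (friendlier to autotuning); masking past ragged_end
    # discards out-of-sequence rows.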
pid = tl.program_id(axis=0)
pid_ragged = pid // tl.cdiv(M, BLOCK_SIZE_M)
pid_m = pid % tl.cdiv(M, BLOCK_SIZE_M)
buffer = tl.zeros((1, BLOCK_SIZE_M), dtype=tl.float32)
block_start_m = pid_m * BLOCK_SIZE_M
offsets_m = block_start_m + tl.arange(0, BLOCK_SIZE_M)
mask_m = offsets_m < M
ragged_start, ragged_end = tl.load(input_ptr_offsets + pid_ragged
), tl.load(input_ptr_offsets + (pid_ragged + 1))
for block_pos in range(0, MAX_SEQLEN, BLOCK_SIZE_RAGGED):
block_start_ragged = ragged_start + block_pos
offsets_ragged = block_start_ragged + tl.arange(0, BLOCK_SIZE_RAGGED)
mask_ragged = offsets_ragged < ragged_end
idxs = offsets_ragged[:, None] * M + offsets_m
mask = mask_ragged[:, None] & mask_m
input = tl.load(input_ptr_values + idxs, mask=mask, other=0)
buffer += tl.sum(input, axis=0)
buffer_view = buffer.reshape((BLOCK_SIZE_M,))
output_offsets = offsets_m + pid_ragged * M
output_mask = output_offsets < M * (pid_ragged + 1)
tl.store(output_ptr + output_offsets, buffer_view, mask=output_mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Blocked Access",
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Memory-Bound"
]
} | [
"BSD"
] | https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/operators/jagged_sum/kernels.py |
454eebde-6921-487e-b8e0-ee204d96b580 | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/retention/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'STORE_INITIAL_STATE_GRADIENT': lambda args: args['dh0'
] is not None, 'USE_FINAL_STATE_GRADIENT': lambda args: args['dht'] is not
None, 'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({'BK': BK, 'BV': BV}, num_warps=
num_warps, num_stages=num_stages) for BK in [32, 64, 128] for BV in [32,
64, 128] for num_warps in [2, 4, 8] for num_stages in [2, 3, 4]], key=[
'BT'])
@triton.jit
def chunk_retention_bwd_kernel_dh(q, do, dh, dh0, dht, offsets,
chunk_offsets, scale, T: tl.constexpr, H: tl.constexpr, K: tl.constexpr,
V: tl.constexpr, BT: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr,
STORE_INITIAL_STATE_GRADIENT: tl.constexpr, USE_FINAL_STATE_GRADIENT:
tl.constexpr, USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl.constexpr):
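    # Reverse-time chunk recurrence for retention: b_dh carries the state
    # gradient, decayed per chunk by d_b and per position by d_i, both derived
    # from the head-dependent log2 decay rate b_b.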
i_k, i_v, i_nh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_n, i_h = i_nh // H, i_nh % H
b_b = tl.math.log2(1 - tl.math.exp2(-5 - i_h * 1.0))
if USE_OFFSETS:
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
T = eos - bos
NT = tl.cdiv(T, BT)
boh = tl.load(chunk_offsets + i_n).to(tl.int32)
else:
bos, eos = i_n * T, i_n * T + T
NT = tl.cdiv(T, BT)
boh = i_n * NT
o_i = tl.arange(0, BT)
d_i = tl.math.exp2((o_i + 1) * b_b)
b_dh = tl.zeros([BK, BV], dtype=tl.float32)
if USE_FINAL_STATE_GRADIENT:
p_dht = tl.make_block_ptr(dht + i_nh * K * V, (K, V), (V, 1), (i_k *
BK, i_v * BV), (BK, BV), (1, 0))
b_dh += tl.load(p_dht, boundary_check=(0, 1)).to(tl.float32)
for i_t in range(NT - 1, -1, -1):
if HEAD_FIRST:
p_q = tl.make_block_ptr(q + i_nh * T * K, (K, T), (1, K), (i_k *
BK, i_t * BT), (BK, BT), (0, 1))
p_do = tl.make_block_ptr(do + i_nh * T * V, (T, V), (V, 1), (
i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_dh = tl.make_block_ptr(dh + (i_nh * NT + i_t) * K * V, (K, V),
(V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
else:
p_q = tl.make_block_ptr(q + (bos * H + i_h) * K, (K, T), (1, H *
K), (i_k * BK, i_t * BT), (BK, BT), (0, 1))
p_do = tl.make_block_ptr(do + (bos * H + i_h) * V, (T, V), (H *
V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_dh = tl.make_block_ptr(dh + ((boh + i_t) * H + i_h) * K * V,
(K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
tl.store(p_dh, b_dh.to(p_dh.dtype.element_ty), boundary_check=(0, 1))
d_b = tl.math.exp2(min(BT, T - i_t * BT) * b_b)
b_q = tl.load(p_q, boundary_check=(0, 1))
b_q = (b_q * scale).to(b_q.dtype)
b_do = tl.load(p_do, boundary_check=(0, 1))
b_dh = d_b * b_dh + tl.dot(b_q, (b_do * d_i[:, None]).to(b_q.dtype),
allow_tf32=False)
if STORE_INITIAL_STATE_GRADIENT:
p_dh0 = tl.make_block_ptr(dh0 + i_nh * K * V, (K, V), (V, 1), (i_k *
BK, i_v * BV), (BK, BV), (1, 0))
tl.store(p_dh0, b_dh.to(p_dh0.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Memory-Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/retention/chunk.py |
4ca187d4-705d-426a-ab32-956f78d4d117 | sb_fwd.py | shawntan/stickbreaking-attention | stickbreaking_attention/sb_attn/sb_fwd.py | 8dd32ad5e58f0ee0232fd4782dc53d354ff8d283 | 0 | @triton.autotune(configs=get_configs(), key=['token_size', 'head_size'])
@triton.jit
def _forward(Q_ptr, stride_qb, stride_qh, stride_qm: tl.constexpr,
stride_qd: tl.constexpr, K_ptr, stride_kb, stride_kh, stride_kn: tl.
constexpr, stride_kd: tl.constexpr, V_ptr, stride_vb, stride_vh,
stride_vn: tl.constexpr, stride_vd: tl.constexpr, O_ptr, stride_ob,
stride_oh, stride_om: tl.constexpr, stride_od: tl.constexpr, R_ptr,
stride_rb, stride_rh, stride_rm: tl.constexpr, A_ptr, stride_ab,
stride_ah, stride_am: tl.constexpr, W_ptr, stride_wb, stride_wh,
stride_wm, stride_wn, logit_scale: tl.constexpr, attend_current: tl.
constexpr, batch_size, token_size, head_size: tl.constexpr, num_heads:
tl.constexpr, BLOCK_D: tl.constexpr, NO_D_MASK: tl.constexpr, NO_M_MASK:
tl.constexpr, NO_N_MASK: tl.constexpr, ALLOW_TF32: tl.constexpr,
inv_log2: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr,
no_grad: tl.constexpr=False, acc_dtype: tl.constexpr=tl.float32,
return_attention: tl.constexpr=False, is_compiling: tl.constexpr=False):
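    # Per-(batch, head, row-block) driver: precompute the lower-triangular
    # cumulative mask cm and per-tensor base pointers, then delegate the actual
    # stick-breaking attention row computation to _forward_one_row.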
tl.static_assert(BLOCK_M % BLOCK_N == 0)
batch_id = tl.program_id(0)
head_pid = tl.program_id(1)
prog_id = tl.program_id(2)
seq_length = token_size
qk_scale = inv_log2 * logit_scale
M_range = tl.arange(0, BLOCK_M)
N_range = tl.arange(0, BLOCK_N)
D_range = tl.arange(0, BLOCK_D)
D_mask = D_range < head_size
cm = tl.where(N_range[:, None] >= N_range[None, :], 1.0, 0.0).to(Q_ptr.
type.element_ty)
head_id = head_pid
seq_prog_id = prog_id
Q_head_seq_ptr = Q_ptr + stride_qb * batch_id + stride_qh * head_id
K_head_seq_ptr = K_ptr + stride_kb * batch_id + stride_kh * head_id
V_head_seq_ptr = V_ptr + stride_vb * batch_id + stride_vh * head_id
O_head_seq_ptr = O_ptr + stride_ob * batch_id + stride_oh * head_id
R_head_seq_ptr = R_ptr + stride_rb * batch_id + stride_rh * head_id
A_head_seq_ptr = A_ptr + stride_ab * batch_id + stride_ah * head_id
W_head_seq_ptr = W_ptr + stride_wb * batch_id + stride_wh * head_id
_forward_one_row(seq_prog_id, seq_length, qk_scale, M_range, N_range,
D_range, D_mask, cm, Q_head_seq_ptr, stride_qm, stride_qd,
K_head_seq_ptr, stride_kn, stride_kd, V_head_seq_ptr, stride_vn,
stride_vd, O_head_seq_ptr, stride_om, stride_od, R_head_seq_ptr,
stride_rm, A_head_seq_ptr, stride_am, W_head_seq_ptr, stride_wm,
stride_wn, BLOCK_D, NO_D_MASK, NO_M_MASK, NO_N_MASK, ALLOW_TF32,
BLOCK_M, BLOCK_N, no_grad, acc_dtype, return_attention,
attend_current=attend_current, is_compiling=is_compiling)
| {
"Data Type": [
"fp32",
"fp16"
],
"Functionality": [
"Attention Mechanisms",
"Softmax"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Batch-Oriented"
]
} | [
"Apache"
] | https://github.com/shawntan/stickbreaking-attention/blob/8dd32ad5e58f0ee0232fd4782dc53d354ff8d283/stickbreaking_attention/sb_attn/sb_fwd.py |
4a35494c-3b38-474a-8c6f-d0bfb320f8b8 | swiglu.py | ardywibowo/triton-mode | kernels/swiglu.py | 5cd773ec95e25e23c6b75e312c7a9a1c6eb650b1 | 0 | @triton.jit
def triton_swiglu_backward(grad_output_ptr, input_a_ptr, input_b_ptr,
row_stride, num_columns: tl.constexpr, BLOCK_SIZE: tl.constexpr):
prog_id = tl.program_id(0).to(tl.int64)
grad_output_ptr += prog_id * row_stride
input_a_ptr += prog_id * row_stride
input_b_ptr += prog_id * row_stride
column_offsets = tl.arange(0, BLOCK_SIZE)
active_mask = column_offsets < num_columns
grad_output_row = tl.load(grad_output_ptr + column_offsets, mask=
active_mask, other=0)
input_a_row = tl.load(input_a_ptr + column_offsets, mask=active_mask,
other=0).to(tl.float32)
input_b_row = tl.load(input_b_ptr + column_offsets, mask=active_mask,
other=0)
sigmoid_a = tl.sigmoid(input_a_row)
silu_a = input_a_row * sigmoid_a
grad_b_row = grad_output_row * silu_a
grad_a_row = grad_output_row * (silu_a * (1 - sigmoid_a) + sigmoid_a
) * input_b_row
tl.store(input_a_ptr + column_offsets, grad_a_row, mask=active_mask)
tl.store(input_b_ptr + column_offsets, grad_b_row, mask=active_mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Activation Functions",
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/ardywibowo/triton-mode/blob/5cd773ec95e25e23c6b75e312c7a9a1c6eb650b1/kernels/swiglu.py |
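A minimal host-side launch sketch for triton_swiglu_backward above, assuming contiguous (num_rows, num_columns) CUDA tensors that share one row stride; the wrapper name is an assumption, not part of the source. Note that the kernel overwrites input_a and input_b in place with the two gradients.

import torch
import triton

def swiglu_backward(grad_output, input_a, input_b):
    # all three tensors: (num_rows, num_columns), same row stride, on CUDA
    num_rows, num_columns = input_a.shape
    BLOCK_SIZE = triton.next_power_of_2(num_columns)
    # one program per row; results are written back into input_a / input_b
    triton_swiglu_backward[(num_rows,)](
        grad_output, input_a, input_b,
        input_a.stride(0), num_columns, BLOCK_SIZE=BLOCK_SIZE)
    return input_a, input_b  # grad w.r.t. a and b, respectively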
c669de30-862b-4b20-b39f-b1ab212762b0 | test_triton_basics.py | tucommenceapousser/xformers | tests/test_triton_basics.py | c97e3d917cfdad4a38acd4e9d776030d25ab9141 | 0 | @triton.jit
def k_rand(X, Y, SEED_X, SEED_Y, stride_x, stride_y, N: tl.constexpr):
"""
Check the random number generation
"""
row = tl.program_id(0)
    rand_offsets = tl.arange(0, N)
    seed_x = tl.load(SEED_X + row)
    randx, _, _, _ = tl.randint4x(seed_x, rand_offsets)
    seed_y = tl.load(SEED_Y + row)
    randy, _, _, _ = tl.randint4x(seed_y, rand_offsets)
tl.store(X + row * stride_x + tl.arange(0, N), randx)
tl.store(Y + row * stride_y + tl.arange(0, N), randy)
| {
"Data Type": [],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"BSD"
] | https://github.com/tucommenceapousser/xformers/blob/c97e3d917cfdad4a38acd4e9d776030d25ab9141/tests/test_triton_basics.py |
1f159cd8-6426-4d71-9d99-6e0ba4030389 | matrix-vector-multiplication.py | northstreet12/triton-cpu | python/tutorials/matrix-vector-multiplication.py | bfb302ffc5fde3b9efe040cb452ddac0454dbb98 | 0 | @triton.jit
def gemv_kernel(Y, A, X, M, N, stride_am, BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr):
start_m = tl.program_id(0)
rm = start_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
rn = tl.arange(0, BLOCK_SIZE_N)
A = A + (rm[:, None] * stride_am + rn[None, :])
X = X + rn
acc = tl.zeros((BLOCK_SIZE_M,), dtype=tl.float32)
for n in range(N, 0, -BLOCK_SIZE_N):
a = tl.load(A)
x = tl.load(X)
acc += tl.sum(a * x[None, :], axis=1)
A += BLOCK_SIZE_N
X += BLOCK_SIZE_N
Y = Y + rm
tl.store(Y, acc)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/northstreet12/triton-cpu/blob/bfb302ffc5fde3b9efe040cb452ddac0454dbb98/python/tutorials/matrix-vector-multiplication.py |
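A minimal launch sketch for gemv_kernel above, computing y = A @ x for a row-major A of shape (M, N). Since the inner loads and the final store are unmasked, this sketch assumes M and N are multiples of the (illustrative) block sizes.

import torch

def gemv(A: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
    M, N = A.shape  # assumed divisible by BLOCK_SIZE_M / BLOCK_SIZE_N
    y = torch.empty(M, device=A.device, dtype=torch.float32)
    BLOCK_SIZE_M, BLOCK_SIZE_N = 32, 128
    grid = (M // BLOCK_SIZE_M,)  # one program per block of output rows
    gemv_kernel[grid](y, A, x, M, N, A.stride(0),
                      BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N)
    return y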
36107a2c-caa6-4ebc-98a8-495698ac5780 | multi_head_attention_kernels.py | BobMcDear/attorch | attorch/multi_head_attention_kernels.py | da06cb6236bb47195e33fe3986ed21c675ed94cc | 0 | @triton.jit
def _bwd_kernel_one_col_block(Q, K, V, sm_scale, qk_scale, Out, DO, DQ, DK,
DV, L, D, Q_block_ptr, K_block_ptr, V_block_ptr, DO_block_ptr,
DQ_block_ptr, DK_block_ptr, DV_block_ptr, stride_dqa, stride_qz,
stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn,
stride_kk, stride_vz, stride_vh, stride_vn, stride_vk, Z, H, N_CTX,
off_h, off_z, off_hz, start_n, num_block, BLOCK_M: tl.constexpr,
BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr, SEQUENCE_PARALLEL:
tl.constexpr, CAUSAL: tl.constexpr, MMA_V3: tl.constexpr):
if CAUSAL:
lo = start_n * BLOCK_M
else:
lo = 0
Q_offset = (off_z * stride_qz + off_h * stride_qh) // stride_qm
DQ_offset = off_z * stride_qz + off_h * stride_qh
K_offset = (off_z * stride_kz + off_h * stride_kh) // stride_kn
V_offset = (off_z * stride_vz + off_h * stride_vh) // stride_vn
if SEQUENCE_PARALLEL:
DQ_offset += stride_dqa * start_n
DQ_offset = DQ_offset // stride_qm
Q_block_ptr = tl.advance(Q_block_ptr, (lo + Q_offset, 0))
K_block_ptr = tl.advance(K_block_ptr, (start_n * BLOCK_M + K_offset, 0))
V_block_ptr = tl.advance(V_block_ptr, (start_n * BLOCK_M + V_offset, 0))
DO_block_ptr = tl.advance(DO_block_ptr, (lo + Q_offset, 0))
DQ_block_ptr = tl.advance(DQ_block_ptr, (lo + DQ_offset, 0))
DK_block_ptr = tl.advance(DK_block_ptr, (start_n * BLOCK_M + K_offset, 0))
DV_block_ptr = tl.advance(DV_block_ptr, (start_n * BLOCK_M + V_offset, 0))
offs_n = start_n * BLOCK_M + tl.arange(0, BLOCK_M)
offs_m = tl.arange(0, BLOCK_N)
D_ptrs = D + off_hz * N_CTX
l_ptrs = L + off_hz * N_CTX
dv = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
dk = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
k = tl.load(K_block_ptr)
v = tl.load(V_block_ptr)
for start_m in range(lo, num_block * BLOCK_M, BLOCK_M):
offs_m_curr = start_m + offs_m
q = tl.load(Q_block_ptr)
if CAUSAL:
qk = tl.where(offs_m_curr[:, None] >= offs_n[None, :], float(
0.0), float('-inf'))
else:
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
qk += tl.dot(q, tl.trans(k))
qk *= qk_scale
l_i = tl.load(l_ptrs + offs_m_curr)
p = tl.math.exp2(qk - l_i[:, None])
do = tl.load(DO_block_ptr)
dv += tl.dot(tl.trans(p.to(Q.dtype.element_ty)), do)
Di = tl.load(D_ptrs + offs_m_curr)
dp = tl.dot(do, tl.trans(v))
ds = (p * (dp - Di[:, None]) * sm_scale).to(Q.dtype.element_ty)
dk += tl.dot(tl.trans(ds), q)
if not SEQUENCE_PARALLEL:
dq = tl.load(DQ_block_ptr)
dq += tl.dot(ds, k)
tl.store(DQ_block_ptr, dq.to(Q.dtype.element_ty))
elif SEQUENCE_PARALLEL:
if MMA_V3:
dq = tl.dot(ds, k)
else:
dq = tl.trans(tl.dot(tl.trans(k), tl.trans(ds)))
tl.store(DQ_block_ptr, dq.to(Q.dtype.element_ty))
DQ_block_ptr = tl.advance(DQ_block_ptr, (BLOCK_M, 0))
Q_block_ptr = tl.advance(Q_block_ptr, (BLOCK_M, 0))
DO_block_ptr = tl.advance(DO_block_ptr, (BLOCK_M, 0))
tl.store(DV_block_ptr, dv.to(V.dtype.element_ty))
tl.store(DK_block_ptr, dk.to(K.dtype.element_ty))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/BobMcDear/attorch/blob/da06cb6236bb47195e33fe3986ed21c675ed94cc/attorch/multi_head_attention_kernels.py |
a03e38da-ffa6-4c28-bae7-c2831152756e | bwd_kernel_dq.py | ROCm/aotriton | tritonsrc/bwd_kernel_dq.py | 016f733e8ff746450e066f78bed68709ccd93e60 | 0 | @triton.jit
def bwd_kernel_dq(Q, K, V, B, sm_scale, Out, DO, DQ, DB, L, D, stride_qz,
stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn,
stride_kk, stride_vz, stride_vh, stride_vk, stride_vn, stride_bz,
stride_bh, stride_bm, stride_bn, stride_oz, stride_oh, stride_om,
stride_ok, stride_dqz, stride_dqh, stride_dqm, stride_dqk, stride_dbz,
stride_dbh, stride_dbm, stride_dbn, num_head_q: 'i32', num_head_k:
'i32', cu_seqlens_q, cu_seqlens_k, num_seqlens, max_seqlen_q,
max_seqlen_k, head_dim, dropout_p, philox_seed_ptr, philox_offset1:
'*u32', philox_offset2: 'u32', BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.
constexpr, BLOCK_N: tl.constexpr, CAUSAL: tl.constexpr, ENABLE_DROPOUT:
tl.constexpr, PADDED_HEAD: tl.constexpr, BIAS_TYPE: tl.constexpr):
philox_seed = 0
philox_offset_base = philox_offset2
if ENABLE_DROPOUT:
philox_seed = tl.load(philox_seed_ptr)
philox_offset_base += tl.load(philox_offset1)
start_q = tl.program_id(0) * BLOCK_M
off_h_q = tl.program_id(1)
off_h_k = off_h_q if num_head_q == num_head_k else off_h_q // (num_head_q
// num_head_k)
off_z = tl.program_id(2)
num_z = tl.num_programs(2)
off_zh = off_z * num_head_q + off_h_q * 1
offs_q = start_q + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
offs_d = tl.arange(0, BLOCK_DMODEL)
ld_offs_d = None if not PADDED_HEAD else tl.arange(0, BLOCK_DMODEL)
cu_seqlens_q_start = 0
cu_seqlens_k_start = 0
seqlen_q = max_seqlen_q
seqlen_k = max_seqlen_k
batch_index = off_z
if num_seqlens > 0:
cu_seqlens_q_start = tl.load(cu_seqlens_q + off_z)
cu_seqlens_q_end = tl.load(cu_seqlens_q + off_z + 1)
seqlen_q = cu_seqlens_q_end - cu_seqlens_q_start
if start_q >= seqlen_q:
return
cu_seqlens_k_start = tl.load(cu_seqlens_k + off_z)
cu_seqlens_k_end = tl.load(cu_seqlens_k + off_z + 1)
seqlen_k = cu_seqlens_k_end - cu_seqlens_k_start
batch_index = 0
if num_seqlens < 0:
cu_seqlens_q_start = tl.load(cu_seqlens_q + off_z)
cu_seqlens_q_end = tl.load(cu_seqlens_q + off_z + 1)
seqlen_q = cu_seqlens_q_end - cu_seqlens_q_start
if start_q >= seqlen_q:
return
cu_seqlens_k_start = tl.load(cu_seqlens_k + off_z)
cu_seqlens_k_end = tl.load(cu_seqlens_k + off_z + 1)
seqlen_k = cu_seqlens_k_end - cu_seqlens_k_start
cu_seqlens_q_start = 0
cu_seqlens_k_start = 0
batch_index = off_z
q_offset = (off_h_q * stride_qh + batch_index * stride_qz +
cu_seqlens_q_start * stride_qm)
Q += q_offset
q_ptrs = Q + offs_q[:, None] * stride_qm + offs_d[None, :] * stride_qk
if start_q + BLOCK_M <= seqlen_q:
q = load_fn(q_ptrs, None, ld_offs_d, seqlen_q, head_dim)
else:
q = load_fn(q_ptrs, offs_q, ld_offs_d, seqlen_q, head_dim)
qk_scale = sm_scale * 1.44269504089
bias_scale = 1.0 / sm_scale
k_offset = (off_h_k * stride_kh + batch_index * stride_kz +
cu_seqlens_k_start * stride_kn)
K += k_offset
kt_ptrs = K + offs_d[:, None] * stride_kk + offs_n[None, :] * stride_kn
v_offset = (off_h_k * stride_vh + batch_index * stride_vz +
cu_seqlens_k_start * stride_vk)
V += v_offset
vt_ptrs = V + offs_d[:, None] * stride_vn + offs_n[None, :] * stride_vk
do_offset = (off_h_q * stride_oh + batch_index * stride_oz +
cu_seqlens_q_start * stride_om)
DO += do_offset
do_ptrs = DO + offs_q[:, None] * stride_om + offs_d[None, :] * stride_ok
if start_q + BLOCK_M <= seqlen_q:
do = load_fn(do_ptrs, None, ld_offs_d, seqlen_q, head_dim)
else:
do = load_fn(do_ptrs, offs_q, ld_offs_d, seqlen_q, head_dim)
D_ptrs = D + off_zh * max_seqlen_q
l_ptrs = L + off_zh * max_seqlen_q
if ENABLE_DROPOUT:
batch_philox_offset = (philox_offset_base + off_zh * max_seqlen_q *
max_seqlen_k)
else:
batch_philox_offset = 0
dq_offset = (batch_index * stride_dqz + off_h_q * stride_dqh +
cu_seqlens_q_start * stride_dqm)
DQ += dq_offset
store_db = True
if BIAS_TYPE == 0:
B_block_ptr = 0
DB_block_ptr = 0
elif BIAS_TYPE == 1:
B_block_ptr = tl.make_block_ptr(base=B + off_h_q * stride_bh +
batch_index * stride_bz, shape=(seqlen_q, seqlen_k), strides=(
stride_bm, stride_bn), offsets=(start_q, 0), block_shape=(
BLOCK_M, BLOCK_N), order=(1, 0))
if (stride_dbz == 0 and stride_dbh == 0) and stride_dbm == 0:
store_db = False
DB_block_ptr = tl.make_block_ptr(base=DB + off_h_q * stride_dbh +
batch_index * stride_dbz, shape=(seqlen_q, seqlen_k), strides=(
stride_dbm, stride_dbn), offsets=(start_q, 0), block_shape=(
BLOCK_M, BLOCK_N), order=(1, 0))
else:
tl.static_assert(False, f'Unsupported BIAS_TYPE {BIAS_TYPE}')
k_lo = 0
k_hi = min(start_q + BLOCK_M, seqlen_k) if CAUSAL else seqlen_k
real_seqlen_k = k_hi - k_lo
n_blocks = tl.cdiv(k_hi - k_lo, BLOCK_N)
n_extra_tokens = 0
if real_seqlen_k < BLOCK_N:
n_extra_tokens = BLOCK_N - real_seqlen_k
elif real_seqlen_k % BLOCK_N:
n_extra_tokens = real_seqlen_k % BLOCK_N
is_irregular_k = n_extra_tokens != 0
n_full_blocks = (k_hi - k_lo) // BLOCK_N
leading_masked_blocks = 0
trailing_masked_blocks = 0
if CAUSAL:
mask_top_edge = min(start_q, seqlen_k)
n_full_blocks = (mask_top_edge - k_lo) // BLOCK_N
trailing_masked_blocks = n_blocks - n_full_blocks
else:
trailing_masked_blocks = 1 if is_irregular_k else 0
q_boundary = tl.full((BLOCK_M,), seqlen_q, dtype=tl.int32)
d_lse_ptrs_mask = offs_q < q_boundary
Di = tl.load(D_ptrs + offs_q, mask=d_lse_ptrs_mask, other=0.0)
l_i = tl.load(l_ptrs + offs_q, mask=d_lse_ptrs_mask, other=0.0)
dropout_scale = 1.0 / (1.0 - dropout_p) if ENABLE_DROPOUT else 1.0
dq = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
if n_full_blocks > 0:
lo = 0
hi = n_full_blocks * BLOCK_N
dq = bwd_inner_dq(dq, qk_scale, bias_scale, DB_block_ptr, store_db,
q, kt_ptrs, stride_kn, vt_ptrs, stride_vk, B_block_ptr, do, Di,
l_i, seqlen_q, seqlen_k, head_dim, start_q, lo, hi, dropout_p,
dropout_scale, philox_seed, batch_philox_offset, max_seqlen_k,
BLOCK_M, BLOCK_DMODEL, BLOCK_N, True, False, ENABLE_DROPOUT,
PADDED_HEAD, BIAS_TYPE)
if trailing_masked_blocks > 0:
lo = n_full_blocks * BLOCK_N
hi = k_hi
tl.debug_barrier()
dq = bwd_inner_dq(dq, qk_scale, bias_scale, DB_block_ptr, store_db,
q, kt_ptrs, stride_kn, vt_ptrs, stride_vk, B_block_ptr, do, Di,
l_i, seqlen_q, seqlen_k, head_dim, start_q, lo, hi, dropout_p,
dropout_scale, philox_seed, batch_philox_offset, max_seqlen_k,
BLOCK_M, BLOCK_DMODEL, BLOCK_N, False, CAUSAL, ENABLE_DROPOUT,
PADDED_HEAD, BIAS_TYPE)
dq = (dq * sm_scale).to(dq.type.element_ty)
mstore2d(dq, BLOCK_M, BLOCK_DMODEL, o_base=DQ, o_start_row=start_q,
o_start_col=0, o_rows=seqlen_q, o_cols=head_dim, stride_row=
stride_dqm, stride_col=stride_dqk)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/ROCm/aotriton/blob/016f733e8ff746450e066f78bed68709ccd93e60/tritonsrc/bwd_kernel_dq.py |
1bc0dab2-df5d-49d7-8357-25c023511ef4 | fused_recurrent.py | sustcsonglin/flash-linear-attention | fla/ops/hgrn/fused_recurrent.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_INITIAL_STATE': lambda args: args['h0'] is not
None, 'USE_FINAL_STATE_GRADIENT': lambda args: args['dht'] is not None,
'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({'BD': BD}, num_warps=num_warps) for
BD in [32, 64, 128] for num_warps in [1, 2, 4, 8]], key=['D'])
@triton.jit
def fused_recurrent_hgrn_bwd_kernel(g, o, h0, dx, dg, do, dht, dh0, offsets,
T: tl.constexpr, D: tl.constexpr, BD: tl.constexpr, USE_INITIAL_STATE:
tl.constexpr, USE_FINAL_STATE_GRADIENT: tl.constexpr, USE_OFFSETS: tl.
constexpr):
i_d, i_n = tl.program_id(0), tl.program_id(1)
if USE_OFFSETS:
bos, eos = tl.load(offsets + i_n).to(tl.int64), tl.load(offsets +
i_n + 1).to(tl.int64)
T = eos - bos
else:
bos, eos = i_n * T, i_n * T + T
o_d = i_d * BD + tl.arange(0, BD)
mask = o_d < D
p_g = g + (bos + T - 1) * D + o_d
p_o = o + (bos + T - 2) * D + o_d
p_dx = dx + (bos + T - 1) * D + o_d
p_dg = dg + (bos + T - 1) * D + o_d
p_do = do + (bos + T - 1) * D + o_d
b_dh = tl.zeros([BD], dtype=tl.float32)
if USE_FINAL_STATE_GRADIENT:
p_dht = dht + i_n * D + o_d
b_dh += tl.load(p_dht, mask=mask, other=0).to(tl.float32)
for i in range(T - 1, -1, -1):
b_g = tl.load(p_g, mask=mask, other=0).to(tl.float32)
b_do = tl.load(p_do, mask=mask, other=0).to(tl.float32)
if i > 0:
b_o = tl.load(p_o, mask=mask, other=0).to(tl.float32)
elif USE_INITIAL_STATE:
b_o = tl.load(h0 + i_n * D + o_d, mask=mask, other=0).to(tl.float32
)
else:
b_o = tl.zeros([BD], dtype=tl.float32)
b_dh = b_dh + b_do
b_dx = b_dh
b_dh = b_dh * tl.exp(b_g)
b_dg = b_dh * b_o
tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), mask=mask)
tl.store(p_dg, b_dg.to(p_dg.dtype.element_ty), mask=mask)
p_g -= D
p_o -= D
p_dx -= D
p_dg -= D
p_do -= D
if USE_INITIAL_STATE:
p_dh0 = dh0 + i_n * D + o_d
tl.store(p_dh0, b_dh.to(p_dh0.dtype.element_ty), mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Recurrent Neural Networks",
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/hgrn/fused_recurrent.py |
4f77ab02-60d6-4972-ba32-de199f864ca3 | blocksparse_logsumexp.py | kimiasa/Experiments | src/models/attention/blocksparse_logsumexp.py | c4e73bfefd8290695ec52b6386b6b81838ca94a1 | 0 | @triton.heuristics({'num_warps': lambda *args, **meta: num_warps(args[5] *
meta['BLOCK'])})
@triton.heuristics({'TN': lambda *args, **meta: next_power_of_2(args[5]) *
meta['BLOCK']})
@triton.jit
def _backward(X, OUT, DX, DOUT, LUT, sizemax, stride_zx, stride_zout,
stride_hout, stride_zdx, stride_zdout, stride_hdout, **meta):
pidhm = tl.program_id(0)
pidz = tl.program_id(1)
TN = meta['TN']
BLOCK = meta['BLOCK']
rxm = pidhm % BLOCK
rbm = pidhm // BLOCK
rxn = tl.arange(0, TN) % BLOCK
rbn = tl.arange(0, TN) // BLOCK
header = LUT + rbm * 2
size = tl.load(header + 0)
offset = tl.load(header + 1)
check = rbn < size
rbmn = tl.where(check, rbn, size - 1)
blockid = tl.load(LUT + offset + rbmn * 4)
rowid = tl.load(LUT + offset + rbmn * 4 + 2)
headid = tl.load(LUT + offset + rbmn * 4 + 3)
px = X + pidz * stride_zx + blockid * BLOCK * BLOCK + rxm * BLOCK + rxn
pdx = DX + pidz * stride_zdx + blockid * BLOCK * BLOCK + rxm * BLOCK + rxn
pout = (OUT + pidz * stride_zout + headid * stride_hout + rowid * BLOCK +
rxm)
pdout = (DOUT + pidz * stride_zdout + headid * stride_hdout + rowid *
BLOCK + rxm)
x = tl.load(px, mask=check, other=-float('inf'))
out = tl.load(pout)
dout = tl.load(pdout)
x = x.to(tl.float32)
out = out.to(tl.float32)
dout = dout.to(tl.float32)
dx = dout * tl.exp(-(out - x))
tl.store(pdx, dx, mask=check)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Softmax",
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/kimiasa/Experiments/blob/c4e73bfefd8290695ec52b6386b6b81838ca94a1/src/models/attention/blocksparse_logsumexp.py |
97d8600e-0e67-46a3-81b1-9dc82a69f084 | vector_add3.py | danielpyon/ml-kernels | triton/vector_add3.py | 506186b419335b590da538ffb388aea2c7c26c03 | 0 | @triton.jit
def add_kernel(x, y, out, n, BLOCK_SIZE: tl.constexpr):
start = BLOCK_SIZE * tl.program_id(axis=0)
offsets = start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n
xs = tl.load(x + offsets, mask=mask)
ys = tl.load(y + offsets, mask=mask)
tl.store(out + offsets, xs + ys, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT",
"BSD"
] | https://github.com/danielpyon/ml-kernels/blob/506186b419335b590da538ffb388aea2c7c26c03/triton/vector_add3.py |
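A minimal launch sketch for add_kernel above; the wrapper and the BLOCK_SIZE of 1024 are illustrative choices, not from the source.

import torch
import triton

def add(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    out = torch.empty_like(x)
    n = x.numel()
    BLOCK_SIZE = 1024
    grid = (triton.cdiv(n, BLOCK_SIZE),)  # one program per BLOCK_SIZE chunk
    add_kernel[grid](x, y, out, n, BLOCK_SIZE=BLOCK_SIZE)
    return out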
965bd139-0aa6-4b52-b2f1-093fcc0ab9b0 | parallel.py | sustcsonglin/flash-linear-attention | fla/ops/rebased/parallel.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.jit
def _parallel_rebased_bwd_dq(i_bh, i_c, i_k, i_v, i_h, q, k, v, do, dz, dq,
s_k_h, s_k_t, s_k_d, s_v_h, s_v_t, s_v_d, scale, B: tl.constexpr, H: tl
.constexpr, T: tl.constexpr, K: tl.constexpr, V: tl.constexpr, BTL: tl.
constexpr, BTS: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr):
p_do = tl.make_block_ptr(do + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), (
i_c * BTL, i_v * BV), (BTL, BV), (1, 0))
p_q = tl.make_block_ptr(q + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), (i_c *
BTL, i_k * BK), (BTL, BK), (1, 0))
b_q = tl.load(p_q, boundary_check=(0, 1))
b_do = tl.load(p_do, boundary_check=(0, 1)).to(b_q.dtype)
b_q = (b_q * scale).to(b_q.dtype)
b_dq = tl.zeros([BTL, BK], dtype=tl.float32)
p_k = tl.make_block_ptr(k + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), (0,
i_k * BK), (BTS, BK), (1, 0))
p_v = tl.make_block_ptr(v + i_bh * s_v_h, (V, T), (s_v_d, s_v_t), (i_v *
BV, 0), (BV, BTS), (0, 1))
p_dz = dz + i_bh * T + i_c * BTL + tl.arange(0, BTL)
b_dz = tl.load(p_dz, mask=i_c * BTL + tl.arange(0, BTL) < T)
for _ in range(0, i_c * BTL, BTS):
b_k = tl.load(p_k, boundary_check=(0, 1))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_ds = tl.dot(b_do, b_v, allow_tf32=False)
        if i_v == 0:
            b_ds += b_dz[:, None]
b_s = tl.dot(b_q, tl.trans(b_k), allow_tf32=False)
b_dq += tl.dot((2 * b_ds * b_s).to(b_v.dtype), b_k, allow_tf32=False)
p_k = tl.advance(p_k, (BTS, 0))
p_v = tl.advance(p_v, (0, BTS))
b_dq *= scale
o_q = tl.arange(0, BTL)
o_k = tl.arange(0, BTS)
p_k = tl.make_block_ptr(k + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), (i_c *
BTL, i_k * BK), (BTS, BK), (1, 0))
p_v = tl.make_block_ptr(v + i_bh * s_v_h, (V, T), (s_v_d, s_v_t), (i_v *
BV, i_c * BTL), (BV, BTS), (0, 1))
for _ in range(i_c * BTL, (i_c + 1) * BTL, BTS):
b_k = tl.load(p_k, boundary_check=(0, 1))
b_v = tl.load(p_v, boundary_check=(0, 1))
m_s = o_q[:, None] >= o_k[None, :]
b_ds = tl.dot(b_do, b_v, allow_tf32=False)
        if i_v == 0:
            b_ds += b_dz[:, None]
b_ds = tl.where(m_s, b_ds, 0) * scale
b_s = tl.dot(b_q, tl.trans(b_k), allow_tf32=False)
b_s = tl.where(m_s, b_s, 0)
b_dq += tl.dot((2 * b_ds * b_s).to(b_k.dtype), b_k, allow_tf32=False)
p_k = tl.advance(p_k, (BTS, 0))
p_v = tl.advance(p_v, (0, BTS))
o_k += BTS
p_dq = tl.make_block_ptr(dq + (i_bh + B * H * i_v) * s_k_h, (T, K), (
s_k_t, s_k_d), (i_c * BTL, i_k * BK), (BTL, BK), (1, 0))
tl.store(p_dq, b_dq.to(p_dq.dtype.element_ty), boundary_check=(0, 1))
return
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access",
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/rebased/parallel.py |
30b6c5be-8933-4c72-b749-323b8c021e41 | fused_recurrent.py | sustcsonglin/flash-linear-attention | fla/ops/retention/fused_recurrent.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_INITIAL_STATE': lambda args: args['h0'] is not
None, 'USE_FINAL_STATE_GRADIENT': lambda args: args['dht'] is not None,
'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.jit
def fused_recurrent_retention_bwd_kernel(q, k, v, h0, do, dq, dk, dv, dh0,
dht, offsets, scale, B: tl.constexpr, H: tl.constexpr, T: tl.constexpr,
K: tl.constexpr, V: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr,
REVERSE: tl.constexpr, USE_INITIAL_STATE: tl.constexpr,
USE_FINAL_STATE_GRADIENT: tl.constexpr, USE_OFFSETS: tl.constexpr,
HEAD_FIRST: tl.constexpr):
i_v, i_k, i_nh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_n, i_h = i_nh // H, i_nh % H
if USE_OFFSETS:
bos, eos = tl.load(offsets + i_n).to(tl.int64), tl.load(offsets +
i_n + 1).to(tl.int64)
all = T
T = eos - bos
else:
bos, eos = i_n * T, i_n * T + T
all = B * T
b_b = 1 - tl.math.exp2(-5 - i_h * 1.0)
if HEAD_FIRST:
p_k = k + i_nh * T * K + ((T - 1) * K if REVERSE else 0
) + i_k * BK + tl.arange(0, BK)
p_v = v + i_nh * T * V + ((T - 1) * V if REVERSE else 0
) + i_v * BV + tl.arange(0, BV)
p_do = do + i_nh * T * V + ((T - 1) * V if REVERSE else 0
) + i_v * BV + tl.arange(0, BV)
p_dq = dq + (i_v * B * H + i_nh) * T * K + ((T - 1) * K if REVERSE else
0) + i_k * BK + tl.arange(0, BK)
else:
p_k = k + (bos + (T - 1 if REVERSE else 0)
) * H * K + i_h * K + i_k * BK + tl.arange(0, BK)
p_v = v + (bos + (T - 1 if REVERSE else 0)
) * H * V + i_h * V + i_v * BV + tl.arange(0, BV)
p_do = do + (bos + (T - 1 if REVERSE else 0)
) * H * V + i_h * V + i_v * BV + tl.arange(0, BV)
p_dq = dq + (i_v * all + bos + (T - 1 if REVERSE else 0)
) * H * K + i_h * K + i_k * BK + tl.arange(0, BK)
mask_k = i_k * BK + tl.arange(0, BK) < K
mask_v = i_v * BV + tl.arange(0, BV) < V
mask_h = mask_k[:, None] & mask_v[None, :]
b_h = tl.zeros([BK, BV], dtype=tl.float32)
if USE_INITIAL_STATE:
p_h0 = h0 + i_nh * K * V + (i_k * BK + tl.arange(0, BK)[:, None]
) * V + (i_v * BV + tl.arange(0, BV)[None, :])
b_h += tl.load(p_h0, mask=mask_h, other=0).to(tl.float32)
for _ in range(0, T):
b_k = tl.load(p_k, mask=mask_k, other=0).to(tl.float32)
b_v = tl.load(p_v, mask=mask_v, other=0).to(tl.float32)
b_do = tl.load(p_do, mask=mask_v, other=0).to(tl.float32)
b_h = b_b * b_h + b_k[:, None] * b_v[None, :]
b_dq = tl.sum(b_h * b_do[None, :], axis=1) * scale
tl.store(p_dq, b_dq.to(p_dq.dtype.element_ty), mask=mask_k)
p_k += (-1 if REVERSE else 1) * (1 if HEAD_FIRST else H) * K
p_v += (-1 if REVERSE else 1) * (1 if HEAD_FIRST else H) * V
p_do += (-1 if REVERSE else 1) * (1 if HEAD_FIRST else H) * V
p_dq += (-1 if REVERSE else 1) * (1 if HEAD_FIRST else H) * K
tl.debug_barrier()
if HEAD_FIRST:
p_q = q + i_nh * T * K + ((T - 1) * K if not REVERSE else 0
) + i_k * BK + tl.arange(0, BK)
p_k = k + i_nh * T * K + ((T - 1) * K if not REVERSE else 0
) + i_k * BK + tl.arange(0, BK)
p_v = v + i_nh * T * V + ((T - 1) * V if not REVERSE else 0
) + i_v * BV + tl.arange(0, BV)
p_do = do + i_nh * T * V + ((T - 1) * V if not REVERSE else 0
) + i_v * BV + tl.arange(0, BV)
p_dk = dk + (i_v * B * H + i_nh) * T * K + ((T - 1) * K if not
REVERSE else 0) + i_k * BK + tl.arange(0, BK)
p_dv = dv + (i_k * B * H + i_nh) * T * V + ((T - 1) * V if not
REVERSE else 0) + i_v * BV + tl.arange(0, BV)
else:
p_q = q + (bos + (T - 1 if not REVERSE else 0)
) * H * K + i_h * K + i_k * BK + tl.arange(0, BK)
p_k = k + (bos + (T - 1 if not REVERSE else 0)
) * H * K + i_h * K + i_k * BK + tl.arange(0, BK)
p_v = v + (bos + (T - 1 if not REVERSE else 0)
) * H * V + i_h * V + i_v * BV + tl.arange(0, BV)
p_do = do + (bos + (T - 1 if not REVERSE else 0)
) * H * V + i_h * V + i_v * BV + tl.arange(0, BV)
p_dk = dk + (i_v * all + bos + (T - 1 if not REVERSE else 0)
) * H * K + i_h * K + i_k * BK + tl.arange(0, BK)
p_dv = dv + (i_k * all + bos + (T - 1 if not REVERSE else 0)
) * H * V + i_h * V + i_v * BV + tl.arange(0, BV)
b_dh = tl.zeros([BK, BV], dtype=tl.float32)
if USE_FINAL_STATE_GRADIENT:
p_ht = dht + i_nh * K * V + (i_k * BK + tl.arange(0, BK)[:, None]
) * V + (i_v * BV + tl.arange(0, BV)[None, :])
b_dh += tl.load(p_ht, mask=mask_h, other=0).to(tl.float32)
for _ in range(T):
b_q = tl.load(p_q, mask=mask_k, other=0).to(tl.float32) * scale
b_k = tl.load(p_k, mask=mask_k, other=0).to(tl.float32)
b_v = tl.load(p_v, mask=mask_v, other=0).to(tl.float32)
b_do = tl.load(p_do, mask=mask_v, other=0).to(tl.float32)
b_dh += b_q[:, None] * b_do[None, :]
b_dk = tl.sum(b_dh * b_v[None, :], axis=1)
b_dv = tl.sum(b_dh * b_k[:, None], axis=0)
b_dh *= b_b
tl.store(p_dk, b_dk.to(p_dk.dtype.element_ty), mask=mask_k)
tl.store(p_dv, b_dv.to(p_dv.dtype.element_ty), mask=mask_v)
p_q += (1 if REVERSE else -1) * (1 if HEAD_FIRST else H) * K
p_k += (1 if REVERSE else -1) * (1 if HEAD_FIRST else H) * K
p_v += (1 if REVERSE else -1) * (1 if HEAD_FIRST else H) * V
p_do += (1 if REVERSE else -1) * (1 if HEAD_FIRST else H) * V
p_dk += (1 if REVERSE else -1) * (1 if HEAD_FIRST else H) * K
p_dv += (1 if REVERSE else -1) * (1 if HEAD_FIRST else H) * V
if USE_INITIAL_STATE:
p_dh0 = dh0 + i_nh * K * V + (i_k * BK + tl.arange(0, BK)[:, None]
) * V + (i_v * BV + tl.arange(0, BV)[None, :])
tl.store(p_dh0, b_dh.to(p_dh0.dtype.element_ty), mask=mask_h)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Recurrent Neural Networks",
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/retention/fused_recurrent.py |
256c710f-0067-4c80-b37c-d4bbf785d671 | block_offsets.py | Forkxz/TritonDeepLearningKernel | kernel/block_offsets.py | add54b6318e8fa5fdbf8c7b47659de9fceaa5691 | 0 | @triton.jit
def block_offsets_3d(shape_x, shape_y, shape_z, stride_x, stride_y,
stride_z, offset_x, offset_y, offset_z, block_shape_x, block_shape_y,
block_shape_z, require_mask=False):
offs_x = tl.arange(0, block_shape_x) + offset_x
offs_y = tl.arange(0, block_shape_y) + offset_y
offs_z = tl.arange(0, block_shape_z) + offset_z
ptrs = offs_x[:, None, None] * stride_x + offs_y[None, :, None
] * stride_y + offs_z[None, None, :] * stride_z
if require_mask:
mask = (offs_x[:, None, None] < shape_x) & (offs_y[None, :, None] <
shape_y) & (offs_z[None, None, :] < shape_z)
return ptrs, mask
else:
return ptrs
| {
"Data Type": [],
"Functionality": [],
"Memory Access Pattern": [
"Strided Access",
"Tiled"
],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/Forkxz/TritonDeepLearningKernel/blob/add54b6318e8fa5fdbf8c7b47659de9fceaa5691/kernel/block_offsets.py |
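A sketch of how block_offsets_3d above composes with loads and stores: a hypothetical single-program kernel that copies one (BX, BY, BZ) tile; copy3d_kernel and its argument names are assumptions for illustration.

import triton
import triton.language as tl

@triton.jit
def copy3d_kernel(src, dst, X, Y, Z, sx, sy, sz,
                  BX: tl.constexpr, BY: tl.constexpr, BZ: tl.constexpr):
    # flattened pointer offsets and bounds mask for one tile at the origin
    ptrs, mask = block_offsets_3d(X, Y, Z, sx, sy, sz, 0, 0, 0,
                                  BX, BY, BZ, require_mask=True)
    vals = tl.load(src + ptrs, mask=mask, other=0.0)
    tl.store(dst + ptrs, vals, mask=mask)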
a0e121c3-7ebc-4eb8-bdeb-cb903a4b9850 | softmax.py | Nagi-ovo/diy | OpenAI-Triton/softmax.py | d7c119aa762b9103109d29abaebee345246fe5d7 | 0 | @triton.jit
def _softmax_fwd_kernel(output_ptr, stride_output_row, input_ptr,
stride_input_row, num_cols, block_size: tl.constexpr):
row_index = tl.program_id(0)
row_start_ptr = input_ptr + row_index * stride_input_row
col_offsets = tl.arange(0, block_size)
input_pointers = row_start_ptr + col_offsets
row_mask = col_offsets < num_cols
row = tl.load(input_pointers, mask=row_mask, other=float('-inf'))
safe_row = row - tl.max(row, axis=0)
numerator = tl.exp(safe_row)
denominator = tl.sum(numerator, axis=0)
sm_out = numerator / denominator
    output_row_ptr = output_ptr + row_index * stride_output_row
output_pointers = output_row_ptr + col_offsets
tl.store(output_pointers, sm_out, mask=row_mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Softmax"
],
"Memory Access Pattern": [
"Strided Access",
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/Nagi-ovo/diy/blob/d7c119aa762b9103109d29abaebee345246fe5d7/OpenAI-Triton/softmax.py |
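A minimal launch sketch for _softmax_fwd_kernel above: one program per row of a contiguous 2D CUDA tensor, with block_size the next power of two covering the row (a tl.arange requirement).

import torch
import triton

def softmax_fwd(x: torch.Tensor) -> torch.Tensor:
    rows, cols = x.shape
    out = torch.empty_like(x)
    block_size = triton.next_power_of_2(cols)
    _softmax_fwd_kernel[(rows,)](out, out.stride(0), x, x.stride(0),
                                 cols, block_size=block_size)
    return out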
735e0114-467d-4933-bae8-e10228aac0d5 | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/abc/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.jit
def chunk_abc_bwd_kernel_rcum_intra(s, z, ss, doo, s_s_h, s_s_t, s_s_d, T:
tl.constexpr, S: tl.constexpr, BT: tl.constexpr, BC: tl.constexpr, BS:
tl.constexpr, NC: tl.constexpr):
i_s, i_c, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_t, i_i = i_c // NC, i_c % NC
o_i = tl.arange(0, BC)
m_o = tl.full([BC, BC], 1.0, dtype=tl.float32)
p_s = tl.make_block_ptr(s + i_bh * s_s_h, (T, S), (s_s_t, s_s_d), (i_t *
BT + i_i * BC, i_s * BS), (BC, BS), (1, 0))
p_zn = tl.make_block_ptr(z + i_bh * s_s_h, (T * S,), (s_s_d,), ((i_t *
BT + i_i * BC + BC - 1) * S + i_s * BS,), (BS,), (0,))
p_doo = tl.make_block_ptr(doo + i_bh * s_s_h, (T, S), (s_s_t, s_s_d), (
i_t * BT + i_i * BC, i_s * BS), (BC, BS), (1, 0))
b_s = tl.load(p_s, boundary_check=(0, 1))
b_zn = tl.load(p_zn, boundary_check=(0,))
b_doo = tl.zeros([BC, BS], dtype=tl.float32)
for i_j in range(i_i + 1, NC):
p_z = tl.make_block_ptr(z + i_bh * s_s_h, (T, S), (s_s_t, s_s_d), (
i_t * BT + i_j * BC, i_s * BS), (BC, BS), (1, 0))
p_ss = tl.make_block_ptr(ss + i_bh * s_s_h, (T, S), (s_s_t, s_s_d),
(i_t * BT + i_j * BC, i_s * BS), (BC, BS), (1, 0))
b_z = tl.load(p_z, boundary_check=(0, 1))
b_ss = tl.load(p_ss, boundary_check=(0, 1))
b_doo += b_ss * tl.exp(b_zn[None, :] - b_z)
b_doo = tl.exp(b_s - b_zn[None, :]) * tl.dot(m_o.to(b_s.dtype), b_doo.
to(b_s.dtype), allow_tf32=False)
for j in range(0, BC):
p_z = tl.make_block_ptr(z + i_bh * s_s_h, (T * S,), (1,), ((i_t *
BT + i_i * BC + j) * S + i_s * BS,), (BS,), (0,))
p_ss = tl.make_block_ptr(ss + i_bh * s_s_h, (T * S,), (1,), ((i_t *
BT + i_i * BC + j) * S + i_s * BS,), (BS,), (0,))
b_z = tl.load(p_z, boundary_check=(0,))
b_ss = tl.load(p_ss, boundary_check=(0,))
m_i = o_i[:, None] <= j
b_doo += tl.where(m_i, tl.exp(b_s - b_z[None, :]) * b_ss[None, :], 0.0)
b_doo += tl.load(p_doo, boundary_check=(0, 1))
tl.store(p_doo, b_doo.to(p_doo.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/abc/chunk.py |
e64784a5-f146-4390-946f-2befee210aea | triton_jagged_tensor_ops.py | pytorch/FBGEMM | fbgemm_gpu/fbgemm_gpu/triton/jagged/triton_jagged_tensor_ops.py | fe980ab54a6e28818d81c8694b6564e7f804418b | 0 | @triton.jit
def triton_batched_dense_vec_jagged_2d_matmul(jagged_tensor_ptr, dense_ptr,
jagged_offset, thread_block_col_size: tl.constexpr, dense_row_stride,
jagged_value_row_stride, D, H: tl.constexpr, output_ptr) ->None:
pid = tl.program_id(0)
GRID_DIM_COL = (D + thread_block_col_size - 1) // thread_block_col_size
output_row_idx = pid // GRID_DIM_COL
jagged_offset_id = output_row_idx // H
D_refer_idx = output_row_idx % H
group_id = pid % GRID_DIM_COL
offset = group_id * thread_block_col_size + tl.arange(0,
thread_block_col_size)
begin = tl.load(jagged_offset + jagged_offset_id)
end = tl.load(jagged_offset + (jagged_offset_id + 1))
dense_ptr += output_row_idx * dense_row_stride
jagged_tensor_ptr += begin * jagged_value_row_stride + D_refer_idx * D
output_ptr += D * output_row_idx
num_row = tl.minimum(end - begin, dense_row_stride)
acc = tl.zeros((thread_block_col_size,), dtype=tl.float32)
mask = offset < D
for i in range(num_row):
val1 = tl.load(dense_ptr + i)
val2 = tl.load(jagged_tensor_ptr + offset, mask=mask, other=0.0)
result = val1 * val2
acc += result
jagged_tensor_ptr += jagged_value_row_stride
tl.store(output_ptr + offset, acc, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access",
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"BSD",
"MIT"
] | https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/triton/jagged/triton_jagged_tensor_ops.py |
fae6dfee-0856-48f2-8c09-198c2bf008d0 | mse.py | l1351868270/implicit_gemm.triton | triton_kernel/mse.py | 64eb8548ccf4576883c928f6315be8b24680a455 | 0 | @triton.jit
def _ld_mse_bwd_kernel(grad_ptr, input_ptr, target_ptr, grad_output,
grad_row_stride, input_row_stride, target_row_stride, n_rows, n_cols,
BLOCK_SIZE: tl.constexpr):
pid = tl.program_id(0)
col_offsets = tl.arange(0, BLOCK_SIZE)
mask = col_offsets < n_cols
grad_ptrs = grad_ptr + pid * grad_row_stride + col_offsets
input_ptrs = input_ptr + pid * input_row_stride + col_offsets
target_ptrs = target_ptr + pid * target_row_stride + col_offsets
input = tl.load(input_ptrs, mask=mask, other=0.0)
target = tl.load(target_ptrs, mask=mask, other=0.0)
grad_ = (input - target) * 2 * grad_output / (n_rows * n_cols)
tl.store(grad_ptrs, grad_, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/l1351868270/implicit_gemm.triton/blob/64eb8548ccf4576883c928f6315be8b24680a455/triton_kernel/mse.py |
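A minimal launch sketch for _ld_mse_bwd_kernel above: the gradient of mean((input - target) ** 2) scaled by a scalar upstream gradient; the wrapper name is an assumption.

import torch
import triton

def ld_mse_backward(grad_output: float, input: torch.Tensor,
                    target: torch.Tensor) -> torch.Tensor:
    n_rows, n_cols = input.shape
    grad = torch.empty_like(input)
    BLOCK_SIZE = triton.next_power_of_2(n_cols)
    _ld_mse_bwd_kernel[(n_rows,)](
        grad, input, target, grad_output,
        grad.stride(0), input.stride(0), target.stride(0),
        n_rows, n_cols, BLOCK_SIZE=BLOCK_SIZE)
    return grad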
d2319479-4740-45ba-a17c-d0a3e92ef405 | fused_chunk.py | sustcsonglin/flash-linear-attention | fla/ops/gla/fused_chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.jit
def bwd_decay_global_cumsum(dq_inner, dq_inter, dk_inner, dk_inter, q, k, g,
dg, s_k_h, BT: tl.constexpr, BK: tl.constexpr, K: tl.constexpr):
i_k, i_c, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
p_q = q + i_bh * s_k_h + i_k * BK + tl.arange(0, BK) + (i_c * BT + BT - 1
) * K
p_k = k + i_bh * s_k_h + i_k * BK + tl.arange(0, BK) + (i_c * BT + BT - 1
) * K
p_g = g + i_bh * s_k_h + i_k * BK + tl.arange(0, BK) + (i_c * BT + BT - 1
) * K
p_dg = dg + i_bh * s_k_h + i_k * BK + tl.arange(0, BK) + (i_c * BT + BT - 1
) * K
p_dq_inner = dq_inner + i_bh * s_k_h + i_k * BK + tl.arange(0, BK) + (
i_c * BT + BT - 1) * K
p_dk_inner = dk_inner + i_bh * s_k_h + i_k * BK + tl.arange(0, BK) + (
i_c * BT + BT - 1) * K
p_dq_inter = dq_inter + i_bh * s_k_h + i_k * BK + tl.arange(0, BK) + (
i_c * BT + BT - 1) * K
p_dk_inter = dk_inter + i_bh * s_k_h + i_k * BK + tl.arange(0, BK) + (
i_c * BT + BT - 1) * K
cum_grad_dg = tl.zeros([BK], dtype=tl.float32)
mask = i_k * BK + tl.arange(0, BK) < K
last_g = tl.zeros([BK], dtype=tl.float32)
for j in range(BT - 1, -1, -1):
_g = tl.load(p_g, mask=mask, other=0).to(tl.float32)
if j == BT - 1:
last_g = _g
b_dq1 = tl.load(p_dq_inner, mask=mask, other=0)
b_dq2 = tl.load(p_dq_inter, mask=mask, other=0)
b_dq2 *= tl.exp(_g)
b_dq = b_dq1 + b_dq2
tl.store(p_dq_inter, b_dq, mask=mask)
b_dk1 = tl.load(p_dk_inner, mask=mask, other=0)
b_dk2 = tl.load(p_dk_inter, mask=mask, other=0)
b_dk2 *= tl.exp(last_g - _g)
b_dk = b_dk1 + b_dk2
tl.store(p_dk_inter, b_dk, mask=mask)
b_q = tl.load(p_q, mask=mask, other=0)
b_k = tl.load(p_k, mask=mask, other=0)
b_dg = b_dq * b_q - b_dk * b_k
cum_grad_dg += b_dg
tl.store(p_dg, cum_grad_dg.to(p_dg.dtype.element_ty), mask=mask)
p_g -= K
p_k -= K
p_q -= K
p_dq_inner -= K
p_dk_inner -= K
p_dq_inter -= K
p_dk_inter -= K
p_dg -= K
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gla/fused_chunk.py |
c043b3cc-fffa-49f1-90d0-c95423a99f18 | kernels.py | pytorch-labs/tritonbench | tritonbench/operators/sum/kernels.py | 3a5dccb159834968567a2e45e561dc1aeaa8f8a8 | 0 | @triton.autotune(configs=[triton.Config({'BLOCK_SIZE_NON_REDUCE_DIM': b,
'BLOCK_SIZE_REDUCE_DIM': b}, num_warps=w) for b, w in itertools.product
([2, 4, 8, 16], [2, 4, 8])], key=['M', 'N'])
@triton.jit
def triton_sum_kernel_1D_result_buffer_then_sum(input_ptr, output_ptr, M, N,
BLOCK_SIZE_NON_REDUCE_DIM: tl.constexpr, BLOCK_SIZE_REDUCE_DIM: tl.
constexpr, dim: tl.constexpr):
"""
Add blocks of input to a buffer and sum the buffer using Triton
"""
pid = tl.program_id(axis=0)
reduce_dim_len = M if dim == 0 else N
non_reduce_dim_len = N if dim == 0 else M
buffer = tl.zeros((BLOCK_SIZE_REDUCE_DIM, BLOCK_SIZE_NON_REDUCE_DIM),
dtype=tl.float32)
block_start_non_reduce_dim = pid * BLOCK_SIZE_NON_REDUCE_DIM
offsets_non_reduce_dim = block_start_non_reduce_dim + tl.arange(0,
BLOCK_SIZE_NON_REDUCE_DIM)
mask_non_reduce_dim = offsets_non_reduce_dim < non_reduce_dim_len
for block_start_reduce_dim in range(0, reduce_dim_len,
BLOCK_SIZE_REDUCE_DIM):
offsets_reduce_dim = block_start_reduce_dim + tl.arange(0,
BLOCK_SIZE_REDUCE_DIM)
mask_reduce_dim = offsets_reduce_dim < reduce_dim_len
idxs, mask = None, None
if dim == 0:
idxs = offsets_reduce_dim[:, None
] * non_reduce_dim_len + offsets_non_reduce_dim
mask = mask_reduce_dim[:, None] & mask_non_reduce_dim
elif dim == 1:
idxs = offsets_non_reduce_dim[:, None
] * reduce_dim_len + offsets_reduce_dim
mask = mask_non_reduce_dim[:, None] & mask_reduce_dim
        buffer += tl.load(input_ptr + idxs, mask=mask, other=0.0)
buffer_sum = tl.sum(buffer, axis=dim)
buffer_view = buffer_sum.reshape((BLOCK_SIZE_NON_REDUCE_DIM,))
tl.store(output_ptr + offsets_non_reduce_dim, buffer_view, mask=
mask_non_reduce_dim)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access",
"Blocked Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"BSD"
] | https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/operators/sum/kernels.py |
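A launch sketch for triton_sum_kernel_1D_result_buffer_then_sum above. The block sizes are chosen by the autotuner, so the grid is a lambda over the selected META; the wrapper is an assumption.

import torch
import triton

def sum_dim(x: torch.Tensor, dim: int) -> torch.Tensor:
    M, N = x.shape
    out_len = N if dim == 0 else M  # length of the non-reduced dimension
    out = torch.empty(out_len, device=x.device, dtype=torch.float32)
    grid = lambda META: (triton.cdiv(out_len,
                                     META['BLOCK_SIZE_NON_REDUCE_DIM']),)
    triton_sum_kernel_1D_result_buffer_then_sum[grid](x, out, M, N, dim=dim)
    return out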
1204a122-b1ff-4519-8d58-a59f218f6150 | batched_matmul.py | MichaelWei7/torch | _inductor/triton_ops/batched_matmul.py | 4bfe6988308edc9544ddae94bfdcf83a4326b04a | 0 | @triton.heuristics({'EVEN_K': lambda args: args['K'] % (args['BLOCK_K'] *
args['SPLIT_K']) == 0})
@triton.autotune(configs=[triton.Config({'BLOCK_M': 128, 'BLOCK_N': 256,
'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=3, num_warps=8), triton.Config
({'BLOCK_M': 256, 'BLOCK_N': 128, 'BLOCK_K': 32, 'SPLIT_K': 1},
num_stages=3, num_warps=8), triton.Config({'BLOCK_M': 256, 'BLOCK_N':
64, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4), triton.
Config({'BLOCK_M': 64, 'BLOCK_N': 256, 'BLOCK_K': 32, 'SPLIT_K': 1},
num_stages=4, num_warps=4), triton.Config({'BLOCK_M': 128, 'BLOCK_N':
128, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4), triton.
Config({'BLOCK_M': 128, 'BLOCK_N': 64, 'BLOCK_K': 32, 'SPLIT_K': 1},
num_stages=4, num_warps=4), triton.Config({'BLOCK_M': 64, 'BLOCK_N':
128, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4), triton.
Config({'BLOCK_M': 128, 'BLOCK_N': 32, 'BLOCK_K': 32, 'SPLIT_K': 1},
num_stages=4, num_warps=4), triton.Config({'BLOCK_M': 64, 'BLOCK_N': 32,
'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=5, num_warps=2), triton.Config
({'BLOCK_M': 128, 'BLOCK_N': 256, 'BLOCK_K': 64, 'SPLIT_K': 1},
num_stages=3, num_warps=8), triton.Config({'BLOCK_M': 256, 'BLOCK_N':
128, 'BLOCK_K': 64, 'SPLIT_K': 1}, num_stages=3, num_warps=8), triton.
Config({'BLOCK_M': 256, 'BLOCK_N': 64, 'BLOCK_K': 64, 'SPLIT_K': 1},
num_stages=4, num_warps=4), triton.Config({'BLOCK_M': 64, 'BLOCK_N':
256, 'BLOCK_K': 64, 'SPLIT_K': 1}, num_stages=2, num_warps=4), triton.
Config({'BLOCK_M': 128, 'BLOCK_N': 128, 'BLOCK_K': 64, 'SPLIT_K': 1},
num_stages=2, num_warps=4), triton.Config({'BLOCK_M': 128, 'BLOCK_N':
256, 'BLOCK_K': 64, 'SPLIT_K': 1}, num_stages=1, num_warps=8), triton.
Config({'BLOCK_M': 256, 'BLOCK_N': 128, 'BLOCK_K': 64, 'SPLIT_K': 1},
num_stages=1, num_warps=8), triton.Config({'BLOCK_M': 256, 'BLOCK_N':
64, 'BLOCK_K': 64, 'SPLIT_K': 1}, num_stages=1, num_warps=4), triton.
Config({'BLOCK_M': 64, 'BLOCK_N': 256, 'BLOCK_K': 64, 'SPLIT_K': 1},
num_stages=1, num_warps=4), triton.Config({'BLOCK_M': 128, 'BLOCK_N':
128, 'BLOCK_K': 64, 'SPLIT_K': 1}, num_stages=1, num_warps=4), triton.
Config({'BLOCK_M': 128, 'BLOCK_N': 64, 'BLOCK_K': 64, 'SPLIT_K': 1},
num_stages=4, num_warps=4), triton.Config({'BLOCK_M': 64, 'BLOCK_N':
128, 'BLOCK_K': 64, 'SPLIT_K': 1}, num_stages=4, num_warps=4), triton.
Config({'BLOCK_M': 128, 'BLOCK_N': 32, 'BLOCK_K': 64, 'SPLIT_K': 1},
num_stages=4, num_warps=4), triton.Config({'BLOCK_M': 64, 'BLOCK_N': 32,
'BLOCK_K': 64, 'SPLIT_K': 1}, num_stages=5, num_warps=2), triton.Config
({'BLOCK_M': 128, 'BLOCK_N': 64, 'BLOCK_K': 64, 'SPLIT_K': 1},
num_stages=1, num_warps=4), triton.Config({'BLOCK_M': 64, 'BLOCK_N':
128, 'BLOCK_K': 64, 'SPLIT_K': 1}, num_stages=1, num_warps=4), triton.
Config({'BLOCK_M': 128, 'BLOCK_N': 32, 'BLOCK_K': 64, 'SPLIT_K': 1},
num_stages=1, num_warps=4), triton.Config({'BLOCK_M': 64, 'BLOCK_N': 32,
'BLOCK_K': 64, 'SPLIT_K': 1}, num_stages=1, num_warps=2)], key=['M',
'N', 'K'])
@triton.jit
def _kernel(A, B, C, M, N, K, stride_am, stride_ak, stride_bk, stride_bn,
stride_cm, stride_cn, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr,
BLOCK_K: tl.constexpr, GROUP_M: tl.constexpr, SPLIT_K: tl.constexpr,
EVEN_K: tl.constexpr, ACC_TYPE: tl.constexpr):
pid = tl.program_id(0)
pid_z = tl.program_id(1)
bid = tl.program_id(2)
grid_m = (M + BLOCK_M - 1) // BLOCK_M
grid_n = (N + BLOCK_N - 1) // BLOCK_N
width = GROUP_M * grid_n
group_id = pid // width
group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
pid_m = group_id * GROUP_M + pid % group_size
pid_n = pid % width // group_size
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
rk = pid_z * BLOCK_K + tl.arange(0, BLOCK_K)
A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn)
A += bid * M * K
B += bid * K * N
acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE)
for k in range(K, 0, -BLOCK_K * SPLIT_K):
if EVEN_K:
a = tl.load(A)
b = tl.load(B)
else:
a = tl.load(A, mask=rk[None, :] < k, other=0.0)
b = tl.load(B, mask=rk[:, None] < k, other=0.0)
acc += tl.dot(a, b)
A += BLOCK_K * SPLIT_K * stride_ak
B += BLOCK_K * SPLIT_K * stride_bk
acc = acc.to(C.dtype.element_ty)
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
C = C + (rm[:, None] * stride_cm + rn[None, :] * stride_cn)
C += bid * M * N
mask = (rm < M)[:, None] & (rn < N)[None, :]
if SPLIT_K == 1:
tl.store(C, acc, mask=mask)
else:
tl.atomic_add(C, acc, mask=mask)
| {
"Data Type": [
"fp32",
"int32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled",
"Blocked Access"
],
"Parallelization Strategy": [
"Persistent Kernels"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"BSD"
] | https://github.com/MichaelWei7/torch/blob/4bfe6988308edc9544ddae94bfdcf83a4326b04a/_inductor/triton_ops/batched_matmul.py |
b5fecd23-5b89-4b3c-8fad-037f53bf7408 | prefix_sums.py | intel/intel-xpu-backend-for-triton | benchmarks/triton_kernels_benchmark/prefix_sums.py | 6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2 | 0 | @triton.jit
def scan_kernel(x_ptr, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.
constexpr, AXIS: tl.constexpr):
range_m = tl.arange(0, BLOCK_SIZE_M)
range_n = tl.arange(0, BLOCK_SIZE_N)
x = tl.load(x_ptr + range_m[:, None] * BLOCK_SIZE_N + range_n[None, :])
x = tl.cumsum(x, axis=AXIS)
tl.store(x_ptr + range_m[:, None] * BLOCK_SIZE_N + range_n[None, :], x)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/intel/intel-xpu-backend-for-triton/blob/6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2/benchmarks/triton_kernels_benchmark/prefix_sums.py |
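A launch sketch for scan_kernel above: an in-place cumulative sum over one axis of a contiguous tile held entirely by a single program, so both dimensions must be powers of two (tl.arange requirement) and small enough to fit on chip.

import torch

def prefix_sum(x: torch.Tensor, axis: int) -> torch.Tensor:
    m, n = x.shape  # both assumed powers of two
    scan_kernel[(1,)](x, BLOCK_SIZE_M=m, BLOCK_SIZE_N=n, AXIS=axis)
    return x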
3a506554-8ef0-4725-a568-9de1c8c01198 | layer_norm_kernels.py | BobMcDear/attorch | attorch/layer_norm_kernels.py | da06cb6236bb47195e33fe3986ed21c675ed94cc | 0 | @triton.autotune(configs=warps_kernel_configs(), key=['batch_dim', 'feat_dim'])
@triton.heuristics({'BLOCK_SIZE_BATCH': BLOCK_SIZE_BATCH_heuristic,
'BLOCK_SIZE_FEAT': lambda args: next_power_of_2(args['feat_dim'])})
@triton.jit
def layer_norm_forward_kernel(input_pointer, weight_pointer, bias_pointer,
mean_pointer, inv_std_pointer, output_pointer, batch_dim, feat_dim,
input_batch_stride, input_feat_stride, output_batch_stride,
output_feat_stride, eps, scale_by_weight: tl.constexpr, add_bias: tl.
constexpr, save_stats: tl.constexpr, BLOCK_SIZE_BATCH: tl.constexpr,
BLOCK_SIZE_FEAT: tl.constexpr):
"""
Layer-normalizes the input.
Args:
input_pointer: Pointer to the input to layer-normalize.
The input must be of shape [batch_dim, feat_dim].
weight_pointer: Pointer to optional weights for affine transform.
The weights, if provided, must be of shape [feat_dim].
bias_pointer: Pointer to an optional bias vector for affine transform.
The bias vector, if provided, must be of shape [feat_dim].
mean_pointer: Pointer to an optional container the input's mean
is written to if save_stats is True.
The container, if provided, must be of shape [batch_dim].
inv_std_pointer: Pointer to an optional container the input's inverse
standard deviation is written to if save_stats is True.
The container, if provided, must be of shape [batch_dim].
output_pointer: Pointer to a container the result is written to.
The container must be of shape [batch_dim, feat_dim].
batch_dim: Batch dimension.
feat_dim: Dimensionality of the features.
input_batch_stride: Stride necessary to jump one element along the
input's batch dimension.
input_feat_stride: Stride necessary to jump one element along the
input's feature dimension.
output_batch_stride: Stride necessary to jump one element along the
output container's batch dimension.
output_feat_stride: Stride necessary to jump one element along the
output container's feature dimension.
eps: Epsilon added in the square root in the denominator
to avoid division by zero.
scale_by_weight: Flag for scaling the normalized output by weights.
add_bias: Flag for adding a bias vector to the normalized output
if scale_by_weight is True.
save_stats: Flag for saving the mean and standard deviation.
BLOCK_SIZE_BATCH: Block size across the batch dimension.
BLOCK_SIZE_FEAT: Block size across the feature dimension.
"""
batch_pid = tl.program_id(axis=0)
batch_offset = batch_pid * BLOCK_SIZE_BATCH + tl.arange(0, BLOCK_SIZE_BATCH
)
feat_offset = tl.arange(0, BLOCK_SIZE_FEAT)
batch_mask = batch_offset < batch_dim
feat_mask = feat_offset < feat_dim
input_pointer += input_batch_stride * batch_offset[:, None
] + input_feat_stride * feat_offset[None, :]
output_pointer += output_batch_stride * batch_offset[:, None
] + output_feat_stride * feat_offset[None, :]
input = tl.load(input_pointer, mask=batch_mask[:, None] & feat_mask[
None, :]).to(tl.float32)
mean = tl.sum(input, axis=1) / feat_dim
diff = tl.where(feat_mask[None, :], input - mean[:, None], 0)
inv_std = tl.rsqrt(tl.sum(diff * diff, axis=1) / feat_dim + eps)
if save_stats:
tl.store(mean_pointer + batch_offset, mean, mask=batch_mask)
tl.store(inv_std_pointer + batch_offset, inv_std, mask=batch_mask)
output = diff * inv_std[:, None]
if scale_by_weight:
weight = tl.load(weight_pointer + feat_offset, mask=feat_mask)
output *= weight
if add_bias:
bias = tl.load(bias_pointer + feat_offset, mask=feat_mask)
output += bias
tl.store(output_pointer, output, mask=batch_mask[:, None] & feat_mask[
None, :])
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization"
],
"Memory Access Pattern": [
"Strided Access",
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/BobMcDear/attorch/blob/da06cb6236bb47195e33fe3986ed21c675ed94cc/attorch/layer_norm_kernels.py |
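A launch sketch for layer_norm_forward_kernel above. BLOCK_SIZE_BATCH and BLOCK_SIZE_FEAT are filled in by the heuristics/autotune decorators, so the grid is a lambda over META; this sketch assumes weight and bias are always provided.

import torch
import triton

def layer_norm_fwd(x, weight, bias, eps=1e-5):
    batch_dim, feat_dim = x.shape
    out = torch.empty_like(x)
    mean = torch.empty(batch_dim, device=x.device, dtype=torch.float32)
    inv_std = torch.empty_like(mean)
    grid = lambda META: (triton.cdiv(batch_dim, META['BLOCK_SIZE_BATCH']),)
    layer_norm_forward_kernel[grid](
        x, weight, bias, mean, inv_std, out, batch_dim, feat_dim,
        x.stride(0), x.stride(1), out.stride(0), out.stride(1), eps,
        scale_by_weight=True, add_bias=True, save_stats=True)
    return out, mean, inv_std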
77b783f4-944a-4989-ab38-630883cdbda5 | fused_chunk.py | sustcsonglin/flash-linear-attention | fla/ops/gla/fused_chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.jit
def fwd_inner_chunk(q, k, g, A, s_k_h, s_k_t, s_k_d, scale, B: tl.constexpr,
H: tl.constexpr, T: tl.constexpr, K: tl.constexpr, BT: tl.constexpr, BK:
tl.constexpr):
i_k, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
p_k = tl.make_block_ptr(k + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
b_k = tl.load(p_k, boundary_check=(0, 1))
p_g = tl.make_block_ptr(g + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
b_g = tl.load(p_g, boundary_check=(0, 1)).to(tl.float32)
mask = i_k * BK + tl.arange(0, BK) < K
o_i = tl.arange(0, BT)
p_q = q + i_bh * s_k_h + i_k * BK + i_t * BT * K + tl.arange(0, BK)
p_gq = g + i_bh * s_k_h + i_k * BK + i_t * BT * K + tl.arange(0, BK)
p_A = A + (i_bh + i_k * B * H) * (tl.cdiv(T, BT) * BT * BT
) + i_t * BT * BT + tl.arange(0, BT)
for i in range(BT):
_q = tl.load(p_q, mask=mask, other=0) * scale
gq = tl.load(p_gq, mask=mask, other=0).to(tl.float32)
s = _q[None, :] * b_k * tl.exp(gq[None, :] - b_g)
score = tl.sum(s, axis=1)
score = tl.where(o_i <= i, score, 0)
tl.store(p_A, score.to(p_A.dtype.element_ty))
p_q += K
p_gq += K
p_A += BT
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gla/fused_chunk.py |
034eebc4-faf8-4f0a-b847-5d898fd31da7 | lstm_bw.py | NX-AI/flashrnn | flashrnn/flashrnn/triton_fused/lstm_bw.py | 3fca666a81c8740af4878d7bc5e2a51900e4fe14 | 0 | @triton.jit
def triton_tanh(x):
return (1.0 - tl.exp(-2.0 * x)) / (1.0 + tl.exp(-2.0 * x))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Activation Functions"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT",
"BSD"
] | https://github.com/NX-AI/flashrnn/blob/3fca666a81c8740af4878d7bc5e2a51900e4fe14/flashrnn/flashrnn/triton_fused/lstm_bw.py |
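The closed form in triton_tanh above overflows in exp(-2x) for large negative x. A common numerically stable rewrite (an assumption here, not part of the source) evaluates on |x| and restores the sign, since tanh is odd:

import triton
import triton.language as tl

@triton.jit
def triton_tanh_stable(x):
    # e = exp(-2|x|) lies in (0, 1], so neither numerator nor denominator
    # can overflow; tanh(-x) = -tanh(x) restores the sign afterwards
    e = tl.exp(-2.0 * tl.abs(x))
    t = (1.0 - e) / (1.0 + e)
    return tl.where(x >= 0, t, -t)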
33a2ed2a-c830-4a48-9359-fe4d01c30f36 | causal_conv1d_varlen.py | shaielc/MambaLinearCode | causal-conv1d/causal_conv1d/causal_conv1d_varlen.py | 567fc4ae197064540c1a558bbb60c78f55b95fef | 0 | @triton.jit
def _causal_conv1d_varlen_states(X, CU_SEQLENS, STATES, state_len, dim,
stride_x_seqlen, stride_x_dim, stride_states_batch,
stride_states_seqlen, stride_states_dim, BLOCK_M: tl.constexpr, BLOCK_N:
tl.constexpr):
batch_idx = tl.program_id(2)
STATES += batch_idx * stride_states_batch
end_idx = tl.load(CU_SEQLENS + batch_idx + 1)
start_idx = tl.maximum(tl.load(CU_SEQLENS + batch_idx), end_idx - state_len
)
rows = end_idx - (tl.program_id(1) + 1) * BLOCK_M + tl.arange(0, BLOCK_M)
cols = tl.program_id(0) * BLOCK_N + tl.arange(0, BLOCK_N)
x = tl.load(X + rows[:, None] * stride_x_seqlen + cols[None, :] *
stride_x_dim, mask=(rows[:, None] >= start_idx) & (cols[None, :] <
dim), other=0)
rows_states = state_len - (tl.program_id(1) + 1) * BLOCK_M + tl.arange(
0, BLOCK_M)
tl.store(STATES + rows_states[:, None] * stride_states_seqlen + cols[
None, :] * stride_states_dim, x, mask=(rows_states[:, None] >= 0) &
(cols[None, :] < dim))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Recurrent Neural Networks"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"BSD",
"Apache"
] | https://github.com/shaielc/MambaLinearCode/blob/567fc4ae197064540c1a558bbb60c78f55b95fef/causal-conv1d/causal_conv1d/causal_conv1d_varlen.py |
fc92abc9-d8fd-4398-8a22-d121aebc63d3 | logcumsumexp.py | sustcsonglin/flash-linear-attention | fla/ops/utils/logcumsumexp.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.autotune(configs=[triton.Config({'BT': 16}, num_warps=2), triton.
Config({'BT': 16}, num_warps=4), triton.Config({'BT': 16}, num_warps=8),
triton.Config({'BT': 32}, num_warps=2), triton.Config({'BT': 32},
num_warps=4), triton.Config({'BT': 32}, num_warps=8), triton.Config({
'BT': 64}, num_warps=2), triton.Config({'BT': 64}, num_warps=4), triton
.Config({'BT': 64}, num_warps=8)], key=['S'])
@triton.jit
def logcumsumexp_fwd_kernel(s, z, s_s_h, s_s_t, s_s_d, T: tl.constexpr, S:
tl.constexpr, BT: tl.constexpr):
i_bh = tl.program_id(0)
o_i = tl.arange(0, BT)
m_s = tl.where(o_i[:, None] >= o_i[None, :], 1.0, 0.0)
b_mp = tl.full([S], float('-inf'), dtype=tl.float32)
b_zp = tl.zeros([S], dtype=tl.float32)
for i_t in range(tl.cdiv(T, BT)):
p_s = tl.make_block_ptr(s + i_bh * s_s_h, (T, S), (s_s_t, s_s_d), (
i_t * BT, 0), (BT, S), (1, 0))
p_z = tl.make_block_ptr(z + i_bh * s_s_h, (T, S), (s_s_t, s_s_d), (
i_t * BT, 0), (BT, S), (1, 0))
b_s = tl.load(p_s, boundary_check=(0, 1)).to(tl.float32)
b_mc = tl.max(b_s, 0)
if i_t > 0:
b_mc = tl.maximum(b_mp, b_mc)
b_zp = b_zp * tl.exp(b_mp - b_mc)
b_s = tl.exp(b_s - b_mc)
b_z = tl.dot(m_s, b_s, allow_tf32=False) + b_zp
b_zc = tl.max(b_z, 0)
b_mp = b_mc
b_zp = b_zc
b_z = tl.log(tl.where(b_z != 0, b_z, 1e-20)) + b_mc
tl.store(p_z, b_z.to(p_z.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Softmax"
],
"Memory Access Pattern": [
"Blocked Access"
],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/utils/logcumsumexp.py |
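The carry pair (b_mp, b_zp) above implements a numerically stable blockwise log-cumsum-exp along T. As I read the code, for each BT-row block with incoming running max $m_p$ and rescaled running sum $z_p$:

$$m_c = \max\big(m_p,\ \max_i s_i\big), \qquad z_p \leftarrow z_p\, e^{m_p - m_c}, \qquad z_t = \log\Big(z_p + \sum_{i \le t} e^{s_i - m_c}\Big) + m_c,$$

which telescopes across blocks to $z_t = \log \sum_{i \le t} e^{s_i}$.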
bea526aa-fd9a-4f16-8749-f490743aecce | attn_qk_int8_per_block_h64.py | rodjjo/editorium | editorium/app/server/pipelines/cogvideo/sageattention/attn_qk_int8_per_block_h64.py | 7b92e2c92a144bf23bbe6fe88e3d513ffcf7d694 | 0 | @triton.jit
def _attn_fwd_inner(acc, l_i, m_i, q, q_scale, K_ptrs, K_scale_ptr, V_ptrs,
start_m, BLOCK_M: tl.constexpr, HEAD_DIM: tl.constexpr, BLOCK_N: tl.
constexpr, STAGE: tl.constexpr, offs_m: tl.constexpr, offs_n: tl.
constexpr, N_CTX: tl.constexpr):
lo, hi = 0, N_CTX
for start_n in range(lo, hi, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
k_mask = offs_n[None, :] < N_CTX - start_n
k = tl.load(K_ptrs, mask=k_mask)
k_scale = tl.load(K_scale_ptr)
qk = tl.dot(q, k).to(tl.float32) * q_scale * k_scale
m_ij = tl.maximum(m_i, tl.max(qk, 1))
qk = qk - m_ij[:, None]
p = tl.math.exp2(qk)
l_ij = tl.sum(p, 1)
alpha = tl.math.exp2(m_i - m_ij)
l_i = l_i * alpha + l_ij
acc = acc * alpha[:, None]
v = tl.load(V_ptrs, mask=offs_n[:, None] < N_CTX - start_n)
p = p.to(tl.float16)
acc += tl.dot(p, v, out_dtype=tl.float16)
m_i = m_ij
K_ptrs += BLOCK_N * HEAD_DIM
K_scale_ptr += 1
V_ptrs += BLOCK_N * HEAD_DIM
return acc, l_i
| {
"Data Type": [
"fp16"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"Apache"
] | https://github.com/rodjjo/editorium/blob/7b92e2c92a144bf23bbe6fe88e3d513ffcf7d694/editorium/app/server/pipelines/cogvideo/sageattention/attn_qk_int8_per_block_h64.py |
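The loop above is the standard online-softmax accumulation written in base 2 (exp2 typically maps to a cheap hardware instruction; the code assumes qk was pre-scaled so that $2^{qk}$ realizes the intended softmax base). Per BLOCK_N tile, as I read it:

$$m_{ij} = \max\big(m_i,\ \max_j qk_j\big), \quad \alpha = 2^{\,m_i - m_{ij}}, \quad l_i \leftarrow \alpha\, l_i + \sum_j 2^{\,qk_j - m_{ij}}, \quad \mathrm{acc} \leftarrow \alpha\,\mathrm{acc} + P\,V,$$

with $P_j = 2^{\,qk_j - m_{ij}}$ cast to fp16 before the dot with V.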
eef6506d-e789-4c85-8695-427fcf745d3f | atomic.py | daemyung/practice-triton | atomic.py | 27f727726f1507c8380a1c11751d851c7c4a07ce | 0 | @triton.jit
def atomic_kernel(x_ptr, increment):
tl.atomic_add(x_ptr, increment)
| {
"Data Type": [
"uint8"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/daemyung/practice-triton/blob/27f727726f1507c8380a1c11751d851c7c4a07ce/atomic.py |
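Every program instance adds increment to the same address, so a grid of N programs accumulates N * increment. A minimal sketch, assuming a CUDA-capable torch build:

import torch

x = torch.zeros(1, dtype=torch.float32, device='cuda')
atomic_kernel[(1000,)](x, 0.5)  # 1000 programs, each atomically adds 0.5
assert x.item() == 500.0  # sums of 0.5 at this scale are exact in fp32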
c87e2484-0947-436e-9ac0-d1273f24b0a3 | rmsnorm.py | ardywibowo/triton-mode | kernels/rmsnorm.py | 5cd773ec95e25e23c6b75e312c7a9a1c6eb650b1 | 0 | @triton.jit
def triton_rmsnorm_backward(dY_ptr, dY_row_stride, X_ptr, X_row_stride,
X_dtype: tl.constexpr, W_ptr, RSTD_ptr, RSTD_row_stride, dW_ptr,
dW_row_stride, n_rows, n_cols, offset, rows_per_program: tl.constexpr,
BLOCK_SIZE: tl.constexpr):
row_block_id = tl.program_id(0)
row_start = row_block_id * rows_per_program
row_end = min((row_block_id + 1) * rows_per_program, n_rows)
col_offsets = tl.arange(0, BLOCK_SIZE)
mask = col_offsets < n_cols
dW_row = tl.zeros((BLOCK_SIZE,), dtype=tl.float32)
dY_ptr += row_start * dY_row_stride
X_ptr += row_start * X_row_stride
RSTD_ptr += row_start
W_row = tl.load(W_ptr + col_offsets, mask=mask, other=0.0)
W_row = W_row + offset
for _ in range(row_start, row_end):
dY_row = tl.load(dY_ptr + col_offsets, mask=mask, other=0.0)
X_row = tl.load(X_ptr + col_offsets, mask=mask, other=0.0)
rstd_row = tl.load(RSTD_ptr)
X_row = X_row.to(tl.float32)
m = (dY_row * W_row).to(tl.float32)
dX_row = rstd_row * m
dX_row += rstd_row * (-(1 / n_cols) * rstd_row * rstd_row * tl.sum(
m * X_row, axis=0) * X_row)
dW_row += dY_row * (X_row * rstd_row).to(X_dtype)
tl.store(dY_ptr + col_offsets, dX_row.to(X_dtype), mask=mask)
dY_ptr += dY_row_stride
X_ptr += X_row_stride
RSTD_ptr += RSTD_row_stride
tl.store(dW_ptr + row_block_id * dW_row_stride + col_offsets, dW_row,
mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Normalization"
],
"Memory Access Pattern": [
"Blocked Access"
],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/ardywibowo/triton-mode/blob/5cd773ec95e25e23c6b75e312c7a9a1c6eb650b1/kernels/rmsnorm.py |
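Per row, the loop computes the standard RMSNorm backward. With $r$ the stored reciprocal RMS, $w$ the offset weight, and $n$ = n_cols, the stores above correspond to (my reading, not from the source):

$$m = \nabla y \odot w, \qquad \nabla x = r\,m - \frac{r^{3}}{n}\Big(\sum_j m_j x_j\Big)\, x, \qquad \nabla W \mathrel{+}= \nabla y \odot (r\,x),$$

with $\nabla x$ written back in place over dY and the per-program $\nabla W$ partials left for a host-side row reduction.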
61fe3795-54b0-4858-973e-4e780e63884b | fused_chunk.py | sustcsonglin/flash-linear-attention | fla/ops/based/fused_chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.jit
def fused_chunk_based_bwd_kernel(q, k, v, do, dz, dq, dk, dv, s_k_h, s_k_t,
s_k_d, s_v_h, s_v_t, s_v_d, scale, B: tl.constexpr, H: tl.constexpr, T:
tl.constexpr, K: tl.constexpr, V: tl.constexpr, BT: tl.constexpr, BK:
tl.constexpr, BV: tl.constexpr):
i_v, i_k, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
o_i = tl.arange(0, BT)
m_s = o_i[:, None] >= o_i[None, :]
b_h_1o = tl.zeros([BV, BK], dtype=tl.float32)
b_h_2o = tl.zeros([BV, BK * BK], dtype=tl.float32)
k_1o = tl.zeros([1, BK], dtype=tl.float32)
k_2o = tl.zeros([1, BK * BK], dtype=tl.float32)
for i in range(0, tl.cdiv(T, BT)):
p_q = tl.make_block_ptr(q + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), (
i * BT, i_k * BK), (BT, BK), (1, 0))
p_k = tl.make_block_ptr(k + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), (
i * BT, i_k * BK), (BT, BK), (1, 0))
p_v = tl.make_block_ptr(v + i_bh * s_v_h, (V, T), (s_v_d, s_v_t), (
i_v * BV, i * BT), (BV, BT), (0, 1))
p_do = tl.make_block_ptr(do + i_bh * s_v_h, (T, V), (s_v_t, s_v_d),
(i * BT, i_v * BV), (BT, BV), (1, 0))
p_dq = tl.make_block_ptr(dq + (i_bh + i_v * B * H) * s_k_h, (T, K),
(s_k_t, s_k_d), (i * BT, i_k * BK), (BT, BK), (1, 0))
p_dz = dz + i_bh * T + tl.arange(0, BT) + i * BT
b_dq = tl.zeros([BT, BK], dtype=tl.float32)
b_q = tl.load(p_q, boundary_check=(0, 1))
b_q = (b_q * scale).to(b_q.dtype)
b_k = tl.load(p_k, boundary_check=(0, 1))
b_do = tl.load(p_do, boundary_check=(0, 1)).to(b_q.dtype)
b_dz = tl.load(p_dz, mask=tl.arange(0, BT) + i * BT < T)
b_v = tl.load(p_v, boundary_check=(0, 1))
b_dq += tl.dot(b_do, b_h_1o.to(b_do.dtype), allow_tf32=False)
if i_v == 0:
b_dq += b_dz[:, None] * k_1o
b_dq_2o = tl.dot(b_do, b_h_2o.to(b_do.dtype), allow_tf32=False) * 0.5
if i_v == 0:
b_dq_2o += b_dz[:, None] * k_2o * 0.5
b_dq_2o = tl.reshape(b_dq_2o, [BT, BK, BK])
b_dq += tl.sum(b_dq_2o * b_q[:, :, None], axis=1)
b_dq += tl.sum(b_dq_2o * b_q[:, None, :], axis=2)
b_dq *= scale
b_ds = tl.dot(b_do, b_v, allow_tf32=False)
if i_v == 0:
b_ds += b_dz[:, None]
b_ds = tl.where(m_s, b_ds, 0) * scale
b_s = tl.dot(b_q, tl.trans(b_k), allow_tf32=False)
b_s = tl.where(m_s, b_s, 0)
b_dq += tl.dot((b_ds * (1 + b_s)).to(b_q.dtype), b_k, allow_tf32=False)
tl.store(p_dq, b_dq.to(p_dq.dtype.element_ty), boundary_check=(0, 1))
b_k_2o = b_k[:, :, None] * b_k[:, None, :]
b_k_2o = tl.reshape(b_k_2o, [BT, BK * BK]).to(b_k.dtype)
b_h_2o = b_h_2o + tl.dot(b_v, b_k_2o.to(b_v.dtype), allow_tf32=False)
b_h_1o = b_h_1o + tl.dot(b_v, b_k, allow_tf32=False)
if i_v == 0:
k_1o += tl.sum(b_k, axis=0)[None, :]
k_2o += tl.sum(b_k_2o, axis=0)[None, :]
tl.debug_barrier()
b_h_1o = None
b_h_2o = None
b_dh_1o = tl.zeros([BK, BV], dtype=tl.float32)
b_dh_2o = tl.zeros([BK * BK, BV], dtype=tl.float32)
b_dh_0o = tl.zeros([BV], dtype=tl.float32)
m_s = tl.arange(0, BT)[:, None] <= tl.arange(0, BT)[None, :]
dq_1o = tl.zeros([1, BK], dtype=tl.float32)
dq_2o = tl.zeros([BK * BK, 1], dtype=tl.float32)
for i in range(tl.cdiv(T, BT) * BT - BT, -BT, -BT):
p_q = tl.make_block_ptr(q + i_bh * s_k_h, (K, T), (s_k_d, s_k_t), (
i_k * BK, i), (BK, BT), (0, 1))
p_k = tl.make_block_ptr(k + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), (
i, i_k * BK), (BT, BK), (1, 0))
p_v = tl.make_block_ptr(v + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), (
i, i_v * BV), (BT, BV), (1, 0))
p_do = tl.make_block_ptr(do + i_bh * s_v_h, (T, V), (s_v_t, s_v_d),
(i, i_v * BV), (BT, BV), (1, 0))
p_dk = tl.make_block_ptr(dk + (i_bh + i_v * B * H) * s_k_h, (T, K),
(s_k_t, s_k_d), (i, i_k * BK), (BT, BK), (1, 0))
p_dv = tl.make_block_ptr(dv + (i_bh + i_k * B * H) * s_v_h, (T, V),
(s_v_t, s_v_d), (i, i_v * BV), (BT, BV), (1, 0))
p_dz = dz + i_bh * T + tl.arange(0, BT) + i
b_dk = tl.zeros([BT, BK], dtype=tl.float32)
b_dv = tl.zeros([BT, BV], dtype=tl.float32)
b_q = tl.load(p_q, boundary_check=(0, 1))
b_k = tl.load(p_k, boundary_check=(0, 1))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_do = tl.load(p_do, boundary_check=(0, 1)).to(b_q.dtype)
b_dz = tl.load(p_dz, mask=tl.arange(0, BT) + i < T)
b_q = (b_q * scale).to(b_k.dtype)
b_ds = tl.dot(b_v, tl.trans(b_do), allow_tf32=False)
if i_v == 0:
b_ds += b_dz[None, :]
b_ds = tl.where(m_s, b_ds, 0)
b_s = tl.dot(b_k, b_q, allow_tf32=False)
b_s2 = 1 + b_s + 0.5 * b_s * b_s
b_s = tl.where(m_s, b_s, 0)
b_s2 = tl.where(m_s, b_s2, 0)
b_ds *= 1 + b_s
b_dk += tl.dot(b_ds.to(b_k.dtype), tl.trans(b_q), allow_tf32=False)
b_dv += tl.dot(b_s2.to(b_do.dtype), b_do, allow_tf32=False)
b_k_2o = b_k[:, :, None] * b_k[:, None, :]
b_k_2o = tl.reshape(b_k_2o, [BT, BK * BK]).to(b_k.dtype)
b_dv += tl.dot(b_k, b_dh_1o.to(b_k.dtype), allow_tf32=False)
b_dv += tl.dot(b_k_2o, b_dh_2o.to(b_k.dtype), allow_tf32=False)
b_dv += b_dh_0o
b_dk += tl.dot(b_v, tl.trans(b_dh_1o).to(b_k.dtype), allow_tf32=False)
if i_v == 0:
b_dk += dq_1o
b_dk_2o = tl.dot(b_dh_2o.to(b_k.dtype), tl.trans(b_v), allow_tf32=False
)
if i_v == 0:
b_dk_2o += dq_2o
b_dk_2o = tl.reshape(b_dk_2o, [BK, BK, BT])
b_k_fp32 = tl.trans(b_k.to(tl.float32))
b_dk2 = tl.sum(b_dk_2o * b_k_fp32[:, None, :], axis=0)
b_dk2 += tl.sum(b_dk_2o * b_k_fp32[None, :, :], axis=1)
b_dk += tl.trans(b_dk2)
b_dh_0o += tl.sum(b_do, axis=0)
b_dh_1o = b_dh_1o + tl.dot(b_q, b_do, allow_tf32=False)
b_q_2o = b_q[None, :, :] * b_q[:, None, :]
b_q_2o = tl.reshape(b_q_2o, [BK * BK, BT]).to(b_k.dtype)
b_dh_2o = b_dh_2o + tl.dot(b_q_2o, b_do, allow_tf32=False) * 0.5
if i_v == 0:
dq_1o += tl.sum(b_dz[None, :] * b_q, axis=1)[None, :]
dq_2o += (tl.sum(b_dz[None, :] * b_q_2o, axis=1) * 0.5)[:, None]
tl.store(p_dk, b_dk.to(p_dk.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_dv, b_dv.to(p_dv.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [
"Blocked Access"
],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/based/fused_chunk.py |
f8d8edc6-bf70-4ed3-b262-9a5b4a3846e7 | parallel.py | sustcsonglin/flash-linear-attention | fla/ops/simple_gla/parallel.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.jit
def parallel_simple_gla_bwd_kernel_dkv(i_bh, i_t, i_k, i_v, i_kv, q, k, v,
g, do, dk, dv, dg, s_k_h, s_k_t, s_v_h, s_v_t, scale, B: tl.constexpr,
H: tl.constexpr, T: tl.constexpr, K: tl.constexpr, V: tl.constexpr, BT:
tl.constexpr, BS: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr):
p_k = tl.make_block_ptr(k + i_bh * s_k_h, (T, K), (s_k_t, 1), (i_t * BT,
i_k * BK), (BT, BK), (1, 0))
p_v = tl.make_block_ptr(v + i_bh * s_v_h, (T, V), (s_v_t, 1), (i_t * BT,
i_v * BV), (BT, BV), (1, 0))
p_gk = tl.make_block_ptr(g + i_bh * T, (T,), (1,), (i_t * BT,), (BT,), (0,)
)
b_k = tl.load(p_k, boundary_check=(0, 1))
b_dk = tl.zeros([BT, BK], dtype=tl.float32)
b_v = tl.load(p_v, boundary_check=(0, 1))
b_dv = tl.zeros([BT, BV], dtype=tl.float32)
b_gk = tl.load(p_gk, boundary_check=(0,))
NTS = tl.cdiv(T, BS)
b_kg = (b_k * tl.exp(tl.load(g + i_bh * T + min(i_t * BT + BT, T) - 1) -
b_gk)[:, None]).to(b_k.dtype)
for i_s in range(NTS * BS - BS, (i_t + 1) * BT - BS, -BS):
p_q = tl.make_block_ptr(q + i_bh * s_k_h, (T, K), (s_k_t, 1), (i_s,
i_k * BK), (BS, BK), (1, 0))
p_do = tl.make_block_ptr(do + i_bh * s_v_h, (T, V), (s_v_t, 1), (
i_s, i_v * BV), (BS, BV), (1, 0))
p_gq = tl.make_block_ptr(g + i_bh * T, (T,), (1,), (i_s,), (BS,), (0,))
b_q = tl.load(p_q, boundary_check=(0, 1))
b_gq = tl.load(p_gq, boundary_check=(0,))
b_gp = tl.load(g + i_bh * T + min(i_s + BS, T) - 1)
b_gn = tl.load(g + i_bh * T + i_s - 1) if i_s % BT > 0 else 0.0
b_do = tl.load(p_do, boundary_check=(0, 1))
b_do = (b_do * tl.exp(b_gq - b_gn)[:, None]).to(b_do.dtype)
b_dk *= tl.exp(b_gp - b_gn)
b_dv *= tl.exp(b_gp - b_gn)
b_ds = tl.dot(b_v, tl.trans(b_do), allow_tf32=False)
b_s = tl.dot(b_kg, tl.trans(b_q), allow_tf32=False)
b_dk += tl.dot(b_ds.to(b_q.dtype), b_q, allow_tf32=False)
b_dv += tl.dot(b_s.to(b_do.dtype), b_do, allow_tf32=False)
b_dk *= tl.exp(tl.load(g + i_bh * T + min(T, i_t * BT + BT) - 1) - b_gk)[
:, None] * scale
b_dv *= scale
tl.debug_barrier()
o_q = i_t * BT + tl.arange(0, BS)
o_k = i_t * BT + tl.arange(0, BT)
for i_s in range(i_t * BT, min((i_t + 1) * BT, T), BS):
p_q = tl.make_block_ptr(q + i_bh * s_k_h, (T, K), (s_k_t, 1), (i_s,
i_k * BK), (BS, BK), (1, 0))
p_do = tl.make_block_ptr(do + i_bh * s_v_h, (T, V), (s_v_t, 1), (
i_s, i_v * BV), (BS, BV), (1, 0))
p_gq = tl.make_block_ptr(g + i_bh * T, (T,), (1,), (i_s,), (BS,), (0,))
b_q = tl.load(p_q, boundary_check=(0, 1))
b_do = tl.load(p_do, boundary_check=(0, 1))
b_gq = tl.load(p_gq, boundary_check=(0,))
m_s = o_k[:, None] <= o_q[None, :]
d_s = tl.where(m_s, tl.exp(-b_gk[:, None] + b_gq[None, :]), 0) * scale
b_ds = tl.dot(b_v, tl.trans(b_do), allow_tf32=False) * d_s
b_s = tl.dot(b_k, tl.trans(b_q), allow_tf32=False) * d_s
b_dk += tl.dot(b_ds.to(b_q.dtype), b_q, allow_tf32=False)
b_dv += tl.dot(b_s.to(b_q.dtype), b_do, allow_tf32=False)
o_q += BS
p_dk = tl.make_block_ptr(dk + (i_v * B * H + i_bh) * s_k_h, (T, K), (
s_k_t, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_dv = tl.make_block_ptr(dv + (i_k * B * H + i_bh) * s_v_h, (T, V), (
s_v_t, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_dg = tl.make_block_ptr(dg + (i_kv * B * H + i_bh) * T, (T,), (1,), (
i_t * BT,), (BT,), (0,))
tl.store(p_dk, b_dk.to(p_dk.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_dv, b_dv.to(p_dv.dtype.element_ty), boundary_check=(0, 1))
b_dg = tl.load(p_dg, boundary_check=(0,))
b_dg -= tl.sum(b_dk * b_k, 1)
tl.store(p_dg, b_dg.to(p_dg.dtype.element_ty), boundary_check=(0,))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [
"Blocked Access"
],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/simple_gla/parallel.py |
eb17d738-56f7-4b95-9035-3e3775974f90 | fused_chunk.py | sustcsonglin/flash-linear-attention | fla/ops/retention/fused_chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.jit
def fused_chunk_retention_bwd_kernel(q, k, v, do, dq, dk, dv, h0, scale, B:
tl.constexpr, H: tl.constexpr, T: tl.constexpr, K: tl.constexpr, V: tl.
constexpr, BT: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr,
USE_INITIAL_STATE: tl.constexpr, CHECK: tl.constexpr):
i_v, i_k, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_h = i_bh % H
o_i = tl.arange(0, BT)
b_b = tl.math.log2(1 - tl.math.exp2(-5 - i_h * 1.0))
d_q, d_k = tl.math.exp2((o_i + 1) * b_b) * scale, tl.math.exp2((BT -
o_i - 1) * b_b)
d_b = tl.math.exp2(BT * b_b)
m_s = o_i[:, None] >= o_i[None, :]
d_s = tl.where(m_s, tl.math.exp2((o_i[:, None] - o_i[None, :]) * b_b), 0
) * scale
b_h = tl.zeros([BV, BK], dtype=tl.float32)
if USE_INITIAL_STATE:
p_h = tl.make_block_ptr(h0 + i_bh * K * V, (V, K), (1, V), (i_v *
BV, i_k * BK), (BV, BK), (0, 1))
b_h = tl.load(p_h, boundary_check=(0, 1)).to(tl.float32)
for i in range(0, tl.cdiv(T, BT)):
p_k = tl.make_block_ptr(k + i_bh * T * K, (T, K), (K, 1), (i * BT,
i_k * BK), (BT, BK), (1, 0))
p_v = tl.make_block_ptr(v + i_bh * T * V, (V, T), (1, V), (i_v * BV,
i * BT), (BV, BT), (0, 1))
p_do = tl.make_block_ptr(do + i_bh * T * V, (T, V), (V, 1), (i * BT,
i_v * BV), (BT, BV), (1, 0))
p_dq = tl.make_block_ptr(dq + (i_bh + i_v * B * H) * T * K, (T, K),
(K, 1), (i * BT, i_k * BK), (BT, BK), (1, 0))
b_k = tl.load(p_k, boundary_check=(0, 1))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_do = tl.load(p_do, boundary_check=(0, 1))
b_dd = (b_do * d_q[:, None]).to(b_do.dtype)
b_ds = tl.dot(b_do, b_v, allow_tf32=False)
b_ds = (b_ds * d_s).to(b_k.dtype)
b_dq = tl.dot(b_ds, b_k, allow_tf32=False)
if CHECK and i == 0:
b_dq += tl.dot(b_dd, b_h.to(b_k.dtype), allow_tf32=False)
b_h = d_b * b_h + tl.dot((b_v * d_k[None, :]).to(b_k.dtype),
b_k, allow_tf32=False)
else:
b_dq += tl.dot(b_dd, b_h.to(b_k.dtype), allow_tf32=False)
b_h = d_b * b_h + tl.dot((b_v * d_k[None, :]).to(b_k.dtype),
b_k, allow_tf32=False)
tl.store(p_dq, b_dq.to(p_dq.dtype.element_ty), boundary_check=(0, 1))
b_h = None
tl.debug_barrier()
d_s = tl.trans(d_s)
b_dh = tl.zeros([BK, BV], dtype=tl.float32)
for i in range(1, tl.cdiv(T, BT) + 1):
p_q = tl.make_block_ptr(q + i_bh * T * K, (K, T), (1, K), (i_k * BK,
T - i * BT), (BK, BT), (0, 1))
p_k = tl.make_block_ptr(k + i_bh * T * K, (T, K), (K, 1), (T - i *
BT, i_k * BK), (BT, BK), (1, 0))
p_v = tl.make_block_ptr(v + i_bh * T * V, (T, V), (V, 1), (T - i *
BT, i_v * BV), (BT, BV), (1, 0))
p_do = tl.make_block_ptr(do + i_bh * T * V, (T, V), (V, 1), (T - i *
BT, i_v * BV), (BT, BV), (1, 0))
p_dk = tl.make_block_ptr(dk + (i_bh + i_v * B * H) * T * K, (T, K),
(K, 1), (T - i * BT, i_k * BK), (BT, BK), (1, 0))
p_dv = tl.make_block_ptr(dv + (i_bh + i_k * B * H) * T * V, (T, V),
(V, 1), (T - i * BT, i_v * BV), (BT, BV), (1, 0))
b_q = tl.load(p_q, boundary_check=(0, 1))
b_k = tl.load(p_k, boundary_check=(0, 1))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_do = tl.load(p_do, boundary_check=(0, 1))
b_dd = (b_do * d_q[:, None]).to(b_do.dtype)
b_ds = tl.dot(b_v, tl.trans(b_do), allow_tf32=False)
b_ds = (b_ds * d_s).to(b_k.dtype)
b_s = tl.dot(b_k, b_q, allow_tf32=False) * d_s
b_dk = tl.dot(b_ds, tl.trans(b_q), allow_tf32=False)
b_dv = tl.dot(b_s.to(b_q.dtype), b_do, allow_tf32=False)
if CHECK and i == 1:
b_dk += tl.dot(b_v, tl.trans(b_dh).to(b_v.dtype), allow_tf32=False
) * d_k[:, None]
b_dv += tl.dot(b_k, b_dh.to(b_k.dtype), allow_tf32=False) * d_k[
:, None]
b_dh = d_b * b_dh + tl.dot(b_q, b_dd, allow_tf32=False)
else:
b_dk += tl.dot(b_v, tl.trans(b_dh).to(b_v.dtype), allow_tf32=False
) * d_k[:, None]
b_dv += tl.dot(b_k, b_dh.to(b_k.dtype), allow_tf32=False) * d_k[
:, None]
b_dh = d_b * b_dh + tl.dot(b_q, b_dd, allow_tf32=False)
tl.store(p_dk, b_dk.to(p_dk.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_dv, b_dv.to(p_dv.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [
"Blocked Access"
],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/retention/fused_chunk.py |
fdc8c514-97dc-4a5b-b5ee-8cb84a245397 | lightseq_async_attn.py | EvolvingLMMs-Lab/LongVA | easy_context/dist_flash_attn/lightseq_async_attn.py | 76b7c33946936361eeb5a18b2c9fcc5fe63e9434 | 0 | @triton.jit
def _rescale_kernel(peer_m, m, peer_l, l, peer_o, o, L, stride_oz,
stride_oh, stride_om, stride_on, Z, H, N_CTX, BLOCK_M: tl.constexpr,
BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr, LAST_STEP: tl.constexpr
):
start_m = tl.program_id(0)
off_hz = tl.program_id(1)
o_offset = off_hz * stride_oh
peer_o_block_ptr = tl.make_block_ptr(base=peer_o + o_offset, shape=(
N_CTX, BLOCK_DMODEL), strides=(stride_om, stride_on), offsets=(
start_m * BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_DMODEL), order=(
1, 0))
o_block_ptr = tl.make_block_ptr(base=o + o_offset, shape=(N_CTX,
BLOCK_DMODEL), strides=(stride_om, stride_on), offsets=(start_m *
BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0))
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
peer_m_ptrs = peer_m + off_hz * N_CTX + offs_m
m_ptrs = m + off_hz * N_CTX + offs_m
peer_l_ptrs = peer_l + off_hz * N_CTX + offs_m
l_ptrs = l + off_hz * N_CTX + offs_m
peer_m_i = tl.load(peer_m_ptrs)
peer_m_i = peer_m_i.to(tl.float32)
m_i = tl.load(m_ptrs)
m_i = m_i.to(tl.float32)
peer_l_i = tl.load(peer_l_ptrs)
peer_l_i = peer_l_i.to(tl.float32)
l_i = tl.load(l_ptrs)
l_i = l_i.to(tl.float32)
peer_acc = tl.load(peer_o_block_ptr)
peer_acc = peer_acc.to(tl.float32)
acc = tl.load(o_block_ptr)
acc = acc.to(tl.float32)
lo = 0
hi = N_CTX
m_i_sync = tl.maximum(m_i, peer_m_i)
alpha = tl.math.exp2(m_i - m_i_sync)
peer_alpha = tl.math.exp2(peer_m_i - m_i_sync)
acc_scale = l_i * 0 + alpha
peer_acc_scale = peer_l_i * 0 + peer_alpha
acc *= acc_scale[:, None]
peer_acc *= peer_acc_scale[:, None]
acc += peer_acc
l_i = l_i * acc_scale + peer_l_i * peer_acc_scale
tl.store(m_ptrs, m_i_sync)
tl.store(l_ptrs, l_i)
if LAST_STEP:
acc = acc / l_i[:, None]
L_ptrs = L + off_hz * N_CTX + offs_m
tl.store(L_ptrs, m_i_sync / 1.44269504 + tl.math.log(l_i))
tl.store(o_block_ptr, acc.to(tl.bfloat16))
| {
"Data Type": [
"fp32",
"bf16"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Blocked Access"
],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"Apache"
] | https://github.com/EvolvingLMMs-Lab/LongVA/blob/76b7c33946936361eeb5a18b2c9fcc5fe63e9434/easy_context/dist_flash_attn/lightseq_async_attn.py |
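The kernel merges two partial attention results with the associative softmax combine, in base 2:

$$m' = \max(m, m_{\mathrm{peer}}), \qquad l' = l\,2^{\,m - m'} + l_{\mathrm{peer}}\,2^{\,m_{\mathrm{peer}} - m'}, \qquad \mathrm{acc}' = \mathrm{acc}\,2^{\,m - m'} + \mathrm{acc}_{\mathrm{peer}}\,2^{\,m_{\mathrm{peer}} - m'},$$

and on the last step it normalizes $\mathrm{acc}'/l'$ and stores the log-sum-exp $L = m'\ln 2 + \log l'$ (the 1.44269504 divisor is $\log_2 e$).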
04b500e9-43c3-4a8d-8bdf-3fa2cecce9bc | prefix_prefill.py | IBM/vllm | vllm/attention/ops/prefix_prefill.py | 99523dd62be2ecf6c6db15e8133aaaf7855e7e86 | 0 | @triton.jit
def _fwd_kernel(Q, K, V, K_cache, V_cache, B_Loc, sm_scale, k_scale,
v_scale, B_Start_Loc, B_Seqlen, B_Ctxlen, block_size, x, Out,
stride_b_loc_b, stride_b_loc_s, stride_qbs, stride_qh, stride_qd,
stride_kbs, stride_kh, stride_kd, stride_vbs, stride_vh, stride_vd,
stride_obs, stride_oh, stride_od, stride_k_cache_bs, stride_k_cache_h,
stride_k_cache_d, stride_k_cache_bl, stride_k_cache_x,
stride_v_cache_bs, stride_v_cache_h, stride_v_cache_d,
stride_v_cache_bl, num_queries_per_kv: int, BLOCK_M: tl.constexpr,
BLOCK_DMODEL: tl.constexpr, BLOCK_DMODEL_PADDED: tl.constexpr, BLOCK_N:
tl.constexpr, SLIDING_WINDOW: tl.constexpr):
cur_batch = tl.program_id(0)
cur_head = tl.program_id(1)
start_m = tl.program_id(2)
cur_kv_head = cur_head // num_queries_per_kv
cur_batch_ctx_len = tl.load(B_Ctxlen + cur_batch)
cur_batch_seq_len = tl.load(B_Seqlen + cur_batch)
cur_batch_in_all_start_index = tl.load(B_Start_Loc + cur_batch)
cur_batch_query_len = cur_batch_seq_len - cur_batch_ctx_len
block_start_loc = BLOCK_M * start_m
offs_n = tl.arange(0, BLOCK_N)
offs_d = tl.arange(0, BLOCK_DMODEL_PADDED)
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
off_q = (cur_batch_in_all_start_index + offs_m[:, None]
) * stride_qbs + cur_head * stride_qh + offs_d[None, :] * stride_qd
dim_mask = tl.where(tl.arange(0, BLOCK_DMODEL_PADDED) < BLOCK_DMODEL, 1, 0
).to(tl.int1)
q = tl.load(Q + off_q, mask=dim_mask[None, :] & (offs_m[:, None] <
cur_batch_query_len), other=0.0)
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float('inf')
l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
acc = tl.zeros([BLOCK_M, BLOCK_DMODEL_PADDED], dtype=tl.float32)
for start_n in range(0, cur_batch_ctx_len, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
bn = tl.load(B_Loc + cur_batch * stride_b_loc_b + (start_n + offs_n
) // block_size * stride_b_loc_s, mask=start_n + offs_n <
cur_batch_ctx_len, other=0)
off_k = bn[None, :
] * stride_k_cache_bs + cur_kv_head * stride_k_cache_h + offs_d[
:, None] // x * stride_k_cache_d + (start_n + offs_n[None, :]
) % block_size * stride_k_cache_bl + offs_d[:, None
] % x * stride_k_cache_x
off_v = bn[:, None
] * stride_v_cache_bs + cur_kv_head * stride_v_cache_h + offs_d[
None, :] * stride_v_cache_d + (start_n + offs_n[:, None]
) % block_size * stride_v_cache_bl
k_load = tl.load(K_cache + off_k, mask=dim_mask[:, None] & (start_n +
offs_n[None, :] < cur_batch_ctx_len), other=0.0)
if k_load.dtype.is_fp8():
k = (k_load.to(tl.float32) * k_scale).to(q.dtype)
else:
k = k_load
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
qk += tl.dot(q, k)
qk = tl.where(start_n + offs_n[None, :] < cur_batch_ctx_len, qk,
float('-inf'))
qk *= sm_scale
if SLIDING_WINDOW > 0:
qk = tl.where(cur_batch_ctx_len + offs_m[:, None] - (start_n +
offs_n[None, :]) < SLIDING_WINDOW, qk, -10000)
m_ij = tl.max(qk, 1)
p = tl.exp(qk - m_ij[:, None])
l_ij = tl.sum(p, 1)
m_i_new = tl.maximum(m_i, m_ij)
alpha = tl.exp(m_i - m_i_new)
beta = tl.exp(m_ij - m_i_new)
l_i_new = alpha * l_i + beta * l_ij
p_scale = beta / l_i_new
p = p * p_scale[:, None]
acc_scale = l_i / l_i_new * alpha
acc = acc * acc_scale[:, None]
v_load = tl.load(V_cache + off_v, mask=dim_mask[None, :] & (start_n +
offs_n[:, None] < cur_batch_ctx_len), other=0.0)
if v_load.dtype.is_fp8():
v = (v_load.to(tl.float32) * v_scale).to(q.dtype)
else:
v = v_load
p = p.to(v.dtype)
acc += tl.dot(p, v)
l_i = l_i_new
m_i = m_i_new
off_k = offs_n[None, :] * stride_kbs + cur_kv_head * stride_kh + offs_d[
:, None] * stride_kd
off_v = offs_n[:, None] * stride_vbs + cur_kv_head * stride_vh + offs_d[
None, :] * stride_vd
k_ptrs = K + off_k
v_ptrs = V + off_v
block_mask = tl.where(block_start_loc < cur_batch_query_len, 1, 0)
for start_n in range(0, block_mask * (start_m + 1) * BLOCK_M, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
k = tl.load(k_ptrs + (cur_batch_in_all_start_index + start_n) *
stride_kbs, mask=dim_mask[:, None] & (start_n + offs_n[None, :] <
cur_batch_query_len), other=0.0)
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
qk += tl.dot(q, k)
qk *= sm_scale
qk = tl.where(offs_m[:, None] >= start_n + offs_n[None, :], qk,
float('-inf'))
if SLIDING_WINDOW > 0:
qk = tl.where(offs_m[:, None] - (start_n + offs_n[None, :]) <
SLIDING_WINDOW, qk, -10000)
m_ij = tl.max(qk, 1)
p = tl.exp(qk - m_ij[:, None])
l_ij = tl.sum(p, 1)
m_i_new = tl.maximum(m_i, m_ij)
alpha = tl.exp(m_i - m_i_new)
beta = tl.exp(m_ij - m_i_new)
l_i_new = alpha * l_i + beta * l_ij
p_scale = beta / l_i_new
p = p * p_scale[:, None]
acc_scale = l_i / l_i_new * alpha
acc = acc * acc_scale[:, None]
v = tl.load(v_ptrs + (cur_batch_in_all_start_index + start_n) *
stride_vbs, mask=dim_mask[None, :] & (start_n + offs_n[:, None] <
cur_batch_query_len), other=0.0)
p = p.to(v.dtype)
acc += tl.dot(p, v)
l_i = l_i_new
m_i = m_i_new
off_o = (cur_batch_in_all_start_index + offs_m[:, None]
) * stride_obs + cur_head * stride_oh + offs_d[None, :] * stride_od
out_ptrs = Out + off_o
tl.store(out_ptrs, acc, mask=dim_mask[None, :] & (offs_m[:, None] <
cur_batch_query_len))
return
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Blocked Access"
],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"Apache"
] | https://github.com/IBM/vllm/blob/99523dd62be2ecf6c6db15e8133aaaf7855e7e86/vllm/attention/ops/prefix_prefill.py |
2f8b0d83-5616-4c9c-832e-4472431e9e84 | paged_attn.py | AlibabaPAI/FLASHNN | flashnn/triton_kernels/paged_attn.py | 528a9301587f5fb135b25d973a87ba0a40a703a7 | 0 | @triton.jit
def _inner_paged_attn_unroll_0_kernel(q, k_cache, v_cache, stride_km,
block_base_ptrs, base_offs_kv, alibi_slope, block_offs, seq_len, qkv,
qk_max, exp_sum, BLOCK_SIZE: tl.constexpr, LO: tl.constexpr, HI: tl.
constexpr):
for block_idx in range(LO, HI, 1):
offs_kv_0 = tl.load(block_base_ptrs + block_idx + 0
) * stride_km + base_offs_kv
k_0 = tl.load(k_cache + offs_kv_0)
v_0 = tl.load(v_cache + offs_kv_0)
_qk_0 = tl.sum((q[None, :] * k_0).to(tl.float32), axis=1)
if alibi_slope is not None:
_qk_0 += alibi_slope * ((block_idx + 0) * BLOCK_SIZE +
block_offs - seq_len + 1)
_qk_max = tl.maximum(tl.max(_qk_0, axis=0), qk_max)
exp_tmp = tl.exp(_qk_0 - _qk_max)
_exp_sum = exp_sum * tl.exp(qk_max - _qk_max) + tl.sum(exp_tmp, axis=0)
qkv_sum_tmp = tl.exp(_qk_0[:, None] - _qk_max).to(v_cache.dtype.
element_ty) * v_0
qkv = (qkv * (exp_sum * tl.exp(qk_max - _qk_max)) + qkv_sum_tmp
) / _exp_sum
qk_max = _qk_max
exp_sum = _exp_sum
return qkv, qk_max, exp_sum
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"Apache"
] | https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/paged_attn.py |
5ae29aea-e9ab-4b04-990c-d208c7ac02ac | kernels.py | pytorch-labs/tritonbench | tritonbench/operators/sum/kernels.py | 3a5dccb159834968567a2e45e561dc1aeaa8f8a8 | 0 | @triton.jit
def triton_sum_kernel_scalar_result(input_ptr, output_ptr, M, BLOCK_SIZE_M:
tl.constexpr):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE_M
offsets = block_start + tl.arange(0, BLOCK_SIZE_M)
mask = offsets < M
    x = tl.load(input_ptr + offsets, mask=mask, other=0)
output = tl.sum(x)
output_offsets = tl.arange(0, 1)
tl.store(output_ptr + output_offsets, output)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"BSD"
] | https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/operators/sum/kernels.py |
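Note that every program writes its partial sum to output_ptr + 0, so a multi-block grid would race; the safe launch is a single program whose block covers all of M. A minimal sketch, assuming CUDA torch:

import torch
import triton

M = 1000
x = torch.randn(M, device='cuda')
out = torch.empty(1, device='cuda')
triton_sum_kernel_scalar_result[(1,)](x, out, M, BLOCK_SIZE_M=triton.next_power_of_2(M))
assert torch.allclose(out[0], x.sum(), atol=1e-3)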
09deca13-45a1-459d-85c5-acec7312d548 | parallel.py | sustcsonglin/flash-linear-attention | fla/ops/delta_rule/parallel.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.autotune(configs=[triton.Config({}, num_warps=1), triton.Config({},
num_warps=2)], key=['BT'])
@triton.jit
def save_intra_chunk_attn(A, A_local, T: tl.constexpr, BT: tl.constexpr):
i_t, i_bh = tl.program_id(0), tl.program_id(1)
p_A = tl.make_block_ptr(A + i_bh * T * T, (T, T), (T, 1), (i_t * BT,
i_t * BT), (BT, BT), (1, 0))
p_A_local = tl.make_block_ptr(A_local + i_bh * T * BT, (T, BT), (BT, 1),
(i_t * BT, 0), (BT, BT), (1, 0))
b_A_local = tl.load(p_A_local, boundary_check=(0, 1))
tl.store(p_A, b_A_local.to(p_A.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Memory-Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/delta_rule/parallel.py |
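A plausible launch — a sketch assuming contiguous A of shape (B*H, T, T) and A_local of shape (B*H, T, BT); the kernel copies each BT-by-BT diagonal block of A_local onto the diagonal of A:

import torch
import triton

B_H, T, BT = 8, 256, 64
A = torch.zeros(B_H, T, T, device='cuda')
A_local = torch.randn(B_H, T, BT, device='cuda')
save_intra_chunk_attn[(triton.cdiv(T, BT), B_H)](A, A_local, T=T, BT=BT)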
9c1a7f44-002d-4db7-b1da-ef6ca965c300 | blocksparse_sum.py | kimiasa/Experiments | src/models/attention/blocksparse_sum.py | c4e73bfefd8290695ec52b6386b6b81838ca94a1 | 0 | @triton.heuristics({'num_warps': lambda *args, **meta: num_warps(args[3] *
meta['BLOCK'])})
@triton.heuristics({'TN': lambda *args, **meta: next_power_of_2(args[3] *
meta['BLOCK'])})
@triton.jit
def _forward(X, OUT, LUT, sizemax, stride_zx, stride_zout, stride_hout, **meta
):
TN = meta['TN']
BLOCK = meta['BLOCK']
pidhm = tl.program_id(0)
pidz = tl.program_id(1)
rxm = pidhm % BLOCK
rbm = pidhm // BLOCK
rxn = tl.arange(0, TN) % BLOCK
rbn = tl.arange(0, TN) // BLOCK
header = LUT + rbm * 2
size = tl.load(header + 0)
offset = tl.load(header + 1)
check = rbn < size
rbmn = tl.where(check, rbn, size - 1)
blockid = tl.load(LUT + offset + rbmn * 4 + 0)
rowid = tl.load(LUT + offset + rbmn * 4 + 2)
headid = tl.load(LUT + offset + rbmn * 4 + 3)
px = X + pidz * stride_zx + blockid * BLOCK * BLOCK + rxm * BLOCK + rxn
x = tl.load(px, mask=check, other=0)
x = x.to(tl.float32)
out = tl.sum(x, axis=0)
pout = (OUT + pidz * stride_zout + headid * stride_hout + rowid * BLOCK +
rxm)
tl.store(pout, out)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"Apache"
] | https://github.com/kimiasa/Experiments/blob/c4e73bfefd8290695ec52b6386b6b81838ca94a1/src/models/attention/blocksparse_sum.py |
3d84e0a7-71fe-46f5-a28a-531bf86d60ad | 06-fused-attention.py | 2lambda123/triton | python/tutorials/06-fused-attention.py | 09e27725b89043a07f49c440db6a9aedcfba8432 | 0 | @triton.jit
def _bwd_kernel(Q, K, V, sm_scale, Out, DO, DQ, DK, DV, L, D, stride_qz,
stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn,
stride_kk, stride_vz, stride_vh, stride_vk, stride_vn, Z, H, N_CTX,
num_block, BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N:
tl.constexpr, CAUSAL: tl.constexpr):
off_hz = tl.program_id(0)
off_z = off_hz // H
off_h = off_hz % H
qk_scale = sm_scale * 1.44269504
Q += off_z * stride_qz + off_h * stride_qh
K += off_z * stride_qz + off_h * stride_qh
V += off_z * stride_qz + off_h * stride_qh
DO += off_z * stride_qz + off_h * stride_qh
DQ += off_z * stride_qz + off_h * stride_qh
DK += off_z * stride_qz + off_h * stride_qh
DV += off_z * stride_qz + off_h * stride_qh
for start_n in range(0, num_block):
if CAUSAL:
lo = start_n * BLOCK_M
else:
lo = 0
offs_qm = lo + tl.arange(0, BLOCK_M)
offs_n = start_n * BLOCK_M + tl.arange(0, BLOCK_M)
offs_m = tl.arange(0, BLOCK_N)
offs_k = tl.arange(0, BLOCK_DMODEL)
q_ptrs = Q + (offs_qm[:, None] * stride_qm + offs_k[None, :] *
stride_qk)
k_ptrs = K + (offs_n[:, None] * stride_kn + offs_k[None, :] * stride_kk
)
v_ptrs = V + (offs_n[:, None] * stride_qm + offs_k[None, :] * stride_qk
)
do_ptrs = DO + (offs_qm[:, None] * stride_qm + offs_k[None, :] *
stride_qk)
dq_ptrs = DQ + (offs_qm[:, None] * stride_qm + offs_k[None, :] *
stride_qk)
D_ptrs = D + off_hz * N_CTX
l_ptrs = L + off_hz * N_CTX
dv = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
dk = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
k = tl.load(k_ptrs)
v = tl.load(v_ptrs)
for start_m in range(lo, num_block * BLOCK_M, BLOCK_M):
offs_m_curr = start_m + offs_m
q = tl.load(q_ptrs)
if CAUSAL:
qk = tl.where(offs_m_curr[:, None] >= offs_n[None, :],
float(0.0), float('-inf'))
else:
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
qk += tl.dot(q, tl.trans(k))
qk *= qk_scale
l_i = tl.load(l_ptrs + offs_m_curr)
p = tl.math.exp2(qk - l_i[:, None])
do = tl.load(do_ptrs)
dv += tl.dot(tl.trans(p.to(Q.dtype.element_ty)), do)
Di = tl.load(D_ptrs + offs_m_curr)
dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) - Di[:, None]
dp += tl.dot(do, tl.trans(v))
ds = p * dp * sm_scale
dk += tl.dot(tl.trans(ds.to(Q.dtype.element_ty)), q)
dq = tl.load(dq_ptrs)
dq += tl.dot(ds.to(Q.dtype.element_ty), k)
tl.store(dq_ptrs, dq)
dq_ptrs += BLOCK_M * stride_qm
q_ptrs += BLOCK_M * stride_qm
do_ptrs += BLOCK_M * stride_qm
dv_ptrs = DV + (offs_n[:, None] * stride_qm + offs_k[None, :] *
stride_qk)
dk_ptrs = DK + (offs_n[:, None] * stride_kn + offs_k[None, :] *
stride_kk)
tl.store(dv_ptrs, dv)
tl.store(dk_ptrs, dk)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/2lambda123/triton/blob/09e27725b89043a07f49c440db6a9aedcfba8432/python/tutorials/06-fused-attention.py |
02bde305-1b40-42a5-b808-be8ee0f8f362 | outer_softmax_online.py | iclementine/optimize_softmax | outer_softmax_online.py | 6ddeee3481dd5e63f4a30b946c417e97bc4494bf | 0 | @triton.jit
def next_multiple_of(a, b):
    # round a up to the nearest multiple of b
    return tl.cdiv(a, b) * b
| {
"Data Type": [
"int8"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"BSD"
] | https://github.com/iclementine/optimize_softmax/blob/6ddeee3481dd5e63f4a30b946c417e97bc4494bf/outer_softmax_online.py |
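Since tl.cdiv(a, b) is ceil(a / b), the helper rounds a up to the next multiple of b (e.g. 10 -> 12 for b = 4). The same identity on the host side, for reference:

def next_multiple_of(a: int, b: int) -> int:
    return -(-a // b) * b  # ceil-divide, then scale back up

assert next_multiple_of(10, 4) == 12 and next_multiple_of(12, 4) == 12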
35ba6a63-cf05-402d-aa41-8b691e1e0c2e | flash_attention.py | falkaer/multi-scale-music | seq/flash_attention.py | a7794ddfb3bbd95b70acf3fe72a08d8a1d47564d | 0 | @triton.jit
def bounds_mask(offs_m, offs_n, M, N, EVEN_M: tl.constexpr, EVEN_N: tl.
constexpr):
if EVEN_M & EVEN_N:
val = 0
else:
mask = make_bounds(offs_m, offs_n, M, N, EVEN_M, EVEN_N)
val = tl.where(mask, 0, float('-inf'))
return val
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Latency Sensitive"
]
} | [
"MIT"
] | https://github.com/falkaer/multi-scale-music/blob/a7794ddfb3bbd95b70acf3fe72a08d8a1d47564d/seq/flash_attention.py |
e0ec9d80-09d9-4544-b5fb-203ac2fa2c98 | test_inductor.py | triton-lang/kernels | test/test_inductor.py | eeeebdd8be7d13629de22d600621e6234057eed3 | 0 | @triton.jit
def fn(out_ptr0, rnumel, RBLOCK: tl.constexpr):
rbase = tl.arange(0, RBLOCK)[None, :]
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
tmp3 = tl.where(rmask, 1, 0)
tmp6 = tl.cumsum(tmp3, 1)
tl.store(out_ptr0 + rindex, tmp6, rmask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/triton-lang/kernels/blob/eeeebdd8be7d13629de22d600621e6234057eed3/test/test_inductor.py |
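tl.cumsum here restarts in every RBLOCK chunk, so the kernel emits the running count 1..RBLOCK repeated per chunk rather than a global cumsum. A minimal sketch, assuming CUDA torch:

import torch

rnumel, RBLOCK = 8, 4
out = torch.empty(rnumel, dtype=torch.int32, device='cuda')
fn[(1,)](out, rnumel, RBLOCK=RBLOCK)
# out is now [1, 2, 3, 4, 1, 2, 3, 4]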
c629a162-a019-40d7-b34a-4b77786b3e07 | RzLinearIdx.py | apd10/RzLinear | python/rz_linear/impl/RzLinearIdx.py | eb56657b2de0a97f398f88af421b0fbcbc5469c9 | 0 | @triton.jit
def rz_linear_idx_kernel(bh_ptr, b_ptr, K, N, H, R3, R2, R1, R0, stride_bk,
stride_bn, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr):
pid = tl.program_id(axis=0)
grid_n = tl.cdiv(N, BLOCK_SIZE_N)
pid_k = pid // grid_n
pid_n = pid % grid_n
bh_offset = bh_ptr + tl.arange(0, BLOCK_SIZE_K)[:, None
] * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)[None, :]
bh_ptrs = bh_offset + (pid_k * R3 + pid_n * R2 + R1) % R0 % (H -
BLOCK_SIZE_K * BLOCK_SIZE_N)
b_ptrs = (b_ptr + pid_k * BLOCK_SIZE_K * stride_bk + pid_n *
BLOCK_SIZE_N * stride_bn + tl.arange(0, BLOCK_SIZE_K)[:, None] *
stride_bk + tl.arange(0, BLOCK_SIZE_N)[None, :])
bh = tl.load(bh_ptrs)
tl.store(b_ptrs, bh)
| {
"Data Type": [
"fp32"
],
"Functionality": [],
"Memory Access Pattern": [
"Transposed Access",
"Blocked Access"
],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/apd10/RzLinear/blob/eb56657b2de0a97f398f88af421b0fbcbc5469c9/python/rz_linear/impl/RzLinearIdx.py |