uuid | file_name | repo_name | file_path | commit_hash | starcount | input | category | licenses | github_url |
---|---|---|---|---|---|---|---|---|---|
6d32b21d-530f-4d1d-a2b2-bb08a0c056cc | triton_matric_matmul.py | elphinkuo/fast_matrix_multiplication | dot_product/on_gpu/Triton/triton_matric_matmul.py | 4e875a17e95b7ccf9af102d2c0f8cc31ed9a29f3 | 0 | @triton.jit
def _matmul_kernel(A, B, C, M, N, K, **meta):
    TILE_M = meta['BLOCK_M']
    TILE_N = meta['BLOCK_N']
    TILE_K = 128
    m = tl.program_id(0) * TILE_M + tl.arange(0, TILE_M)
    n = tl.program_id(1) * TILE_N + tl.arange(0, TILE_N)
    acc = tl.zeros((TILE_M, TILE_N), dtype=tl.float32)
    for k in range(0, K, TILE_K):
        ks = k + tl.arange(0, TILE_K)
        a = tl.load(A + m[:, None] * K + ks[None, :],
            mask=(m[:, None] < M) & (ks[None, :] < K), other=0.0)
        b = tl.load(B + ks[:, None] * N + n[None, :],
            mask=(ks[:, None] < K) & (n[None, :] < N), other=0.0)
        acc += tl.dot(a, b)
    tl.store(C + m[:, None] * N + n[None, :], acc,
        mask=(m[:, None] < M) & (n[None, :] < N))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Tiled",
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/elphinkuo/fast_matrix_multiplication/blob/4e875a17e95b7ccf9af102d2c0f8cc31ed9a29f3/dot_product/on_gpu/Triton/triton_matric_matmul.py |
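A minimal host-side launch sketch for the tiled matmul kernel above; the wrapper name, tile sizes, and the assumption that BLOCK_M/BLOCK_N are forwarded through `**meta` (an older Triton API) are not part of the original repo:

```python
import torch
import triton

def matmul(a: torch.Tensor, b: torch.Tensor, block_m: int = 64, block_n: int = 64) -> torch.Tensor:
    # a: (M, K), b: (K, N) CUDA tensors; C is accumulated in fp32.
    M, K = a.shape
    _, N = b.shape
    c = torch.empty((M, N), device=a.device, dtype=torch.float32)
    # One program instance per (BLOCK_M, BLOCK_N) output tile.
    grid = (triton.cdiv(M, block_m), triton.cdiv(N, block_n))
    _matmul_kernel[grid](a, b, c, M, N, K, BLOCK_M=block_m, BLOCK_N=block_n)
    return c
```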
1325bcba-7d28-47b5-b18b-31e0f79878f5 | triton_sll.py | pytorch/FBGEMM | fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py | fe980ab54a6e28818d81c8694b6564e7f804418b | 0 | @triton.jit
def jagged_2_softmax_kernel(input_ptr, output_ptr, offsets_row_ptr,
offsets_col_ptr, offsets_overall_ptr, input_stride, output_stride,
transpose, max_seq_len_row, max_seq_len_col, BLOCK_SIZE: tl.constexpr):
"""
input shape is [sum_B(Ni * Hi)]
output shape is [sum_B(Ni * Hi)]
Padded version = [B, N, H]
Calculate softmax along the N dim
Each kernel calculates softmax for 1 sample and 1 head
offsets_row.size == offsets_col.size == offsets_overall.size
"""
pid_batch = tl.program_id(0)
pid_head = tl.program_id(1)
begin = tl.load(offsets_overall_ptr + pid_batch)
if transpose:
N = tl.load(offsets_row_ptr + pid_batch + 1) - tl.load(
offsets_row_ptr + pid_batch)
H = tl.load(offsets_col_ptr + pid_batch + 1) - tl.load(
offsets_col_ptr + pid_batch)
stride_n = H
stride_h = H // H
H = tl.minimum(max_seq_len_col, H)
N = tl.minimum(max_seq_len_row, N)
else:
N = tl.load(offsets_col_ptr + pid_batch + 1) - tl.load(
offsets_col_ptr + pid_batch)
H = tl.load(offsets_row_ptr + pid_batch + 1) - tl.load(
offsets_row_ptr + pid_batch)
stride_h = N
stride_n = N // N
H = tl.minimum(max_seq_len_row, H)
N = tl.minimum(max_seq_len_col, N)
if pid_head >= H:
return
if H == 0 or N == 0:
return
start_ptr = input_ptr + begin * input_stride
offsets = tl.arange(0, BLOCK_SIZE)
input_ptrs = (start_ptr + offsets * input_stride * stride_n + pid_head *
input_stride * stride_h)
row = tl.load(input_ptrs, mask=offsets < N, other=-float('inf'))
row_mins_max = row - tl.max(row, axis=0)
numerator = tl.exp(row_mins_max)
denominator = tl.sum(numerator, axis=0)
softmax_output = numerator / denominator
output_start_ptr = output_ptr + begin * output_stride
output_ptrs = (output_start_ptr + offsets * output_stride * stride_n +
pid_head * output_stride * stride_h)
tl.store(output_ptrs, softmax_output, mask=offsets < N)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Softmax"
],
"Memory Access Pattern": [
"Coalesced",
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"BSD",
"MIT"
] | https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py |
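A small sketch of how the three offset arrays consumed by the jagged softmax kernel could be built for a hypothetical batch (values chosen only for illustration):

```python
import torch

# Hypothetical batch of B=3 jagged samples with (N_i, H_i) = (2, 4), (3, 4), (1, 4).
lengths_n = torch.tensor([2, 3, 1])
lengths_h = torch.tensor([4, 4, 4])
zero = torch.zeros(1, dtype=torch.long)
offsets_row = torch.cat([zero, lengths_n.cumsum(0)])                    # [0, 2, 5, 6]
offsets_col = torch.cat([zero, lengths_h.cumsum(0)])                    # [0, 4, 8, 12]
offsets_overall = torch.cat([zero, (lengths_n * lengths_h).cumsum(0)])  # [0, 8, 20, 24]
# begin = offsets_overall[b] locates sample b inside the flattened
# [sum_B(N_i * H_i)] buffer; the row/col offsets recover N_i and H_i.
```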
c5681889-cbdf-4c8c-b730-916ee6ccb0d9 | triton_ops.py | huyz2023/2by4-pretrain | sparse/triton_ops.py | 9e330125dea71e5a3dee235f4efb8869f9e4cdd0 | 0 | @triton.jit
def _MVUE24_approx_triton(dense_ptr, sparse_ptr, dense_row_stride,
sparse_row_stride, dense_col_stride, sparse_col_stride, m, k, seed,
BLOCK_SIZE: tl.constexpr, ARRAY_LAYOUT: tl.constexpr):
if ARRAY_LAYOUT == 'row':
row_idx = tl.program_id(0)
col_idx = tl.program_id(1) * 4 * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE
) * 4
mask = col_idx < k
elif ARRAY_LAYOUT == 'col':
row_idx = tl.arange(0, BLOCK_SIZE) + tl.program_id(0) * BLOCK_SIZE
col_idx = tl.program_id(1) * 4
mask = row_idx < m
dense_40 = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx +
0) * dense_col_stride, mask=mask)
dense_41 = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx +
1) * dense_col_stride, mask=mask)
dense_42 = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx +
2) * dense_col_stride, mask=mask)
dense_43 = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx +
3) * dense_col_stride, mask=mask)
if ARRAY_LAYOUT == 'row':
seed0 = seed + (tl.program_id(0) + tl.program_id(1) * m) * 2
seed1 = seed + (tl.program_id(0) + tl.program_id(1) * m) * 2 + 1
else:
seed0 = seed + (tl.program_id(0) * k // 16 + tl.program_id(1)) * 2
seed1 = seed + (tl.program_id(0) * k // 16 + tl.program_id(1)) * 2 + 1
random0 = tl.rand(seed0, tl.arange(0, BLOCK_SIZE), n_rounds=5)
random1 = tl.rand(seed1, tl.arange(0, BLOCK_SIZE), n_rounds=5)
dense_40, dense_41, dense_42, dense_43, m0, m1, m2, m3 = _MVUE24_approx(
dense_40, dense_41, dense_42, dense_43, random0, random1)
tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 0) *
sparse_col_stride, dense_40, mask=mask & m0)
tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 1) *
sparse_col_stride, dense_41, mask=mask & m1)
tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 2) *
sparse_col_stride, dense_42, mask=mask & m2)
tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 3) *
sparse_col_stride, dense_43, mask=mask & m3)
| {
"Data Type": [
"fp32",
"uint8"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Coalesced",
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Memory-Bound",
"Compute Bound"
]
} | [
"BSD"
] | https://github.com/huyz2023/2by4-pretrain/blob/9e330125dea71e5a3dee235f4efb8869f9e4cdd0/sparse/triton_ops.py |
ae2cb3a3-be88-4b7a-bbba-b418ac601259 | parallel_scan.py | chengkai-liu/RecBLR | parallel_scan.py | 66e520c26e28c05a5425ba2e81c9169b7e0176e2 | 0 | @triton.jit
def unpack64(merged):
tl.static_assert(merged.dtype == tl.uint64)
b = (merged & 4294967295).to(tl.uint32).to(tl.float32, bitcast=True)
a = (merged >> 32).to(tl.uint32).to(tl.float32, bitcast=True)
return a, b
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/chengkai-liu/RecBLR/blob/66e520c26e28c05a5425ba2e81c9169b7e0176e2/parallel_scan.py |
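For reference, the packing that `unpack64` inverts can be sketched on the host as below; torch has no uint64, so the packed word is carried as an int64 bit pattern (an assumption of this sketch, not code from the repo):

```python
import torch

def pack64(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    # Bit-cast two fp32 tensors to 32-bit patterns and pack them into one
    # 64-bit word per element: a in the high 32 bits, b in the low 32 bits.
    a_bits = a.contiguous().view(torch.int32).to(torch.int64) & 0xFFFFFFFF
    b_bits = b.contiguous().view(torch.int32).to(torch.int64) & 0xFFFFFFFF
    return (a_bits << 32) | b_bits
```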
f031d842-9ab5-40cf-b113-7fe0ef2ae51e | y_5.py | IntelLabs/EquiTriton | src/equitriton/sph_harm/direct/y_5.py | 1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c | 0 | @triton.jit
def fifth_order_bwd(coord_ptr: tl.tensor, coord_grad_ptr: tl.tensor,
sph_grad_ptr: tl.tensor, block_size: tl.constexpr, coord_numel: tl.
constexpr, output_numel: tl.constexpr, col_offset: tl.constexpr,
output_stride: tl.constexpr):
block_id = tl.program_id(0)
coord_stride = 3
coord_striding = tl.arange(0, block_size) * coord_stride
coord_row_offset = coord_striding + block_size * coord_stride * block_id
x = tl.load(coord_ptr + coord_row_offset, mask=coord_row_offset <
coord_numel)
y = tl.load(coord_ptr + coord_row_offset + 1, mask=coord_row_offset + 1 <
coord_numel)
z = tl.load(coord_ptr + coord_row_offset + 2, mask=coord_row_offset + 2 <
coord_numel)
output_striding = tl.arange(0, block_size) * output_stride
output_row_offset = (output_striding + block_size * output_stride *
block_id + col_offset)
g_0 = tl.load(sph_grad_ptr + output_row_offset, mask=output_row_offset <
output_numel)
g_1 = tl.load(sph_grad_ptr + output_row_offset + 1, mask=
output_row_offset + 1 < output_numel)
g_2 = tl.load(sph_grad_ptr + output_row_offset + 2, mask=
output_row_offset + 2 < output_numel)
g_3 = tl.load(sph_grad_ptr + output_row_offset + 3, mask=
output_row_offset + 3 < output_numel)
g_4 = tl.load(sph_grad_ptr + output_row_offset + 4, mask=
output_row_offset + 4 < output_numel)
g_5 = tl.load(sph_grad_ptr + output_row_offset + 5, mask=
output_row_offset + 5 < output_numel)
g_6 = tl.load(sph_grad_ptr + output_row_offset + 6, mask=
output_row_offset + 6 < output_numel)
g_7 = tl.load(sph_grad_ptr + output_row_offset + 7, mask=
output_row_offset + 7 < output_numel)
g_8 = tl.load(sph_grad_ptr + output_row_offset + 8, mask=
output_row_offset + 8 < output_numel)
g_9 = tl.load(sph_grad_ptr + output_row_offset + 9, mask=
output_row_offset + 9 < output_numel)
g_10 = tl.load(sph_grad_ptr + output_row_offset + 10, mask=
output_row_offset + 10 < output_numel)
CONST000 = 1.60565407233314
CONST001 = 3.0
CONST002 = 3.21130814466628
CONST003 = 1.60565407233314
CONST004 = 6.42261628933256
CONST005 = 6.42261628933256
CONST006 = 8.67152307844476
CONST007 = 8.02827036166571
CONST008 = 6.9372184627558
CONST009 = 11.6340690431164
CONST010 = 12.8452325786651
CONST011 = 6.21867148191637
CONST012 = 6.21867148191637
CONST014 = 12.4373429638327
CONST017 = 12.8452325786651
CONST018 = 13.8744369255116
CONST019 = 24.8746859276655
CONST020 = 24.8746859276655
CONST021 = 27.7488738510232
CONST024 = 29.4321253055229
CONST027 = 7.35803132638072
CONST029 = 46.5362761724657
CONST030 = 51.3809303146605
CONST031 = 51.3809303146605
CONST034 = 101.955872807799
CONST036 = -8.67152307844475
CONST037 = 3.4686092313779
CONST038 = -88.2963759165686
CONST039 = -83.2466215530696
CONST040 = -69.8044142586986
CONST041 = -50.9779364038993
CONST042 = -50.9779364038993
CONST043 = -46.5362761724657
CONST044 = -44.1481879582843
CONST045 = -41.6233107765348
CONST046 = -38.5356977359954
CONST047 = -38.5356977359954
CONST048 = -33.166247903554
CONST049 = -33.9852909359329
CONST050 = 6.42261628933257
CONST051 = -33.9852909359329
CONST052 = -29.4321253055229
CONST053 = -27.7488738510232
CONST054 = -20.8116553882674
CONST055 = -19.2678488679977
CONST056 = -19.2678488679977
CONST057 = -16.9926454679664
CONST058 = -16.9926454679664
CONST059 = -13.8744369255116
CONST060 = -16.583123951777
CONST061 = -8.49632273398321
CONST062 = -6.9372184627558
CONST063 = -5.20291384706685
CONST064 = -3.4686092313779
VAR06 = x * x * x * x
VAR07 = x * x * x
VAR08 = x * x
VAR15 = y * y * y * y
VAR16 = y * y * y
VAR17 = y * y
VAR24 = z * z * z * z
VAR25 = z * z * z
VAR26 = z * z
g_x = tl.load(coord_grad_ptr + coord_row_offset, mask=coord_row_offset <
coord_numel)
g_y = tl.load(coord_grad_ptr + coord_row_offset + 1, mask=
coord_row_offset + 1 < coord_numel)
g_z = tl.load(coord_grad_ptr + coord_row_offset + 2, mask=
coord_row_offset + 2 < coord_numel)
g_x += g_0 * (CONST009 * VAR06 + CONST009 * VAR24 + CONST040 * VAR08 *
VAR26) + g_1 * y * (CONST038 * VAR08 * z - CONST052 * VAR25) + g_10 * (
CONST029 * VAR07 * z + CONST043 * VAR25 * x) + g_2 * (CONST001 *
VAR08 * (CONST059 * VAR17 + CONST064 * VAR26) + CONST006 * VAR06 -
CONST045 * VAR17 * VAR26 + CONST063 * VAR24) + g_3 * (CONST041 *
VAR08 * y * z - CONST049 * VAR16 * z + CONST057 * VAR25 * y) + g_4 * (
CONST000 * VAR24 + CONST001 * VAR08 * (CONST002 * VAR26 + CONST055 *
VAR17) + CONST007 * VAR06 + CONST010 * VAR15 + CONST056 * VAR17 * VAR26
) + g_5 * (CONST048 * VAR16 * x + y * (CONST019 * VAR07 + CONST019 *
VAR26 * x)) + g_6 * (CONST005 * VAR25 * x + z * (CONST004 * VAR07 +
CONST046 * VAR17 * x)) + g_7 * (CONST049 * VAR16 * x - CONST051 *
VAR07 * y) + g_8 * (CONST008 * VAR25 * x + z * (CONST039 * VAR17 *
x - CONST054 * VAR07)) + g_9 * y * (CONST024 * VAR07 + CONST038 *
VAR26 * x)
g_y += g_1 * (CONST052 * VAR07 * z - CONST052 * VAR25 * x) + g_2 * (-
CONST039 * VAR26 * x * y + CONST053 * VAR07 * y) + g_3 * (CONST058 *
VAR07 * z + x * (CONST034 * VAR17 * z + CONST057 * VAR25)) + g_4 * (
CONST047 * VAR07 * y + x * (CONST030 * VAR16 + CONST046 * VAR26 * y)
) + g_5 * (CONST001 * VAR17 * (CONST060 * VAR08 + CONST060 * VAR26) +
CONST011 * VAR06 + CONST012 * VAR24 + CONST014 * VAR08 * VAR26 -
CONST060 * VAR15) + g_6 * (CONST046 * VAR25 * y + z * (CONST031 *
VAR16 + CONST046 * VAR08 * y)) + g_7 * (CONST001 * VAR17 * (
CONST057 * VAR08 - CONST057 * VAR26) - CONST061 * VAR06 + CONST061 *
VAR24) + g_8 * (CONST021 * VAR25 * y + CONST039 * VAR08 * y * z
) + g_9 * (CONST027 * VAR06 + CONST027 * VAR24 + CONST044 * VAR08 *
VAR26)
g_z += g_0 * (CONST029 * VAR25 * x + CONST043 * VAR07 * z) + g_1 * y * (
-CONST038 * VAR26 * x + CONST052 * VAR07) + g_10 * (CONST009 *
VAR06 + CONST009 * VAR24 + CONST040 * VAR08 * VAR26) + g_2 * (
CONST062 * VAR07 * z + x * (-CONST039 * VAR17 * z + CONST054 * VAR25)
) + g_3 * (CONST058 * VAR07 * y + x * (CONST042 * VAR26 * y -
CONST049 * VAR16)) + g_4 * (CONST005 * VAR07 * z + x * (CONST046 *
VAR17 * z + CONST050 * VAR25)) + g_5 * (CONST048 * VAR16 * z + y *
(CONST019 * VAR08 * z + CONST020 * VAR25)) + g_6 * (CONST001 *
VAR26 * (CONST002 * VAR08 + CONST056 * VAR17) + CONST003 * VAR06 +
CONST007 * VAR24 + CONST017 * VAR15 + CONST056 * VAR08 * VAR17
) + g_7 * (-CONST049 * VAR16 * z + CONST051 * VAR25 * y) + g_8 * (
CONST001 * VAR26 * (CONST018 * VAR17 + CONST037 * VAR08) + CONST036 *
VAR24 + CONST045 * VAR08 * VAR17 - CONST063 * VAR06) + g_9 * y * (
CONST024 * VAR25 + CONST038 * VAR08 * z)
tl.store(coord_grad_ptr + coord_row_offset, g_x, mask=coord_row_offset <
coord_numel)
tl.store(coord_grad_ptr + coord_row_offset + 1, g_y, mask=
coord_row_offset + 1 < coord_numel)
tl.store(coord_grad_ptr + coord_row_offset + 2, g_z, mask=
coord_row_offset + 2 < coord_numel)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/direct/y_5.py |
e4b5235d-37a5-4b17-8c48-fa82e3aecf4f | paged_attn.py | AlibabaPAI/FLASHNN | flashnn/triton_kernels/paged_attn.py | 528a9301587f5fb135b25d973a87ba0a40a703a7 | 0 | @triton.autotune(configs=[triton.Config({'UNROLL_FACTOR': uf}) for uf in [1,
2, 4, 8]], key=['POWER_OF_2_MAX_SEQ_LEN', 'QUERY_GROUP_SIZE',
'USE_PARTITIONING', 'BLOCK_SIZE', 'HEAD_SIZE', 'PARTITION_SIZE'])
@triton.jit
def _paged_attn_wo_mma_kernel(exp_sums, max_logits, out, q, k_cache,
v_cache, scale, block_tables, seq_lens, max_num_blocks_per_seq,
alibi_slopes, stride_qm, stride_qn, stride_om, stride_on, stride_ok,
stride_km, stride_kn, stride_kk, stride_exp_m, stride_exp_n, BLOCK_SIZE:
tl.constexpr, HEAD_SIZE: tl.constexpr, QUERY_GROUP_SIZE: tl.constexpr,
PARTITION_SIZE: tl.constexpr, POWER_OF_2_MAX_SEQ_LEN: tl.constexpr,
USE_PARTITIONING: tl.constexpr, UNROLL_FACTOR: tl.constexpr):
head_idx = tl.program_id(axis=0)
kv_head_idx = head_idx // QUERY_GROUP_SIZE
seq_idx = tl.program_id(axis=1)
par_idx = tl.program_id(axis=2)
seq_len = tl.load(seq_lens + seq_idx)
if par_idx * PARTITION_SIZE >= seq_len:
return
num_context_blocks = tl.cdiv(seq_len, BLOCK_SIZE)
if USE_PARTITIONING:
num_blocks_per_par = PARTITION_SIZE // BLOCK_SIZE
start_block_idx = par_idx * num_blocks_per_par
end_block_idx = tl.minimum(start_block_idx + num_blocks_per_par,
num_context_blocks)
else:
start_block_idx = 0
end_block_idx = num_context_blocks
if alibi_slopes is None:
alibi_slope = 0.0
else:
alibi_slope = tl.load(alibi_slopes + head_idx)
block_offs = tl.arange(0, BLOCK_SIZE)
head_size_offs = tl.arange(0, HEAD_SIZE)
q = tl.load(q + seq_idx * stride_qm + head_idx * stride_qn + head_size_offs
)
q = (q * scale).to(tl.float16)
qkv = tl.zeros([BLOCK_SIZE, HEAD_SIZE], dtype=tl.float32)
qk_max = float('-inf')
exp_sum = 0.0
fp16_0 = tl.zeros([1, 1], dtype=k_cache.dtype.element_ty)
base_offs_kv = kv_head_idx * stride_kn + block_offs[:, None
] * stride_kk + head_size_offs[None, :]
block_base_ptrs = block_tables + seq_idx * max_num_blocks_per_seq
hi_unroll = (end_block_idx - 1) // UNROLL_FACTOR * UNROLL_FACTOR
if UNROLL_FACTOR == 1:
qkv, qk_max, exp_sum = _inner_paged_attn_unroll_0_kernel(q, k_cache,
v_cache, stride_km, block_base_ptrs, base_offs_kv, alibi_slope,
block_offs, seq_len, qkv, qk_max, exp_sum, BLOCK_SIZE,
start_block_idx, hi_unroll)
elif UNROLL_FACTOR == 2:
qkv, qk_max, exp_sum = _inner_paged_attn_unroll_2_kernel(q, k_cache,
v_cache, stride_km, block_base_ptrs, base_offs_kv, alibi_slope,
block_offs, seq_len, qkv, qk_max, exp_sum, BLOCK_SIZE,
start_block_idx, hi_unroll)
elif UNROLL_FACTOR == 4:
qkv, qk_max, exp_sum = _inner_paged_attn_unroll_4_kernel(q, k_cache,
v_cache, stride_km, block_base_ptrs, base_offs_kv, alibi_slope,
block_offs, seq_len, qkv, qk_max, exp_sum, BLOCK_SIZE,
start_block_idx, hi_unroll)
elif UNROLL_FACTOR == 8:
qkv, qk_max, exp_sum = _inner_paged_attn_unroll_8_kernel(q, k_cache,
v_cache, stride_km, block_base_ptrs, base_offs_kv, alibi_slope,
block_offs, seq_len, qkv, qk_max, exp_sum, BLOCK_SIZE,
start_block_idx, hi_unroll)
tl.debug_barrier()
for block_idx in range(hi_unroll, end_block_idx):
physical_block_idx = tl.load(block_tables + seq_idx *
max_num_blocks_per_seq + block_idx)
mask = block_offs[:, None] < seq_len - block_idx * BLOCK_SIZE
offs_kv = physical_block_idx * stride_km + base_offs_kv
k = tl.load(k_cache + offs_kv, mask=mask, other=fp16_0)
v = tl.load(v_cache + offs_kv, mask=mask, other=fp16_0)
_qk = tl.sum((q[None, :] * k).to(tl.float32), axis=1)
_qk = tl.where(block_offs < seq_len - block_idx * BLOCK_SIZE, _qk,
float('-inf'))
_qk += alibi_slope * (block_idx * BLOCK_SIZE + block_offs - seq_len + 1
)
_qk_max = tl.maximum(tl.max(_qk, axis=0), qk_max)
_exp_sum = exp_sum * tl.exp(qk_max - _qk_max) + tl.sum(tl.exp(_qk -
_qk_max), axis=0)
qkv = qkv * (exp_sum * tl.exp(qk_max - _qk_max)) + tl.exp(_qk[:,
None] - _qk_max) * v
qkv = qkv / _exp_sum
qk_max = _qk_max
exp_sum = _exp_sum
if USE_PARTITIONING:
offs_exp = seq_idx * stride_exp_m + head_idx * stride_exp_n + par_idx
tl.store(exp_sums + offs_exp, exp_sum)
tl.store(max_logits + offs_exp, qk_max)
offs_out = (seq_idx * stride_om + head_idx * stride_on + par_idx *
stride_ok + head_size_offs)
tl.store(out + offs_out, tl.sum(qkv, axis=0))
| {
"Data Type": [
"fp16",
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Softmax",
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Blocked Access",
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops",
"Persistent Kernels"
],
"Performance Objective": [
"High Throughput"
]
} | [
"Apache"
] | https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/paged_attn.py |
cd64708f-5721-4a20-be92-b7c64e1762ca | GELUglu.py | huyz2023/2by4-pretrain | sparse/GELUglu.py | 9e330125dea71e5a3dee235f4efb8869f9e4cdd0 | 0 | @triton.jit
def _gelu_glu_fwd_kernel(output_ptr, input_ptr, output_row_stride,
input_row_stride, output_col_stride, input_col_stride,
output_page_stride, input_page_stride, n_pages, BLOCK_SIZE: tl.constexpr):
row_idx = tl.program_id(0)
col_idx = tl.program_id(1)
x = tl.load(input_ptr + row_idx * input_row_stride + col_idx *
input_col_stride + tl.arange(0, BLOCK_SIZE // 2) *
input_page_stride, mask=tl.arange(0, BLOCK_SIZE // 2) < n_pages //
2, other=-float('inf'))
gate = tl.load(input_ptr + row_idx * input_row_stride + col_idx *
input_col_stride + (tl.arange(0, BLOCK_SIZE // 2) + n_pages // 2) *
input_page_stride, mask=tl.arange(0, BLOCK_SIZE // 2) < n_pages //
2, other=-float('inf'))
gate_cube = gate * gate * gate
beta = 0.7978845608028654
kappa = 0.044715
inner = beta * (gate + kappa * gate_cube)
inner_tanh = tanh(inner)
gate_gelu = 0.5 * gate * (inner_tanh + 1)
gelu_glu = gate_gelu * x
tl.store(output_ptr + row_idx * output_row_stride + col_idx *
output_col_stride + tl.arange(0, BLOCK_SIZE // 2) *
output_page_stride, gelu_glu, mask=tl.arange(0, BLOCK_SIZE // 2) <
n_pages // 2)
| {
"Data Type": [],
"Functionality": [
"Activation Functions"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"BSD"
] | https://github.com/huyz2023/2by4-pretrain/blob/9e330125dea71e5a3dee235f4efb8869f9e4cdd0/sparse/GELUglu.py |
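A plain PyTorch reference for the fused GELU-GLU forward above, useful for checking the kernel's output; the split convention (x first, gate second along the last dimension) mirrors the kernel's page indexing:

```python
import torch

def gelu_glu_reference(x_and_gate: torch.Tensor) -> torch.Tensor:
    # Split the last dimension into (x, gate); gate goes through
    # tanh-approximated GELU and multiplies x.
    x, gate = x_and_gate.chunk(2, dim=-1)
    inner = 0.7978845608028654 * (gate + 0.044715 * gate * gate * gate)
    return 0.5 * gate * (torch.tanh(inner) + 1.0) * x
```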
e3441201-2cc6-4bc0-b20c-0cd97d2fe333 | triton_welford.py | pytorch-labs/tritonbench | tritonbench/operators/welford/triton_welford.py | 3a5dccb159834968567a2e45e561dc1aeaa8f8a8 | 0 | @triton.autotune(configs=[triton.Config({'XBLOCK': 1, 'RBLOCK': 1024},
num_stages=1, num_warps=8), triton.Config({'XBLOCK': 1, 'RBLOCK': 2048},
num_stages=1, num_warps=8)], key=['xnumel', 'rnumel'])
@triton.jit
def triton_red_fused_native_layer_norm_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr,
RBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp3_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp3_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp3_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + rnumel * x0), rmask, eviction_policy
='evict_last').to(tl.float32)
tmp1 = tmp0.to(tl.float32)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp3_mean_next, tmp3_m2_next, tmp3_weight_next = (triton_helpers.
welford_reduce(tmp2, tmp3_mean, tmp3_m2, tmp3_weight, roffset == 0)
)
tmp3_mean = tl.where(rmask, tmp3_mean_next, tmp3_mean)
tmp3_m2 = tl.where(rmask, tmp3_m2_next, tmp3_m2)
tmp3_weight = tl.where(rmask, tmp3_weight_next, tmp3_weight)
tmp3_tmp, tmp4_tmp, tmp5_tmp = triton_helpers.welford(tmp3_mean,
tmp3_m2, tmp3_weight, 1)
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tmp5 = tmp5_tmp[:, None]
tl.store(out_ptr0 + x0, tmp3, None)
tmp6 = rnumel
tmp7 = tmp4 / tmp6
tmp8 = 1e-05
tmp9 = tmp7 + tmp8
tmp10 = libdevice.rsqrt(tmp9)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp10, None)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp11 = tl.load(in_ptr0 + (r1 + rnumel * x0), rmask,
eviction_policy='evict_first').to(tl.float32)
tmp15 = tl.load(in_ptr1 + r1, rmask, eviction_policy='evict_last').to(
tl.float32)
tmp18 = tl.load(in_ptr2 + r1, rmask, eviction_policy='evict_last').to(
tl.float32)
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp12 - tmp3
tmp14 = tmp13 * tmp10
tmp16 = tmp15.to(tl.float32)
tmp17 = tmp14 * tmp16
tmp19 = tmp18.to(tl.float32)
tmp20 = tmp17 + tmp19
tmp21 = tmp20.to(tl.float32)
tl.store(out_ptr1 + (r1 + rnumel * x0), tmp21, rmask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound",
"Memory-Bound"
]
} | [
"BSD"
] | https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/operators/welford/triton_welford.py |
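A per-row PyTorch reference for the fused layer norm above; mapping in_ptr1 to weight and in_ptr2 to bias is an assumption read off the kernel's second loop:

```python
import torch

def layer_norm_reference(x, weight, bias, eps=1e-05):
    # Row-wise mean and rstd (what the Welford pass accumulates), then the
    # affine transform applied in the kernel's second loop.
    mean = x.mean(dim=-1, keepdim=True)
    var = x.var(dim=-1, unbiased=False, keepdim=True)
    rstd = torch.rsqrt(var + eps)
    return (x - mean) * rstd * weight + bias, mean.squeeze(-1), rstd.squeeze(-1)
```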
b6bbdca6-8c98-4528-a967-b358c90a1d6f | triton_fused_local_attn.py | LouChao98/vqtree | ops/triton_fused_local_attn.py | 27a53274df7a804bce27dffcce5f5be73f64b6f3 | 0 | @triton.jit
def _attn_fwd_inner(acc, l_i, m_i, q, sm_scale, K_block_ptr, V_block_ptr,
start_m, offs_m, offs_n, SEQLEN_K: tl.constexpr, WINDOW_SIZE: tl.
constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, EVEN_MN: tl.
constexpr, STAGE: tl.constexpr):
if STAGE == 1:
hi = start_m * BLOCK_M - WINDOW_SIZE + BLOCK_M
lo = start_m * BLOCK_M - WINDOW_SIZE
if hi < 0:
hi = 0
if lo < 0:
lo = 0
elif STAGE == 2:
hi = start_m * BLOCK_M
lo = start_m * BLOCK_M - WINDOW_SIZE + BLOCK_M
if lo < 0:
lo = 0
else:
lo, hi = start_m * BLOCK_M, (start_m + 1) * BLOCK_M
lo = tl.multiple_of(lo, BLOCK_M)
hi = min(hi, SEQLEN_K)
EVEN_MASK_FREE = EVEN_MN & ((STAGE == 1) | (STAGE == 2))
K_block_ptr = tl.advance(K_block_ptr, (0, lo))
V_block_ptr = tl.advance(V_block_ptr, (lo, 0))
for start_n in range(lo, hi, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
if EVEN_MASK_FREE:
k = tl.load(K_block_ptr)
else:
k = tl.load(K_block_ptr, boundary_check=(1,), padding_option='zero'
)
qk = tl.dot(q, k) * (sm_scale * RCP_LN2)
if STAGE == 1:
mask = offs_m[:, None] <= start_n + WINDOW_SIZE + offs_n[None, :]
qk += tl.where(mask, 0, NEGINF)
elif STAGE == 3:
mask = offs_m[:, None] >= start_n + offs_n[None, :]
qk += tl.where(mask, 0, NEGINF)
if not EVEN_MASK_FREE:
qk += tl.where((start_n + offs_n)[None, :] < SEQLEN_K, 0, NEGINF)
m_i_new = tl.maximum(m_i, tl.max(qk, 1))
alpha = tl.math.exp2(m_i - m_i_new)
p = tl.math.exp2(qk - m_i_new[:, None])
acc *= alpha[:, None]
if EVEN_MASK_FREE:
v = tl.load(V_block_ptr)
else:
v = tl.load(V_block_ptr, boundary_check=(1,), padding_option='zero'
)
acc += tl.dot(p.to(V_block_ptr.dtype.element_ty), v)
l_i = l_i * alpha + tl.sum(p, 1)
m_i = m_i_new
K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
return acc, l_i, m_i
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access",
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/LouChao98/vqtree/blob/27a53274df7a804bce27dffcce5f5be73f64b6f3/ops/triton_fused_local_attn.py |
362d6816-df21-46a1-b625-bc3f25aab424 | 06-fused-attention.py | triton-lang/triton | python/tutorials/06-fused-attention.py | a2b398e0bb1b120f31cf386d6ae3261c3ab84207 | 0 | @triton.jit
def _attn_bwd_dkdv(dk, dv, Q, k, v, sm_scale, DO, M, D, stride_tok,
stride_d, H, N_CTX, BLOCK_M1: tl.constexpr, BLOCK_N1: tl.constexpr,
HEAD_DIM: tl.constexpr, start_n, start_m, num_steps, MASK: tl.constexpr):
offs_m = start_m + tl.arange(0, BLOCK_M1)
offs_n = start_n + tl.arange(0, BLOCK_N1)
offs_k = tl.arange(0, HEAD_DIM)
qT_ptrs = Q + offs_m[None, :] * stride_tok + offs_k[:, None] * stride_d
do_ptrs = DO + offs_m[:, None] * stride_tok + offs_k[None, :] * stride_d
tl.static_assert(BLOCK_N1 % BLOCK_M1 == 0)
curr_m = start_m
step_m = BLOCK_M1
for blk_idx in range(num_steps):
qT = tl.load(qT_ptrs)
offs_m = curr_m + tl.arange(0, BLOCK_M1)
m = tl.load(M + offs_m)
qkT = tl.dot(k, qT)
pT = tl.math.exp2(qkT - m[None, :])
if MASK:
mask = offs_m[None, :] >= offs_n[:, None]
pT = tl.where(mask, pT, 0.0)
do = tl.load(do_ptrs)
ppT = pT
ppT = ppT.to(tl.float16)
dv += tl.dot(ppT, do)
Di = tl.load(D + offs_m)
dpT = tl.dot(v, tl.trans(do)).to(tl.float32)
dsT = pT * (dpT - Di[None, :])
dsT = dsT.to(tl.float16)
dk += tl.dot(dsT, tl.trans(qT))
curr_m += step_m
qT_ptrs += step_m * stride_tok
do_ptrs += step_m * stride_tok
return dk, dv
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/triton-lang/triton/blob/a2b398e0bb1b120f31cf386d6ae3261c3ab84207/python/tutorials/06-fused-attention.py |
465954ee-4cfe-46e9-8668-a230f02bb257 | triton_sll.py | pytorch/FBGEMM | fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py | fe980ab54a6e28818d81c8694b6564e7f804418b | 0 | @triton.jit
def jagged_dense_bmm_kernel(a_ptr, a_offset_ptr, b_ptr, c_ptr, N, K,
stride_am, stride_ak, stride_bl, stride_bk, stride_bn, stride_cm,
stride_cn, max_seq_len, allow_tf32: tl.constexpr, BLOCK_SIZE_M: tl.
constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr):
"""Kernel for computing the matmul C = A x B.
A has shape (sum_B(M_i), K), B has shape (B, K, N) and C has shape (sum_B(M_i), N)
"""
pid_batch = tl.program_id(0)
pid = tl.program_id(1)
begin = tl.load(a_offset_ptr + pid_batch)
end = tl.load(a_offset_ptr + pid_batch + 1)
M = tl.minimum(end - begin, max_seq_len)
if M == 0:
return
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
pid_m = pid // num_pid_n
pid_n = pid % num_pid_n
offs_am = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
if pid_m * BLOCK_SIZE_M >= M:
return
offs_bn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
if pid_n * BLOCK_SIZE_N >= N:
return
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] *
stride_ak + begin * stride_am)
b_ptrs = b_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] *
stride_bn + pid_batch * stride_bl)
c = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for k in range(0, K, BLOCK_SIZE_K):
updated_offset = k + offs_k
a = tl.load(a_ptrs, mask=(updated_offset[None, :] < K) & (offs_am[:,
None] < M), other=0.0)
b = tl.load(b_ptrs, mask=(updated_offset[:, None] < K) & (offs_bn[
None, :] < N), other=0.0)
c += tl.dot(a, b, allow_tf32=allow_tf32)
a_ptrs += BLOCK_SIZE_K * stride_ak
b_ptrs += BLOCK_SIZE_K * stride_bk
offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
mask = (offs_m[:, None] < M) & (offs_n[None, :] < N)
c_ptrs = c_ptr + stride_cm * offs_m[:, None] + stride_cn * offs_n[None, :
] + begin * stride_cm
tl.store(c_ptrs, c, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access",
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"Memory-Bound"
]
} | [
"BSD",
"MIT"
] | https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py |
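A shape sketch for the jagged-dense bmm above, with hypothetical sizes to make the offset layout concrete:

```python
import torch

# A concatenates per-sample (M_i, K) matrices, B is dense (B, K, N),
# and C mirrors A's jagged layout with N columns.
lengths = torch.tensor([3, 5, 2])                                              # M_i per sample
a_offsets = torch.cat([torch.zeros(1, dtype=torch.long), lengths.cumsum(0)])   # [0, 3, 8, 10]
K, N = 16, 32
A = torch.randn(int(lengths.sum()), K)    # (sum_B M_i, K)
B = torch.randn(len(lengths), K, N)       # (B, K, N)
C = torch.empty(int(lengths.sum()), N)    # (sum_B M_i, N)
```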
ff383b02-6ac6-4d19-8a8c-ff80198f182f | z_order.py | Kitsunetic/space-filling-pytorch | space_filling_pytorch/functional/z_order.py | 0de955ad1036973ee7506c5a0124c208acec722d | 0 | @triton.jit
def _encode_z_kernel(xyz_ptr, distance_ptr, B, N, space_size, x_offset,
y_offset, z_offset, str_xyz_B, str_xyz_N, str_xyz_C, BLK: tl.constexpr,
ASSIGN_BATCH_INDEX: tl.constexpr):
pid_b = tl.program_id(0)
pid_n = tl.program_id(1)
offs_n = pid_n * BLK + tl.arange(0, BLK)
mask_n = offs_n < N
xyz_ptrs = xyz_ptr + pid_b * str_xyz_B + offs_n * str_xyz_N
fx = tl.load(xyz_ptrs + x_offset * str_xyz_C, mask=mask_n)
fy = tl.load(xyz_ptrs + y_offset * str_xyz_C, mask=mask_n)
fz = tl.load(xyz_ptrs + z_offset * str_xyz_C, mask=mask_n)
ret = _calculate_zorder(fx, fy, fz, space_size)
if ASSIGN_BATCH_INDEX:
ret |= pid_b.to(tl.int64) << 48
tl.store(distance_ptr + pid_b * N + offs_n, ret, mask=mask_n)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/Kitsunetic/space-filling-pytorch/blob/0de955ad1036973ee7506c5a0124c208acec722d/space_filling_pytorch/functional/z_order.py |
1f260b65-2aa3-4dd8-ad87-6f5bba941dd2 | block_sparse_attention_lut.py | sparklesea/sparse-quant | sparse-attention/muxi/playground/kernels/block_sparse_attention_lut.py | e3d8b6ecab208c31b744913ed8c3caaa43605f86 | 0 | @triton.jit
def _sparse_attention_prefill_fwd_kernel(Q, K, V, sm_scale, Out, lut,
stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh,
stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn,
stride_oz, stride_oh, stride_om, stride_on, stride_lz, stride_lh,
stride_lx, Z, H, N_CTX, LT, NNZ: tl.constexpr, BLOCK_M: tl.constexpr,
BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr):
start_m = tl.program_id(0)
off_hz = tl.program_id(1)
lut_indicator = tl.program_id(1) % H
qvk_offset = off_hz * stride_qh
lut_offset = lut_indicator * stride_lz
Q_block_ptr = tl.make_block_ptr(base=Q + qvk_offset, shape=(N_CTX,
BLOCK_DMODEL), strides=(stride_qm, stride_qk), offsets=(start_m *
BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0))
K_block_ptr = tl.make_block_ptr(base=K + qvk_offset, shape=(
BLOCK_DMODEL, N_CTX), strides=(stride_kk, stride_kn), offsets=(0, 0
), block_shape=(BLOCK_DMODEL, BLOCK_N), order=(0, 1))
V_block_ptr = tl.make_block_ptr(base=V + qvk_offset, shape=(N_CTX,
BLOCK_DMODEL), strides=(stride_vk, stride_vn), offsets=(0, 0),
block_shape=(BLOCK_N, BLOCK_DMODEL), order=(1, 0))
O_block_ptr = tl.make_block_ptr(base=Out + qvk_offset, shape=(N_CTX,
BLOCK_DMODEL), strides=(stride_om, stride_on), offsets=(start_m *
BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0))
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float('inf')
l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
qk_scale = sm_scale * 1.44269504
q = tl.load(Q_block_ptr, boundary_check=(0, 1), padding_option='zero')
q = (q * qk_scale).to(tl.float16)
last_nnz_id = -1
for nnz_id in range(NNZ):
present_nnz_id = tl.load(lut + lut_offset + start_m * stride_lh +
nnz_id * stride_lx)
start_n = present_nnz_id * BLOCK_N
start_n = tl.multiple_of(start_n, BLOCK_N)
present_nnz_id = present_nnz_id.to(tl.int32)
k = tl.load(tl.advance(K_block_ptr, (0, start_n)), boundary_check=(
0, 1), padding_option='zero')
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
qk += tl.dot(q, k)
if LT:
qk = tl.where(offs_m[:, None] >= start_n + offs_n[None, :], qk,
float('-inf'))
qk = tl.where((offs_m[:, None] < N_CTX) & ((start_n + offs_n)[None,
:] < N_CTX), qk, float('-inf'))
m_ij = tl.max(qk, 1)
p = tl.math.exp2(qk - m_ij[:, None])
p = tl.where(m_ij[:, None] == tl.full((BLOCK_M, BLOCK_N), float(
'-inf'), tl.float32), 0.0, tl.math.exp2(qk - m_ij[:, None]))
p = p * (last_nnz_id != present_nnz_id)
l_ij = tl.sum(p, 1)
m_i_new = tl.maximum(m_i, m_ij)
alpha = tl.math.exp2(m_i - m_i_new)
beta = tl.math.exp2(m_ij - m_i_new)
l_i *= alpha
l_i_new = l_i + beta * l_ij
p_scale = beta / l_i_new
p = p * p_scale[:, None]
acc_scale = l_i / l_i_new
acc = acc * acc_scale[:, None]
v = tl.load(tl.advance(V_block_ptr, (start_n, 0)), boundary_check=(
0, 1), padding_option='zero')
p = p.to(tl.float16)
acc += tl.dot(p, v)
l_i = l_i_new
m_i = m_i_new
last_nnz_id = present_nnz_id
tl.store(O_block_ptr, acc.to(tl.float16), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access",
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache",
"BSD"
] | https://github.com/sparklesea/sparse-quant/blob/e3d8b6ecab208c31b744913ed8c3caaa43605f86/sparse-attention/muxi/playground/kernels/block_sparse_attention_lut.py |
33299f98-59f0-48e0-ae23-2da139cb499d | triton_sll.py | pytorch/FBGEMM | fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py | fe980ab54a6e28818d81c8694b6564e7f804418b | 0 | @triton.jit
def jagged_softmax_backward_kernel(grad_output_ptr, softmax_output_ptr,
grad_input_ptr, input_offsets_ptr, grad_output_row_stride,
grad_output_head_stride, softmax_output_row_stride,
softmax_output_head_stride, grad_input_row_stride,
grad_input_head_stride, max_seq_len: tl.constexpr, BLOCK_SIZE: tl.constexpr
):
"""
grad_output_ptr shape is [SUM_B, H]
softmax_output shape is [SUM_B, H]
grad_input shape is [SUM_B, H]
"""
pid_batch = tl.program_id(0)
pid_head = tl.program_id(1)
row_begin = tl.load(input_offsets_ptr + pid_batch)
row_end = tl.load(input_offsets_ptr + pid_batch + 1)
N = tl.minimum(max_seq_len, row_end - row_begin)
col_offsets = tl.arange(0, BLOCK_SIZE)
grad_output_ptrs = (grad_output_ptr + row_begin *
grad_output_row_stride + col_offsets * grad_output_row_stride +
pid_head * grad_output_head_stride)
softmax_output_ptrs = (softmax_output_ptr + row_begin *
softmax_output_row_stride + col_offsets * softmax_output_row_stride +
pid_head * softmax_output_head_stride)
grad_output_row = tl.load(grad_output_ptrs, mask=col_offsets < N, other=0.0
)
softmax_output_row = tl.load(softmax_output_ptrs, mask=col_offsets < N,
other=0.0)
sum_value = tl.sum(grad_output_row * softmax_output_row, axis=0)
grad_input_row = (grad_output_row - sum_value) * softmax_output_row
grad_input_ptrs = (grad_input_ptr + row_begin * grad_input_row_stride +
col_offsets * grad_input_row_stride + pid_head * grad_input_head_stride
)
tl.store(grad_input_ptrs, grad_input_row, mask=col_offsets < N)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Softmax"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"BSD",
"MIT"
] | https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py |
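The per-(sample, head) math of the jagged softmax backward kernel, as a dense PyTorch reference:

```python
import torch

def softmax_backward_reference(grad_output: torch.Tensor, softmax_output: torch.Tensor) -> torch.Tensor:
    # dX = (dY - sum(dY * Y)) * Y, reduced over the sequence dimension,
    # which is what the kernel computes for each (sample, head) slice.
    s = (grad_output * softmax_output).sum(dim=0, keepdim=True)
    return (grad_output - s) * softmax_output
```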
a5dd6188-758a-4f75-ad16-7e404fe62595 | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/linear_attn/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.jit
def chunk_linear_attn_fwd_kernel_h(k, v, h, h0, ht, s_k_h, s_k_t, s_k_d,
s_v_h, s_v_t, s_v_d, s_h_h, s_h_t, T: tl.constexpr, K: tl.constexpr, V:
tl.constexpr, BT: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr, NT:
tl.constexpr, USE_INITIAL_STATE: tl.constexpr, STORE_FINAL_STATE: tl.
constexpr):
i_k, i_v, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
b_h = tl.zeros([BK, BV], dtype=tl.float32)
if USE_INITIAL_STATE:
p_h0 = tl.make_block_ptr(h0 + i_bh * K * V, (K, V), (V, 1), (i_k *
BK, i_v * BV), (BK, BV), (1, 0))
b_h = tl.load(p_h0, boundary_check=(0, 1)).to(tl.float32)
for i_t in range(NT):
p_k = tl.make_block_ptr(k + i_bh * s_k_h, (K, T), (s_k_d, s_k_t), (
i_k * BK, i_t * BT), (BK, BT), (0, 1))
p_v = tl.make_block_ptr(v + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), (
i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_h = tl.make_block_ptr(h + i_bh * s_h_h + i_t * K * V, (K, V), (
s_h_t, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
tl.store(p_h, b_h.to(p_h.dtype.element_ty), boundary_check=(0, 1))
b_k = tl.load(p_k, boundary_check=(0, 1))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_h += tl.dot(b_k, b_v, allow_tf32=False)
if STORE_FINAL_STATE:
p_ht = tl.make_block_ptr(ht + i_bh * K * V, (K, V), (V, 1), (i_k *
BK, i_v * BV), (BK, BV), (1, 0))
tl.store(p_ht, b_h.to(p_ht.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/linear_attn/chunk.py |
e7e27939-c077-46e1-9632-7858a429dae5 | k_layer_norm.py | cpuhrsch/torchfused | torchfused/triton/k_layer_norm.py | 6c40ed160dcecbe7825f268f7c86bccd359e0ebf | 0 | @triton.jit
def _layer_norm_non_affine_fw(X, Y, M, V, stride, N, eps, **META):
_store(_layer_norm_non_affine(X, M, V, stride, N, eps, META), Y, stride,
N, META)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"BSD"
] | https://github.com/cpuhrsch/torchfused/blob/6c40ed160dcecbe7825f268f7c86bccd359e0ebf/torchfused/triton/k_layer_norm.py |
a8c30808-d914-41b7-8bd2-094cbdbfcbd0 | k_fused_matmul_bw.py | cpuhrsch/torchfused | torchfused/triton/k_fused_matmul_bw.py | 6c40ed160dcecbe7825f268f7c86bccd359e0ebf | 0 | @triton.heuristics({'EVEN_N': lambda *args, **meta: args[3] % meta[
'BLOCK_COL'] == 0})
@triton.autotune(configs=[triton.Config({'BLOCK_COL': 32}, num_stages=5,
num_warps=2), triton.Config({'BLOCK_COL': 64}, num_stages=5, num_warps=
2), triton.Config({'BLOCK_COL': 128}, num_stages=3, num_warps=4),
triton.Config({'BLOCK_COL': 256}, num_stages=3, num_warps=8), triton.
Config({'BLOCK_COL': 512}, num_stages=3, num_warps=8), triton.Config({
'BLOCK_COL': 1024}, num_stages=3, num_warps=16)], key=['N'])
@triton.jit
def kernel_bw(GRAD_ACT, GRAD_OUT, ACT_INPUTS, N, stride_gom, stride_aim, **META
):
"""
Go over all the activation inputs, compute the corresponding gradient
"""
BLOCK_N = META['BLOCK_COL']
pid_m, pid_n = tl.program_id(axis=0), tl.program_id(axis=1)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
act_input_ptrs = ACT_INPUTS + pid_m * stride_aim + rn
if META['EVEN_N']:
act_in = tl.load(act_input_ptrs)
else:
act_in = tl.load(act_input_ptrs, mask=rn < N, other=0.0)
grad_act = META['ACTIVATION_GRAD'](act_in)
grad_out_ptrs = GRAD_OUT + pid_m * stride_gom + rn
if META['EVEN_N']:
grad_out = tl.load(grad_out_ptrs)
else:
grad_out = tl.load(grad_out_ptrs, mask=rn < N)
grad_act *= grad_out
grad_act_ptrs = GRAD_ACT + pid_m * stride_gom + rn
tl.store(grad_act_ptrs, grad_act, mask=rn < N)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Activation Functions"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"BSD"
] | https://github.com/cpuhrsch/torchfused/blob/6c40ed160dcecbe7825f268f7c86bccd359e0ebf/torchfused/triton/k_fused_matmul_bw.py |
e3fb6f4a-8ca3-4994-899f-d62d808652d3 | shape.py | 2niuhe/triton_utils | src/triton_utils/shape.py | 6184906ac3b86dac3ccbfac128ec393ccecde5df | 0 | @triton.jit
def store_1d(vals, ptr, sz: tl.constexpr, n, max, stride=1):
"""Store 1d block into nth chunk of vector (defined by ptr), where each chunk has size sz"""
offs = get_1d_offest(sz, n)
mask = get_1d_mask(offs, max)
tl.store(ptr + offs, vals, mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Memory-Bound"
]
} | [
"Apache"
] | https://github.com/2niuhe/triton_utils/blob/6184906ac3b86dac3ccbfac128ec393ccecde5df/src/triton_utils/shape.py |
ad3e39e4-beb3-4789-856e-e24e65695e79 | wy_fast.py | sustcsonglin/flash-linear-attention | fla/ops/delta_rule/wy_fast.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({}, num_warps=num_warps) for
num_warps in [1, 2, 4, 8]], key=['BT', 'BK'])
@triton.jit
def fwd_recompute_w_kernel(k, beta, w, A, offsets, indices, T: tl.constexpr,
H: tl.constexpr, K: tl.constexpr, BT: tl.constexpr, BK: tl.constexpr,
USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl.constexpr):
i_t, i_bh = tl.program_id(0), tl.program_id(1)
i_b, i_h = i_bh // H, i_bh % H
if USE_OFFSETS:
i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices +
i_t * 2 + 1).to(tl.int32)
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
T = eos - bos
else:
bos, eos = i_b * T, i_b * T + T
if HEAD_FIRST:
p_beta = tl.make_block_ptr(beta + i_bh * T, (T,), (1,), (i_t * BT,),
(BT,), (0,))
p_A = tl.make_block_ptr(A + i_bh * T * BT, (T, BT), (BT, 1), (i_t *
BT, 0), (BT, BT), (1, 0))
else:
p_beta = tl.make_block_ptr(beta + bos * H + i_h, (T,), (H,), (i_t *
BT,), (BT,), (0,))
p_A = tl.make_block_ptr(A + (bos * H + i_h) * BT, (T, BT), (H * BT,
1), (i_t * BT, 0), (BT, BT), (1, 0))
b_beta = tl.load(p_beta, boundary_check=(0,))
b_A = tl.load(p_A, boundary_check=(0, 1)).to(k.dtype.element_ty)
for i_k in range(tl.cdiv(K, BK)):
if HEAD_FIRST:
p_k = tl.make_block_ptr(k + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
p_w = tl.make_block_ptr(w + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
else:
p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_w = tl.make_block_ptr(w + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
b_k = tl.load(p_k, boundary_check=(0, 1))
b_kb = (b_k * b_beta[:, None]).to(b_k.dtype)
b_w = tl.dot(b_A, b_kb, allow_tf32=False)
tl.store(p_w, b_w.to(p_w.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access",
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/delta_rule/wy_fast.py |
02c185b4-ba6d-4e60-84de-9ccd865f78e9 | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/abc/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.jit
def chunk_abc_fwd_kernel_intra_K(v, z, o, A, s_v_h, s_v_t, s_v_d, T: tl.
constexpr, V: tl.constexpr, BT: tl.constexpr, BC: tl.constexpr, BV: tl.
constexpr, NC: tl.constexpr):
i_v, i_c, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_t, i_i = i_c // NC, i_c % NC
p_z = tl.make_block_ptr(z + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), (i_t *
BT + i_i * BC, i_v * BV), (BC, BV), (1, 0))
p_zn = tl.make_block_ptr(z + i_bh * s_v_h, (T * V,), (s_v_d,), ((i_t *
BT + i_i * BC) * V + i_v * BV,), (BV,), (0,))
b_zn = tl.load(p_zn, boundary_check=(0,))
b_o = tl.zeros([BC, BV], dtype=tl.float32)
for i_j in range(0, i_i):
p_A = tl.make_block_ptr(A + i_bh * T * BT, (T, BT), (BT, 1), (i_t *
BT + i_i * BC, i_j * BC), (BC, BC), (1, 0))
p_v = tl.make_block_ptr(v + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), (
i_t * BT + i_j * BC, i_v * BV), (BC, BV), (1, 0))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_A = tl.load(p_A, boundary_check=(0, 1))
b_o += tl.dot(b_A, tl.exp(b_v - b_zn[None, :]).to(b_v.dtype),
allow_tf32=False)
b_z = tl.load(p_z, boundary_check=(0, 1))
b_o *= tl.exp(b_zn[None, :] - b_z)
o_i = tl.arange(0, BC)
o_A = i_bh * T * BT + (i_t * BT + i_i * BC + tl.arange(0, BC)
) * BT + i_i * BC
m_A = i_t * BT + i_i * BC + tl.arange(0, BC) < T
for j in range(0, BC):
p_v = tl.make_block_ptr(v + i_bh * s_v_h, (T * V,), (1,), ((i_t *
BT + i_i * BC + j) * V + i_v * BV,), (BV,), (0,))
b_A = tl.load(A + o_A + j, mask=m_A, other=0)
b_v = tl.load(p_v, boundary_check=(0,)).to(tl.float32)
m_i = o_i[:, None] >= j
b_o += tl.where(m_i, b_A[:, None] * tl.exp(b_v[None, :] - b_z), 0)
p_o = tl.make_block_ptr(o + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), (i_t *
BT + i_i * BC, i_v * BV), (BC, BV), (1, 0))
tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Tiled",
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"Memory-Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/abc/chunk.py |
159f0cc5-72b0-4231-97dc-2a5e3f2c0d0b | hilbert.py | Kitsunetic/space-filling-pytorch | space_filling_pytorch/functional/hilbert.py | 0de955ad1036973ee7506c5a0124c208acec722d | 0 | @triton.jit
def _encode_hilbert_unpadded_kernel(xyz_ptr, batch_idx_ptr, code_ptr,
space_size, x_offset, y_offset, z_offset, str_xyz_n, str_xyz_c, N, BLK:
tl.constexpr, ASSIGN_BATCH_INDEX: tl.constexpr):
pid = tl.program_id(0)
offs_n = pid * BLK + tl.arange(0, BLK)
mask = offs_n < N
xyz_ptrs = xyz_ptr + offs_n * str_xyz_n
fx = tl.load(xyz_ptrs + x_offset * str_xyz_c, mask=mask)
fy = tl.load(xyz_ptrs + y_offset * str_xyz_c, mask=mask)
fz = tl.load(xyz_ptrs + z_offset * str_xyz_c, mask=mask)
ret = _calculate_hilbert_distance(fx, fy, fz, space_size)
if ASSIGN_BATCH_INDEX:
batch_idx_ptrs = batch_idx_ptr + offs_n
batch_idx = tl.load(batch_idx_ptrs, mask=mask).to(tl.int64)
ret |= batch_idx << 48
code_ptrs = code_ptr + offs_n
tl.store(code_ptrs, ret, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/Kitsunetic/space-filling-pytorch/blob/0de955ad1036973ee7506c5a0124c208acec722d/space_filling_pytorch/functional/hilbert.py |
2c6c706f-18a5-446c-bc50-dd5319c23177 | triton_fused_local_attn_rerope.py | LouChao98/vqtree | ops/triton_fused_local_attn_rerope.py | 27a53274df7a804bce27dffcce5f5be73f64b6f3 | 0 | @triton.heuristics({'EVEN_M': lambda args: args['seqlen_q'] % args[
'BLOCK_M'] == 0, 'EVEN_N': lambda args: args['seqlen_k'] % args[
'BLOCK_N'] == 0})
@triton.jit
def _fwd_kernel(Q1, Q2, K1, K2, V, Out, L, softmax_scale, stride_qb,
stride_qh, stride_qm, stride_kb, stride_kh, stride_kn, stride_vb,
stride_vh, stride_vn, stride_ob, stride_oh, stride_om, nheads, seqlen_q,
seqlen_k, CACHE_KEY_SEQLEN_Q, CACHE_KEY_SEQLEN_K, WINDOW_SIZE: tl.
constexpr, BLOCK_HEADDIM: tl.constexpr, EVEN_M: tl.constexpr, EVEN_N:
tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, WRITE_LSE:
tl.constexpr):
start_m = tl.program_id(0)
off_hb = tl.program_id(1)
off_b = off_hb // nheads
off_h = off_hb % nheads
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
offs_d = tl.arange(0, BLOCK_HEADDIM)
Q1_block_ptr = tl.make_block_ptr(base=Q1 + (off_b * stride_qb + off_h *
stride_qh), shape=(seqlen_q, BLOCK_HEADDIM), strides=(stride_qm, 1),
offsets=(start_m * BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_HEADDIM
), order=(1, 0))
Q2_block_ptr = tl.make_block_ptr(base=Q2 + (off_b * stride_qb + off_h *
stride_qh), shape=(seqlen_q, BLOCK_HEADDIM), strides=(stride_qm, 1),
offsets=(start_m * BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_HEADDIM
), order=(1, 0))
K1_block_ptr = tl.make_block_ptr(base=K1 + (off_b * stride_kb + off_h *
stride_kh), shape=(BLOCK_HEADDIM, seqlen_k), strides=(1, stride_kn),
offsets=(0, 0), block_shape=(BLOCK_HEADDIM, BLOCK_N), order=(0, 1))
K2_block_ptr = tl.make_block_ptr(base=K2 + (off_b * stride_kb + off_h *
stride_kh), shape=(BLOCK_HEADDIM, seqlen_k), strides=(1, stride_kn),
offsets=(0, 0), block_shape=(BLOCK_HEADDIM, BLOCK_N), order=(0, 1))
V_block_ptr = tl.make_block_ptr(base=V + (off_b * stride_vb + off_h *
stride_vh), shape=(seqlen_k, BLOCK_HEADDIM), strides=(stride_vn, 1),
offsets=(0, 0), block_shape=(BLOCK_N, BLOCK_HEADDIM), order=(1, 0))
l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + 1.0
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) + NEGINF
acc = tl.zeros([BLOCK_M, BLOCK_HEADDIM], dtype=tl.float32)
if EVEN_M:
q1 = tl.load(Q1_block_ptr)
q2 = tl.load(Q2_block_ptr)
else:
q1 = tl.load(Q1_block_ptr, boundary_check=(0,), padding_option='zero')
q2 = tl.load(Q2_block_ptr, boundary_check=(0,), padding_option='zero')
acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q1, q2, softmax_scale,
K1_block_ptr, K2_block_ptr, V_block_ptr, start_m, offs_m, offs_n,
seqlen_k, WINDOW_SIZE, BLOCK_M, BLOCK_N, EVEN_M & EVEN_N, 1)
acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q1, q2, softmax_scale,
K1_block_ptr, K2_block_ptr, V_block_ptr, start_m, offs_m, offs_n,
seqlen_k, WINDOW_SIZE, BLOCK_M, BLOCK_N, EVEN_M & EVEN_N, 2)
acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q1, q2, softmax_scale,
K1_block_ptr, K2_block_ptr, V_block_ptr, start_m, offs_m, offs_n,
seqlen_k, WINDOW_SIZE, BLOCK_M, BLOCK_N, EVEN_M & EVEN_N, 3)
if WRITE_LSE:
l_ptrs = L + off_hb * seqlen_q + offs_m
tl.store(l_ptrs, m_i + tl.math.log2(l_i))
acc = acc / l_i[:, None]
start_m = tl.program_id(0)
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_d = tl.arange(0, BLOCK_HEADDIM)
out_ptrs = Out + off_b * stride_ob + off_h * stride_oh + (offs_m[:,
None] * stride_om + offs_d[None, :])
if EVEN_M:
tl.store(out_ptrs, acc)
else:
tl.store(out_ptrs, acc, mask=offs_m[:, None] < seqlen_q)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Softmax"
],
"Memory Access Pattern": [
"Tiled",
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"Apache"
] | https://github.com/LouChao98/vqtree/blob/27a53274df7a804bce27dffcce5f5be73f64b6f3/ops/triton_fused_local_attn_rerope.py |
295647f9-b805-4b3f-8f9a-72124ff188fd | relu.py | daemyung/practice-triton | relu.py | 27f727726f1507c8380a1c11751d851c7c4a07ce | 0 | @staticmethod
@triton.jit
def backward(grad_input_ptr, grad_output_ptr, input_ptr, size, block_size:
tl.constexpr):
pid = tl.program_id(0)
offset = pid * block_size
grad_input_block_ptr = tl.make_block_ptr(grad_input_ptr, shape=(size,),
strides=(1,), offsets=(offset,), block_shape=(block_size,), order=(0,))
grad_output_block_ptr = tl.make_block_ptr(grad_output_ptr, shape=(size,
), strides=(1,), offsets=(offset,), block_shape=(block_size,),
order=(0,))
input_block_ptr = tl.make_block_ptr(input_ptr, shape=(size,), strides=(
1,), offsets=(offset,), block_shape=(block_size,), order=(0,))
grad_output = tl.load(grad_output_block_ptr, boundary_check=(0,))
input = tl.load(input_block_ptr, boundary_check=(0,))
condition = input >= 0
grad_input = tl.where(condition, grad_output, 0)
tl.store(grad_input_block_ptr, grad_input, boundary_check=(0,))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Coalesced",
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/daemyung/practice-triton/blob/27f727726f1507c8380a1c11751d851c7c4a07ce/relu.py |
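A possible host-side wrapper for the ReLU backward kernel above; the free-standing name `backward` and the block size are assumptions (in the repo the kernel is a staticmethod of an autograd function):

```python
import torch
import triton

def relu_backward(grad_output: torch.Tensor, inp: torch.Tensor, block_size: int = 1024) -> torch.Tensor:
    grad_input = torch.empty_like(inp)
    size = inp.numel()
    # One program per block_size-element chunk.
    grid = (triton.cdiv(size, block_size),)
    backward[grid](grad_input, grad_output, inp, size, block_size=block_size)
    return grad_input
```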
98f78d67-d8c7-4106-a9bc-6716d5cd0889 | sb_varlen_fwd.py | shawntan/stickbreaking-attention | stickbreaking_attention/sb_varlen/sb_varlen_fwd.py | 8dd32ad5e58f0ee0232fd4782dc53d354ff8d283 | 0 | @triton.jit
def compute_block(q, k, qk_scale, neg_log_acc, M_blk_idxs, N_blk_idxs, cm,
on_band: tl.constexpr, ALLOW_TF32: tl.constexpr, backward: tl.constexpr,
attend_current: tl.constexpr=False, use_cumsum: tl.constexpr=False,
is_compiling: tl.constexpr=False):
qk = tl.dot(q, tl.trans(k), allow_tf32=ALLOW_TF32) * qk_scale
log_om_beta = -softplus(qk, is_compiling=is_compiling)
if on_band:
if attend_current:
block_mask = M_blk_idxs[:, None] >= N_blk_idxs[None, :]
else:
block_mask = M_blk_idxs[:, None] > N_blk_idxs[None, :]
log_om_beta = tl.where(block_mask, log_om_beta, 0.0)
if backward:
neg_log_acc -= tl.sum(log_om_beta, axis=1)
log_p = qk + neg_log_acc[:, None]
if use_cumsum:
log_p += tl.cumsum(log_om_beta.to(q.dtype), axis=1, reverse=True)
else:
log_p = tl.dot(log_om_beta.to(q.dtype), cm, acc=log_p,
allow_tf32=ALLOW_TF32)
p = tl.math.exp2(log_p)
p = tl.where(block_mask, p, 0.0)
else:
if backward:
neg_log_acc -= tl.sum(log_om_beta, axis=1)
log_p = qk + neg_log_acc[:, None]
if use_cumsum:
log_p += tl.cumsum(log_om_beta.to(q.dtype), axis=1, reverse=True)
else:
log_p = tl.dot(log_om_beta.to(q.dtype), cm, acc=log_p,
allow_tf32=ALLOW_TF32)
p = tl.math.exp2(log_p)
if not backward:
neg_log_acc += tl.sum(log_om_beta, axis=1)
return p, log_om_beta, neg_log_acc
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Activation Functions"
],
"Memory Access Pattern": [
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/shawntan/stickbreaking-attention/blob/8dd32ad5e58f0ee0232fd4782dc53d354ff8d283/stickbreaking_attention/sb_varlen/sb_varlen_fwd.py |
c3784dfb-4b90-4a4f-9175-4a69cc1f915d | fused_attn.py | thunlp/Delta-CoMe | quant/fused_attn.py | 646a1fbf3443295c4b04aba27334c6bc5aa3df4f | 0 | @triton.jit
def rotate_half_kernel(qk_seq_ptr, position_ids_ptr, qk_seq_stride,
position_ids_batch_stride, seq_len, HEAD_DIM: tl.constexpr,
BLOCK_HEIGHT: tl.constexpr, BLOCK_WIDTH: tl.constexpr, INV_BASE: tl.
constexpr):
HALF_HEAD: tl.constexpr = HEAD_DIM // 2
STEPS_PER_ROW: tl.constexpr = HALF_HEAD // BLOCK_WIDTH
batch_seq = tl.program_id(axis=0)
row_blk_x_col_blk = tl.program_id(axis=1)
row_blk = row_blk_x_col_blk // STEPS_PER_ROW
row = row_blk * BLOCK_HEIGHT
if BLOCK_WIDTH < HALF_HEAD:
col_blk = row_blk_x_col_blk % STEPS_PER_ROW
col = col_blk * BLOCK_WIDTH
else:
col: tl.constexpr = 0
batch = batch_seq // seq_len
seq = batch_seq % seq_len
position_id = tl.load(position_ids_ptr + batch *
position_ids_batch_stride + seq)
freq = tl.libdevice.exp((col + tl.arange(0, BLOCK_WIDTH)).to(tl.float32
) * INV_BASE) * position_id
cos = tl.cos(freq).to(tl.float32)
sin = tl.sin(freq).to(tl.float32)
col_offsets: tl.constexpr = tl.arange(0, BLOCK_WIDTH)
embed_offsets = row * HEAD_DIM + col + col_offsets
x_ptrs = qk_seq_ptr + batch_seq * qk_seq_stride + embed_offsets
for k in range(0, BLOCK_HEIGHT):
x = tl.load(x_ptrs).to(tl.float32)
y = tl.load(x_ptrs + HALF_HEAD).to(tl.float32)
out_x = x * cos - y * sin
tl.store(x_ptrs, out_x)
out_y = x * sin + y * cos
tl.store(x_ptrs + HALF_HEAD, out_y)
x_ptrs += HEAD_DIM
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Tiled",
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/thunlp/Delta-CoMe/blob/646a1fbf3443295c4b04aba27334c6bc5aa3df4f/quant/fused_attn.py |
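A dense reference for the rotary update performed by `rotate_half_kernel`, assuming the last dimension of x is HEAD_DIM and positions broadcast over the leading dimensions (the blocked layout handled by the kernel is abstracted away here):

```python
import torch

def rotate_half_reference(x: torch.Tensor, position_ids: torch.Tensor, inv_base: float) -> torch.Tensor:
    half = x.shape[-1] // 2
    freq = torch.exp(torch.arange(half, device=x.device, dtype=torch.float32) * inv_base)
    angles = position_ids.to(torch.float32)[..., None] * freq   # (..., seq_len, half)
    cos, sin = torch.cos(angles), torch.sin(angles)
    x1, x2 = x[..., :half].float(), x[..., half:].float()
    return torch.cat([x1 * cos - x2 * sin, x1 * sin + x2 * cos], dim=-1).to(x.dtype)
```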
297bd9f8-dbf4-4cd4-b87b-6208c25245d1 | pointwise.py | ServiceNow/Fast-LLM | fast_llm/functional/triton/pointwise.py | 8b46289079da67cba99628448a6b6083dac083cf | 0 | @triton.jit
def triton_copy_kernel(input_ptr, out_ptr, numel: tl.constexpr, block_size:
tl.constexpr):
block_start = tl.program_id(axis=0).to(tl.int64) * block_size
offsets = block_start + tl.arange(0, block_size)
mask = offsets < numel
input_ = tl.load(input_ptr + offsets, mask=mask)
tl.store(out_ptr + offsets, input_, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Coalesced",
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Memory-Bound",
"High Throughput"
]
} | [
"Apache"
] | https://github.com/ServiceNow/Fast-LLM/blob/8b46289079da67cba99628448a6b6083dac083cf/fast_llm/functional/triton/pointwise.py |
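A possible host-side launcher for the copy kernel above, sketched with an illustrative block size; the actual call site in Fast-LLM may choose a different block size or wrapper.

import torch
import triton

def triton_copy(x: torch.Tensor) -> torch.Tensor:
    # Hypothetical launcher: one program per block of `block_size` contiguous elements.
    out = torch.empty_like(x)
    numel = x.numel()
    block_size = 1024  # illustrative choice
    grid = (triton.cdiv(numel, block_size),)
    triton_copy_kernel[grid](x, out, numel=numel, block_size=block_size)
    return out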
c7b00225-9961-4819-bb21-d098a0681a35 | RzLinearBackward.py | apd10/RzLinear | python/rz_linear/impl/RzLinearBackward.py | eb56657b2de0a97f398f88af421b0fbcbc5469c9 | 0 | @triton.autotune(configs=[triton.Config({'BLOCK_SIZE_N': 128,
'BLOCK_SIZE_K': 256, 'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=8),
triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_M':
32}, num_stages=3, num_warps=8), triton.Config({'BLOCK_SIZE_N': 128,
'BLOCK_SIZE_K': 256, 'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4),
triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_M':
32}, num_stages=3, num_warps=4), triton.Config({'BLOCK_SIZE_N': 256,
'BLOCK_SIZE_K': 64, 'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 256, 'BLOCK_SIZE_M':
32}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_N': 128,
'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_M':
32}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_N': 128,
'BLOCK_SIZE_K': 64, 'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 64, 'BLOCK_SIZE_M':
32}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_N': 256,
'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=8),
triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64, 'BLOCK_SIZE_M':
16}, num_stages=2, num_warps=4), triton.Config({'BLOCK_SIZE_N': 64,
'BLOCK_SIZE_K': 256, 'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4),
triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_M':
16}, num_stages=2, num_warps=4), triton.Config({'BLOCK_SIZE_N': 64,
'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4),
triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 64, 'BLOCK_SIZE_M':
16}, num_stages=2, num_warps=4), triton.Config({'BLOCK_SIZE_N': 64,
'BLOCK_SIZE_K': 64, 'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4),
triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'BLOCK_SIZE_M':
16}, num_stages=2, num_warps=4), triton.Config({'BLOCK_SIZE_N': 32,
'BLOCK_SIZE_K': 64, 'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4),
triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'BLOCK_SIZE_M':
16}, num_stages=2, num_warps=4), triton.Config({'BLOCK_SIZE_N': 32,
'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4)],
key=['M', 'N', 'K'])
@triton.jit
def rz_linear_backward_weight_grad_kernel_fp32(a_ptr, b_ptr, c_ptr,
init_factor, M, N, K, H, stride_am, stride_ak, stride_bm, stride_bn, R7:
int, R6: int, R5: int, R4: int, R3: int, R2: int, R1: int, R0: int,
BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K:
tl.constexpr, GROUP_SIZE: tl.constexpr):
rz_linear_backward_weight_grad_core(a_ptr=a_ptr, b_ptr=b_ptr, c_ptr=
c_ptr, init_factor=init_factor, M=M, N=N, K=K, H=H, stride_am=
stride_am, stride_ak=stride_ak, stride_bm=stride_bm, stride_bn=
stride_bn, R7=R7, R6=R6, R5=R5, R4=R4, R3=R3, R2=R2, R1=R1, R0=R0,
allow_tf32=False, BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=
BLOCK_SIZE_N, BLOCK_SIZE_K=BLOCK_SIZE_K, GROUP_SIZE=GROUP_SIZE)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Persistent Kernels"
],
"Performance Objective": [
"Memory-Bound",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/apd10/RzLinear/blob/eb56657b2de0a97f398f88af421b0fbcbc5469c9/python/rz_linear/impl/RzLinearBackward.py |
b43d035a-53a0-4ffb-8bee-abc8a227f8b9 | ops.py | shawntan/scattermoe | scattermoe/kernels/ops.py | 63b76a2f5f28c052fb4cd7c34479a54158354052 | 0 | @triton.autotune(configs=_scatter2scatter_configs(), key=['M', 'N', 'K'])
@triton.heuristics({'NO_K_MASK': lambda args: args['K'] % args['BLOCK_K'] ==
0, 'NO_N_MASK': lambda args: args['N'] % args['BLOCK_N'] == 0})
@triton.jit
def _scatter2scatter(X_ptr, stride_xm, stride_xk, W_ptr, stride_we,
stride_wk, stride_wn, Y_ptr, stride_ym, stride_yn, grouped_idx_ptr,
expert_idxs_ptr, block_start_idx_ptr, FAN_OUT: tl.constexpr, M, K: tl.
constexpr, N: tl.constexpr, E: tl.constexpr, BLOCK_M: tl.constexpr,
BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr, ACC_TYPE: tl.constexpr,
OUT_M, allow_tf32: tl.constexpr, x_grouped: tl.constexpr, y_grouped: tl
.constexpr, NO_K_MASK: tl.constexpr, NO_N_MASK: tl.constexpr):
pid = tl.program_id(axis=0)
N_BLOCK_COUNT = tl.cdiv(N, BLOCK_N)
M_block_id = pid // N_BLOCK_COUNT
N_block_id = pid % N_BLOCK_COUNT
M_range = tl.arange(0, BLOCK_M)
block_start_idx = tl.load(block_start_idx_ptr + M_block_id)
M_block = tl.max_contiguous(block_start_idx + M_range, BLOCK_M)
E_idxs = tl.load(expert_idxs_ptr + M_block, mask=M_block < FAN_OUT * M,
other=E)
E_idx = tl.min(E_idxs)
E_mask = E_idxs == E_idx
M_idx = tl.load(grouped_idx_ptr + M_block, mask=E_mask, other=0)
if x_grouped:
M_in_idx = M_block
else:
M_in_idx = M_idx // FAN_OUT
if y_grouped:
M_out_idx = M_block
else:
M_out_idx = M_idx
K_block = tl.arange(0, BLOCK_K)
N_block = N_block_id * BLOCK_N + tl.arange(0, BLOCK_N)
N_mask = N_block < N
X_blk_ptrs = X_ptr + M_in_idx[:, None] * stride_xm + K_block[None, :
] * stride_xk
W_blk_ptrs = W_ptr + K_block[:, None] * stride_wk + N_block[None, :
] * stride_wn + E_idx * stride_we
acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE)
iters = tl.cdiv(K, BLOCK_K)
for K_block_id in range(0, iters):
if NO_K_MASK:
x = tl.load(X_blk_ptrs, mask=E_mask[:, None])
if NO_N_MASK or K_block_id < iters - 1:
w = tl.load(W_blk_ptrs)
else:
w = tl.load(W_blk_ptrs, mask=N_mask[None, :])
else:
K_mask = K_block_id * BLOCK_K + K_block < K
x = tl.load(X_blk_ptrs, mask=E_mask[:, None] & K_mask[None, :])
w = tl.load(W_blk_ptrs, mask=K_mask[:, None] & N_mask[None, :])
X_blk_ptrs += BLOCK_K * stride_xk
W_blk_ptrs += BLOCK_K * stride_wk
acc += tl.dot(x, w, allow_tf32=allow_tf32, out_dtype=ACC_TYPE)
Y_blk_ptrs = Y_ptr + (M_out_idx[:, None] * stride_ym + N_block[None, :] *
stride_yn)
tl.store(Y_blk_ptrs, acc, mask=E_mask[:, None] & N_mask[None, :])
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Memory-Bound",
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/shawntan/scattermoe/blob/63b76a2f5f28c052fb4cd7c34479a54158354052/scattermoe/kernels/ops.py |
98ed13e6-3d06-4b48-bf3b-55ccbee71cfb | dw_conv.py | neuro-ml/kerops | kerops/kernels/dw_conv.py | 735336775e825d5cb06b8850d25423661b12d1ac | 0 | @triton.jit
def _DWConv_wgrad_cl3d_impl(grad_ptr, input_ptr, weight_grad_ptr, H, W, D,
H_stride, W_stride, ACCTYPE: tl.constexpr, channels: tl.constexpr,
D_block: tl.constexpr, WD_grid):
H_cell = tl.program_id(0)
W_D_cell = tl.program_id(1)
D_gridsize = tl.cdiv(D, D_block)
W_cell = W_D_cell // D_gridsize
D_cell = W_D_cell % D_gridsize
input_ptr += D_cell * D_block * channels
grad_ptr += D_cell * D_block * channels
weight_grad_ptr += (H_cell * WD_grid + W_D_cell) * 27 * channels
channels_offset = tl.arange(0, channels)
channels_offset = tl.max_contiguous(tl.multiple_of(channels_offset,
channels), channels)
d_offset = tl.arange(0, D_block)
near_offset = tl.arange(0, 4) - 1
offset = d_offset[None, None, :] * channels + channels_offset[None, :, None
] + near_offset[:, None, None] * channels
mask = d_offset[None, None, :] + near_offset[:, None, None
] < D - D_block * D_cell
mask = mask and d_offset[None, None, :] + near_offset[:, None, None
] >= 0 - D_block * D_cell
mask = mask and near_offset[:, None, None] != 2
in_offset = d_offset[None, None, :] * channels + channels_offset[None,
:, None]
in_mask = d_offset[None, None, :] < D - D_block * D_cell
H1_load = 2 * H_cell + 1 < H
W1_load = 2 * W_cell + 1 < W
h0_w0 = tl.zeros([4, channels], dtype=ACCTYPE)
h0_w1 = tl.zeros([4, channels], dtype=ACCTYPE)
h0_w2 = tl.zeros([4, channels], dtype=ACCTYPE)
h1_w0 = tl.zeros([4, channels], dtype=ACCTYPE)
h1_w1 = tl.zeros([4, channels], dtype=ACCTYPE)
h1_w2 = tl.zeros([4, channels], dtype=ACCTYPE)
h2_w0 = tl.zeros([4, channels], dtype=ACCTYPE)
h2_w1 = tl.zeros([4, channels], dtype=ACCTYPE)
h2_w2 = tl.zeros([4, channels], dtype=ACCTYPE)
tmp_input_ptr = input_ptr + 2 * H_cell * H_stride + 2 * W_cell * W_stride
x_h0_w0 = tl.load(tmp_input_ptr + in_offset, mask=in_mask, other=0.0)
tmp_input_ptr = input_ptr + (2 * H_cell + 1
) * H_stride + 2 * W_cell * W_stride
x_h1_w0 = tl.load(tmp_input_ptr + in_offset, mask=in_mask and H1_load,
other=0.0)
tmp_input_ptr = input_ptr + 2 * H_cell * H_stride + (2 * W_cell + 1
) * W_stride
x_h0_w1 = tl.load(tmp_input_ptr + in_offset, mask=in_mask and W1_load,
other=0.0)
tmp_input_ptr = input_ptr + (2 * H_cell + 1) * H_stride + (2 * W_cell + 1
) * W_stride
x_h1_w1 = tl.load(tmp_input_ptr + in_offset, mask=in_mask and (W1_load and
H1_load), other=0.0)
gradw_offset = tl.arange(0, 4)[:, None] * channels + channels_offset[
None, :]
gradw_mask = near_offset[:, None] != 2
load_next = (2 * H_cell - 1 < H and 2 * H_cell - 1 >= 0) and (2 *
W_cell - 1 < W and 2 * W_cell - 1 >= 0)
tmp_grad_ptr = grad_ptr + (2 * H_cell - 1) * H_stride + (2 * W_cell - 1
) * W_stride
i = -1
j = -1
grad = tl.zeros([4, channels, D_block], dtype=tl.float16)
if load_next:
grad = tl.load(tmp_grad_ptr + offset, mask=mask)
for k in tl.static_range(0, 16):
if load_next:
if i == -1 and j == -1:
h2_w2 += tl.sum(grad * x_h0_w0, axis=2)
elif i == -1 and j == 0:
h2_w1 += tl.sum(grad * x_h0_w0, axis=2)
h2_w2 += tl.sum(grad * x_h0_w1, axis=2)
elif i == -1 and j == 1:
h2_w0 += tl.sum(grad * x_h0_w0, axis=2)
h2_w1 += tl.sum(grad * x_h0_w1, axis=2)
elif i == -1 and j == 2:
h2_w0 += tl.sum(grad * x_h0_w1, axis=2)
elif i == 0 and j == -1:
h1_w2 += tl.sum(grad * x_h0_w0, axis=2)
h2_w2 += tl.sum(grad * x_h1_w0, axis=2)
elif i == 0 and j == 0:
h1_w1 += tl.sum(grad * x_h0_w0, axis=2)
h2_w1 += tl.sum(grad * x_h1_w0, axis=2)
h1_w2 += tl.sum(grad * x_h0_w1, axis=2)
h2_w2 += tl.sum(grad * x_h1_w1, axis=2)
elif i == 0 and j == 1:
h1_w0 += tl.sum(grad * x_h0_w0, axis=2)
h2_w0 += tl.sum(grad * x_h1_w0, axis=2)
h1_w1 += tl.sum(grad * x_h0_w1, axis=2)
h2_w1 += tl.sum(grad * x_h1_w1, axis=2)
elif i == 0 and j == 2:
h1_w0 += tl.sum(grad * x_h0_w1, axis=2)
h2_w0 += tl.sum(grad * x_h1_w1, axis=2)
elif i == 1 and j == -1:
h0_w2 += tl.sum(grad * x_h0_w0, axis=2)
h1_w2 += tl.sum(grad * x_h1_w0, axis=2)
elif i == 1 and j == 0:
h0_w1 += tl.sum(grad * x_h0_w0, axis=2)
h1_w1 += tl.sum(grad * x_h1_w0, axis=2)
h0_w2 += tl.sum(grad * x_h0_w1, axis=2)
h1_w2 += tl.sum(grad * x_h1_w1, axis=2)
elif i == 1 and j == 1:
h0_w0 += tl.sum(grad * x_h0_w0, axis=2)
h1_w0 += tl.sum(grad * x_h1_w0, axis=2)
h0_w1 += tl.sum(grad * x_h0_w1, axis=2)
h1_w1 += tl.sum(grad * x_h1_w1, axis=2)
elif i == 1 and j == 2:
h0_w0 += tl.sum(grad * x_h0_w1, axis=2)
h1_w0 += tl.sum(grad * x_h1_w1, axis=2)
elif i == 2 and j == -1:
h0_w2 += tl.sum(grad * x_h1_w0, axis=2)
elif i == 2 and j == 0:
h0_w1 += tl.sum(grad * x_h1_w0, axis=2)
h0_w2 += tl.sum(grad * x_h1_w1, axis=2)
elif i == 2 and j == 1:
h0_w0 += tl.sum(grad * x_h1_w0, axis=2)
h0_w1 += tl.sum(grad * x_h1_w1, axis=2)
else:
h0_w0 += tl.sum(grad * x_h1_w1, axis=2)
k_ = k + 1
i = k_ % 4 - 1
j = k_ // 4 - 1
load_next = (2 * H_cell + i < H and 2 * H_cell + i >= 0) and (2 *
W_cell + j < W and 2 * W_cell + j >= 0)
tmp_grad_ptr = grad_ptr + (2 * H_cell + i) * H_stride + (2 * W_cell + j
) * W_stride
if load_next and k_ < 16:
grad = tl.load(tmp_grad_ptr + offset, mask=mask)
tl.store(weight_grad_ptr + gradw_offset, h0_w0, mask=gradw_mask)
tl.store(weight_grad_ptr + 3 * channels + gradw_offset, h0_w1, mask=
gradw_mask)
tl.store(weight_grad_ptr + 6 * channels + gradw_offset, h0_w2, mask=
gradw_mask)
tl.store(weight_grad_ptr + 9 * channels + gradw_offset, h1_w0, mask=
gradw_mask)
tl.store(weight_grad_ptr + 12 * channels + gradw_offset, h1_w1, mask=
gradw_mask)
tl.store(weight_grad_ptr + 15 * channels + gradw_offset, h1_w2, mask=
gradw_mask)
tl.store(weight_grad_ptr + 18 * channels + gradw_offset, h2_w0, mask=
gradw_mask)
tl.store(weight_grad_ptr + 21 * channels + gradw_offset, h2_w1, mask=
gradw_mask)
tl.store(weight_grad_ptr + 24 * channels + gradw_offset, h2_w2, mask=
gradw_mask)
| {
"Data Type": [
"fp16"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/neuro-ml/kerops/blob/735336775e825d5cb06b8850d25423661b12d1ac/kerops/kernels/dw_conv.py |
2f63fe4e-4b75-45aa-b01f-c5b1eed17423 | main_triton.py | dwgan/GraphMST | main_triton.py | 4d65ed0f108d339e3e4cfff25085a39adc6a48a2 | 0 | @triton.jit
def find_kernel(parent, u, ret_ptr, BLOCK_SIZE: tl.constexpr):
pu = tl.load(parent + u)
while pu != u:
u = pu
pu = tl.load(parent + u)
ret_ptr[u % BLOCK_SIZE] = pu
    tl.store(ret_ptr + u % BLOCK_SIZE, pu)  # store the discovered root; subscript assignment is not valid in Triton
| {
"Data Type": [],
"Functionality": [],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/dwgan/GraphMST/blob/4d65ed0f108d339e3e4cfff25085a39adc6a48a2/main_triton.py |
fee89749-a0a9-4316-87b5-75545363f010 | wy_fast.py | sustcsonglin/flash-linear-attention | fla/ops/delta_rule/wy_fast.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({}, num_warps=num_warps) for
num_warps in [1, 2, 4, 8, 16]], key=['BK'])
@triton.jit
def fwd_prepare_wy_repr_kernel_chunk32(k, beta, A, offsets, indices, T: tl.
constexpr, H: tl.constexpr, K: tl.constexpr, BT: tl.constexpr, BK: tl.
constexpr, BC: tl.constexpr, USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl.
constexpr):
i_t, i_bh = tl.program_id(0), tl.program_id(1)
i_b, i_h = i_bh // H, i_bh % H
if USE_OFFSETS:
i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices +
i_t * 2 + 1).to(tl.int32)
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
T = eos - bos
else:
bos, eos = i_b * T, i_b * T + T
if HEAD_FIRST:
p_beta = tl.make_block_ptr(beta + i_bh * T, (T,), (1,), (i_t * BT,),
(BT,), (0,))
else:
p_beta = tl.make_block_ptr(beta + bos * H + i_h, (T,), (H,), (i_t *
BT,), (BT,), (0,))
b_beta = tl.load(p_beta, boundary_check=(0,))
b_A = tl.zeros([BT, BT], dtype=tl.float32)
for i_k in range(tl.cdiv(K, BK)):
if HEAD_FIRST:
p_k = tl.make_block_ptr(k + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
else:
p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
b_k = tl.load(p_k, boundary_check=(0, 1))
b_kb = (b_k * b_beta[:, None]).to(b_k.dtype)
b_A += tl.dot(b_kb, tl.trans(b_k), allow_tf32=False)
b_A = -tl.where(tl.arange(0, BT)[:, None] > tl.arange(0, BT)[None, :],
b_A, 0)
for i in range(1, BT):
mask = tl.arange(0, BT) == i
b_a = tl.sum(tl.where(mask[:, None], b_A, 0), 0)
b_a = b_a + tl.sum(b_a[:, None] * b_A, 0) * (tl.arange(0, BT) < i)
b_A = tl.where(mask[:, None], b_a, b_A)
b_A += tl.arange(0, BT)[:, None] == tl.arange(0, BT)[None, :]
if HEAD_FIRST:
p_A = tl.make_block_ptr(A + i_bh * T * BT, (T, BT), (BT, 1), (i_t *
BT, 0), (BT, BT), (1, 0))
else:
p_A = tl.make_block_ptr(A + (bos * H + i_h) * BT, (T, BT), (H * BT,
1), (i_t * BT, 0), (BT, BT), (1, 0))
tl.store(p_A, b_A.to(p_A.dtype.element_ty), boundary_check=(0, 1))
b_A = b_A.to(k.dtype.element_ty)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/delta_rule/wy_fast.py |
3ccfe00c-b0c4-4828-873a-fd5b2174ea1b | kernels.py | pytorch-labs/tritonbench | tritonbench/operators/jagged_mean/kernels.py | 3a5dccb159834968567a2e45e561dc1aeaa8f8a8 | 0 | @triton.autotune(configs=[triton.Config({'BLOCK_SIZE_RAGGED': b_r,
'BLOCK_SIZE_M': b_m}, num_warps=w, num_stages=s) for b_r, b_m, w, s in
itertools.product(BLOCK_SIZES_RAGGED, BLOCK_SIZES_M, NUM_WARPS,
NUM_STAGES)], key=['M'])
@triton.jit
def triton_jagged_mean_kernel_variable_length_loop_sum_then_buffer(
input_ptr_values, input_ptr_offsets, output_ptr, M, BLOCK_SIZE_RAGGED:
tl.constexpr, BLOCK_SIZE_M: tl.constexpr):
pid = tl.program_id(axis=0)
pid_b = pid // tl.cdiv(M, BLOCK_SIZE_M)
pid_m = pid % tl.cdiv(M, BLOCK_SIZE_M)
buffer = tl.zeros((1, BLOCK_SIZE_M), dtype=tl.float32)
block_start_m = pid_m * BLOCK_SIZE_M
offsets_m = block_start_m + tl.arange(0, BLOCK_SIZE_M)
mask_m = offsets_m < M
ragged_start, ragged_end = tl.load(input_ptr_offsets + pid_b), tl.load(
input_ptr_offsets + (pid_b + 1))
ragged_len = ragged_end - ragged_start
for block_start_ragged in range(ragged_start, ragged_end, BLOCK_SIZE_RAGGED
):
offsets_ragged = block_start_ragged + tl.arange(0, BLOCK_SIZE_RAGGED)
mask_ragged = offsets_ragged < ragged_end
idxs = offsets_ragged[:, None] * M + offsets_m
mask = mask_ragged[:, None] & mask_m
input = tl.load(input_ptr_values + idxs, mask=mask, other=0)
buffer += tl.sum(input, axis=0)
buffer_view = buffer.reshape((BLOCK_SIZE_M,))
buffer_view_mean = buffer_view * (1 / ragged_len)
output_offsets = offsets_m + pid_b * M
output_mask = output_offsets < M * (pid_b + 1)
tl.store(output_ptr + output_offsets, buffer_view_mean, mask=output_mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Persistent Kernels"
],
"Performance Objective": [
"High Throughput"
]
} | [
"BSD"
] | https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/operators/jagged_mean/kernels.py |
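A hedged launcher sketch for the jagged-mean kernel above: `values` is assumed to be the flattened jagged tensor viewed as rows of length M, and `offsets` holds the B+1 row boundaries; since the block sizes are autotuned, the grid is computed from the selected meta-parameters.

import torch
import triton

def jagged_mean(values: torch.Tensor, offsets: torch.Tensor, M: int) -> torch.Tensor:
    # Hypothetical launcher, not the benchmark's actual harness.
    B = offsets.numel() - 1
    out = torch.zeros((B, M), dtype=torch.float32, device=values.device)
    grid = lambda meta: (B * triton.cdiv(M, meta['BLOCK_SIZE_M']),)
    triton_jagged_mean_kernel_variable_length_loop_sum_then_buffer[grid](
        values, offsets, out, M
    )
    return out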
58fb14a4-ee5b-47db-b1e7-de8b3fd737be | fused_moe.py | Charlie-XIAO/sparse-vllm | vllm/model_executor/layers/fused_moe/fused_moe.py | d228909a30b0c245c35417fb7d2acdf9a3690042 | 0 | @triton.jit
def fused_moe_kernel(a_ptr, b_ptr, c_ptr, a_scale_ptr, b_scale_ptr,
topk_weights_ptr, sorted_token_ids_ptr, expert_ids_ptr,
num_tokens_post_padded_ptr, N, K, EM, num_valid_tokens, stride_am,
stride_ak, stride_be, stride_bk, stride_bn, stride_cm, stride_cn,
stride_bse, stride_bsn, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.
constexpr, BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M: tl.constexpr,
MUL_ROUTED_WEIGHT: tl.constexpr, top_k: tl.constexpr, compute_type: tl.
constexpr, use_fp8_w8a8: tl.constexpr, use_int8_w8a16: tl.constexpr):
"""
Implements the fused computation for a Mixture of Experts (MOE) using
token and expert matrices.
Key Parameters:
- A: The input tensor representing tokens with shape (*, K), where '*' can
be any shape representing batches and K is the feature dimension of
each token.
- B: The stacked MOE weight tensor with shape (E, N, K), where E is
the number of experts, K is the input feature dimension, and N is
the output feature dimension.
- C: The output cache tensor with shape (M, topk, N), where M is the
total number of tokens post padding, topk is the number of times
each token is repeated, and N is the output feature dimension.
- sorted_token_ids: A tensor containing the sorted indices of tokens,
repeated topk times and arranged by the expert index they are
assigned to.
- expert_ids: A tensor containing the indices of the expert for each
block. It determines which expert matrix from B should be used for
each block in A.
This kernel performs the multiplication of a token by its corresponding
expert matrix as determined by `expert_ids`. The sorting of
`sorted_token_ids` by expert index and padding ensures divisibility by
BLOCK_SIZE_M, which is necessary to maintain consistency in block matrix
multiplication across different blocks processed by the same expert.
"""
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(EM, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + pid % num_pid_in_group % group_size_m
pid_n = pid % num_pid_in_group // group_size_m
num_tokens_post_padded = tl.load(num_tokens_post_padded_ptr)
if pid_m * BLOCK_SIZE_M >= num_tokens_post_padded:
return
offs_token_id = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_token = tl.load(sorted_token_ids_ptr + offs_token_id)
token_mask = offs_token < num_valid_tokens
offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptrs = a_ptr + (offs_token[:, None] // top_k * stride_am + offs_k[
None, :] * stride_ak)
off_experts = tl.load(expert_ids_ptr + pid_m)
b_ptrs = b_ptr + off_experts * stride_be + (offs_k[:, None] * stride_bk +
offs_bn[None, :] * stride_bn)
if use_int8_w8a16:
b_scale_ptrs = b_scale_ptr + off_experts * stride_bse + offs_bn[None, :
] * stride_bsn
b_scale = tl.load(b_scale_ptrs)
if use_fp8_w8a8:
a_scale = tl.load(a_scale_ptr)
b_scale = tl.load(b_scale_ptr + off_experts)
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)):
a = tl.load(a_ptrs, mask=token_mask[:, None] & (offs_k[None, :] < K -
k * BLOCK_SIZE_K), other=0.0)
b = tl.load(b_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K,
other=0.0)
if use_int8_w8a16:
accumulator = tl.dot(a, b.to(compute_type), acc=accumulator)
elif use_fp8_w8a8:
accumulator = tl.dot(a, b, acc=accumulator)
else:
accumulator += tl.dot(a, b)
a_ptrs += BLOCK_SIZE_K * stride_ak
b_ptrs += BLOCK_SIZE_K * stride_bk
if MUL_ROUTED_WEIGHT:
moe_weight = tl.load(topk_weights_ptr + offs_token, mask=token_mask,
other=0)
accumulator = accumulator * moe_weight[:, None]
if use_int8_w8a16:
accumulator = (accumulator * b_scale).to(compute_type)
elif use_fp8_w8a8:
accumulator = (accumulator * a_scale * b_scale).to(compute_type)
else:
accumulator = accumulator.to(compute_type)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptrs = c_ptr + stride_cm * offs_token[:, None] + stride_cn * offs_cn[
None, :]
c_mask = token_mask[:, None] & (offs_cn[None, :] < N)
tl.store(c_ptrs, accumulator, mask=c_mask)
| {
"Data Type": [
"int8",
"fp16"
],
"Functionality": [
"Matrix Multiplication",
"Top-K Selection"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"Apache"
] | https://github.com/Charlie-XIAO/sparse-vllm/blob/d228909a30b0c245c35417fb7d2acdf9a3690042/vllm/model_executor/layers/fused_moe/fused_moe.py |
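The grouped program-id mapping at the top of the kernel above is the usual L2-friendly swizzle; in plain Python it reads as follows. This is a sketch that mirrors the kernel's arithmetic for clarity and is not part of vLLM's API.

def swizzle_pid(pid, num_pid_m, num_pid_n, group_size_m):
    # Consecutive pids walk GROUP_SIZE_M rows of M-tiles before moving to the next
    # column of N-tiles, improving reuse of the expert weight tiles in cache.
    num_pid_in_group = group_size_m * num_pid_n
    group_id = pid // num_pid_in_group
    first_pid_m = group_id * group_size_m
    group_rows = min(num_pid_m - first_pid_m, group_size_m)
    pid_m = first_pid_m + pid % num_pid_in_group % group_rows
    pid_n = pid % num_pid_in_group // group_rows
    return pid_m, pid_n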
b915901f-25e7-4e07-86fb-3ce11a600e0e | test_triton_varargs.py | facebookresearch/xformers | tests/test_triton_varargs.py | a2f37f8c5f4e3ae0d3459a92e42cd1aeb45b03bc | 0 | @triton.jit
def kernel(x_ptrs: 'VAR_ARGS_ARRAY', y_ptrs: 'VAR_ARGS_ARRAY', numel,
BLOCK_SIZE: tl.constexpr):
pid = tl.program_id(axis=0)
offsets = BLOCK_SIZE * pid + tl.arange(0, BLOCK_SIZE)
mask = offsets < numel
for i in range(len(x_ptrs)):
x_ptr = x_ptrs[i]
y_ptr = y_ptrs[i]
data = tl.load(x_ptr + offsets, mask)
result = data * data
tl.store(y_ptr + offsets, result, mask)
| {
"Data Type": [],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"BSD"
] | https://github.com/facebookresearch/xformers/blob/a2f37f8c5f4e3ae0d3459a92e42cd1aeb45b03bc/tests/test_triton_varargs.py |
b5a86ee1-c573-4ef0-b121-3f8c69923b2c | triton_fused_attention.py | pytorch-labs/tritonbench | tritonbench/kernels/triton_fused_attention.py | 3a5dccb159834968567a2e45e561dc1aeaa8f8a8 | 0 | @triton.autotune(list(filter(keep, configsWS)), key=['N_CTX'])
@triton.jit
def _attn_fwd_ws(Q, K, V, sm_scale, M, Out, desc_q, desc_k, desc_v, desc_o,
stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh,
stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn,
stride_oz, stride_oh, stride_om, stride_on, Z, H, N_CTX, BLOCK_M: tl.
constexpr, BLOCK_N: tl.constexpr, HEAD_DIM: tl.constexpr, STAGE: tl.
constexpr, ENABLE_TMA: tl.constexpr, LOOP_SCHEDULE: tl.constexpr,
ENABLE_WS: tl.constexpr):
tl.static_assert(BLOCK_N <= HEAD_DIM)
pid = tl.program_id(0)
off_hz = tl.program_id(1)
_attn_fwd_compute_ws(Q, K, V, sm_scale, M, Out, desc_q, desc_k, desc_v,
desc_o, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz,
stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk,
stride_vn, stride_oz, stride_oh, stride_om, stride_on, off_hz, pid,
Z, H, N_CTX, BLOCK_M, BLOCK_N, HEAD_DIM, STAGE, ENABLE_TMA,
LOOP_SCHEDULE)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Tiled",
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"BSD"
] | https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/kernels/triton_fused_attention.py |
ac3a79c4-8892-4afd-bc3a-b1376778c60f | parallel_scan.py | chengkai-liu/RecBLR | parallel_scan.py | 66e520c26e28c05a5425ba2e81c9169b7e0176e2 | 0 | @triton.jit
def pack64(a, b):
tl.static_assert(a.dtype == tl.float32)
tl.static_assert(b.dtype == tl.float32)
a = a.to(dtype=tl.uint32, bitcast=True).to(tl.uint64)
a = a << 32
b = b.to(dtype=tl.uint32, bitcast=True).to(tl.uint64)
return a | b
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Register Intensive"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/chengkai-liu/RecBLR/blob/66e520c26e28c05a5425ba2e81c9169b7e0176e2/parallel_scan.py |
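A matching unpack helper, sketched under the same layout assumption as pack64 above (high 32 bits hold `a`, low 32 bits hold `b`); the repository may provide its own variant with a different name or signature.

import triton
import triton.language as tl

@triton.jit
def unpack64_sketch(packed):
    # Hypothetical inverse of pack64: recover the two float32 halves by bitcast.
    tl.static_assert(packed.dtype == tl.uint64)
    a = (packed >> 32).to(tl.uint32).to(tl.float32, bitcast=True)
    b = (packed & 0xFFFFFFFF).to(tl.uint32).to(tl.float32, bitcast=True)
    return a, b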
aeb1d88e-f7a3-4baf-ad05-a447c96fd287 | nll_loss_kernels.py | BobMcDear/attorch | attorch/nll_loss_kernels.py | da06cb6236bb47195e33fe3986ed21c675ed94cc | 0 | @triton.autotune(configs=warps_kernel_configs(), key=['batch_dim',
'spatial_dim'])
@triton.heuristics({'BLOCK_SIZE_BATCH': BLOCK_SIZE_BATCH_heuristic,
'BLOCK_SIZE_SPATIAL': lambda args: next_power_of_2(args['spatial_dim'])})
@triton.jit
def nll_loss_forward_kernel(input_pointer, target_pointer, weight_pointer,
sum_weights_pointer, output_pointer, batch_dim, spatial_dim,
input_batch_stride, input_feat_stride, input_spatial_stride,
target_batch_stride, target_spatial_stride, output_batch_stride,
output_spatial_stride, reduction: tl.constexpr, weighted: tl.constexpr,
BLOCK_SIZE_BATCH: tl.constexpr, BLOCK_SIZE_SPATIAL: tl.constexpr):
"""
Measures the negative log likelihood loss between the input and target,
    with optional reweighting of each class.
Args:
input_pointer: Pointer to the input.
The input must be of shape [batch_dim, feat_dim, spatial_dim].
target_pointer: Pointer to the target.
The target must be of shape [batch_dim, spatial_dim].
weight_pointer: Pointer to an optional class weight vector.
The class weight vector, if provided, must be of shape [feat_dim].
sum_weights_pointer: Pointer to a container the sum of the class weights is written to.
The container must be of shape [batch_dim/BLOCK_SIZE_BATCH].
output_pointer: Pointer to a container the loss is written to.
The container must be of shape [batch_dim, spatial_dim] if reduction is 'none',
            and otherwise of shape [batch_dim/BLOCK_SIZE_BATCH].
batch_dim: Batch dimension.
spatial_dim: Spatial dimension.
input_batch_stride: Stride necessary to jump one element along the
input's batch dimension.
input_feat_stride: Stride necessary to jump one element along the
input's feature dimension.
input_spatial_stride: Stride necessary to jump one element along the
input's spatial dimension.
target_batch_stride: Stride necessary to jump one element along the
target's batch dimension.
target_spatial_stride: Stride necessary to jump one element along the
target's spatial dimension.
output_batch_stride: Stride necessary to jump one element along the
output container's batch dimension.
output_spatial_stride: Stride necessary to jump one element along the
output container's spatial dimension.
reduction: Reduction strategy for the output.
Options are 'none' for no reduction, 'mean' for averaging the loss
across all entries, and 'sum' for summing the loss across all entries.
If a reduction method is specified, the reduced result of each
program is written to a separate index in the summed weights and
output container, which should later be summed.
weighted: Flag for weighing each class.
BLOCK_SIZE_BATCH: Block size across the batch dimension.
BLOCK_SIZE_SPATIAL: Block size across the spatial dimension.
"""
batch_pid = tl.program_id(axis=0)
batch_offset = batch_pid * BLOCK_SIZE_BATCH + tl.arange(0, BLOCK_SIZE_BATCH
)
spatial_offset = tl.arange(0, BLOCK_SIZE_SPATIAL)
batch_mask = batch_offset < batch_dim
spatial_mask = spatial_offset < spatial_dim
target_pointer += target_batch_stride * batch_offset[:, None
] + target_spatial_stride * spatial_offset[None, :]
target = tl.load(target_pointer, mask=batch_mask[:, None] &
spatial_mask[None, :])
input_pointer += (input_feat_stride * target + input_batch_stride *
batch_offset[:, None] + input_spatial_stride * spatial_offset[None, :])
input = tl.load(input_pointer, mask=batch_mask[:, None] & spatial_mask[
None, :]).to(tl.float32)
output = -input
if weighted:
weight = tl.load(weight_pointer + target, mask=batch_mask[:, None] &
spatial_mask[None, :]).to(tl.float32)
output *= weight
if reduction == 'none':
output_pointer += output_batch_stride * batch_offset[:, None
] + output_spatial_stride * spatial_offset[None, :]
tl.store(output_pointer, output, mask=batch_mask[:, None] &
spatial_mask[None, :])
elif reduction == 'mean':
if weighted:
tl.store(sum_weights_pointer + batch_pid, tl.sum(weight))
tl.store(output_pointer + batch_pid, tl.sum(output))
else:
tl.store(output_pointer + batch_pid, tl.sum(output) / (
batch_dim * spatial_dim))
elif reduction == 'sum':
tl.store(output_pointer + batch_pid, tl.sum(output))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Tiled",
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/BobMcDear/attorch/blob/da06cb6236bb47195e33fe3986ed21c675ed94cc/attorch/nll_loss_kernels.py |
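A compact PyTorch reference for the same semantics as the kernel above; this is a sketch in which shapes follow the docstring and the kernel's two-stage 'mean' reduction (per-program partial sums reduced later) is collapsed into a single division.

import torch

def nll_loss_reference(inp, target, weight=None, reduction="none"):
    # inp: [batch, feat, spatial] log-probabilities; target: [batch, spatial] class ids.
    out = -inp.gather(1, target.unsqueeze(1)).squeeze(1)          # [batch, spatial]
    if weight is not None:
        out = out * weight[target]
    if reduction == "none":
        return out
    if reduction == "sum":
        return out.sum()
    # 'mean': divide by the summed class weights if weighted, else by the element count.
    denom = weight[target].sum() if weight is not None else out.numel()
    return out.sum() / denom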
f9da9e73-4afd-45c7-a28d-2725468622a1 | paged_attn.py | AlibabaPAI/FLASHNN | flashnn/triton_kernels/paged_attn.py | 528a9301587f5fb135b25d973a87ba0a40a703a7 | 0 | @triton.autotune(configs=[triton.Config({}, num_warps=warps) for warps in [
4, 8, 16]], key=['QUERY_GROUP_SIZE', 'HEAD_SIZE', 'NUM_PARTITIONS',
'PARTITION_SIZE'])
@triton.jit
def _paged_attn_w_mma_v2_reduce_kernel(out_ptr, m_i_ptr, l_i_ptr,
tmp_out_ptr, context_lens_ptr, max_num_partitions, stride_o0, stride_o1,
stride_o2, HEAD_SIZE: tl.constexpr, QUERY_GROUP_SIZE: tl.constexpr,
PADDED_QUERY_GROUP_SIZE: tl.constexpr, NUM_KV_HEADS: tl.constexpr,
PARTITION_SIZE: tl.constexpr, NUM_PARTITIONS: tl.constexpr):
seq_idx = tl.program_id(0)
kv_head_idx = tl.program_id(1)
context_len = tl.load(context_lens_ptr + seq_idx)
num_partitions = tl.cdiv(context_len, PARTITION_SIZE)
group_head_offset = tl.arange(0, PADDED_QUERY_GROUP_SIZE)[:, None
] * HEAD_SIZE + tl.arange(0, HEAD_SIZE)[None, :]
group_mask = tl.arange(0, PADDED_QUERY_GROUP_SIZE)[:, None
] < QUERY_GROUP_SIZE
if num_partitions == 1:
tmp_out_offset = ((seq_idx * NUM_KV_HEADS + kv_head_idx) *
max_num_partitions * QUERY_GROUP_SIZE * HEAD_SIZE +
group_head_offset)
tmp_out = tl.load(tmp_out_ptr + tmp_out_offset, mask=group_mask,
other=0.0)
out_offset = (seq_idx * stride_o0 + kv_head_idx * QUERY_GROUP_SIZE *
stride_o1 + group_head_offset * stride_o2)
tl.store(out_ptr + out_offset, tmp_out, mask=group_mask)
return
ml_offset = (seq_idx * NUM_KV_HEADS + kv_head_idx
) * max_num_partitions * QUERY_GROUP_SIZE + tl.arange(0, NUM_PARTITIONS
)[:, None] * QUERY_GROUP_SIZE + tl.arange(0, PADDED_QUERY_GROUP_SIZE)[
None, :]
mask = (tl.arange(0, NUM_PARTITIONS)[:, None] < num_partitions) & (tl.
arange(0, PADDED_QUERY_GROUP_SIZE)[None, :] < QUERY_GROUP_SIZE)
m_i = tl.load(m_i_ptr + ml_offset, mask=mask, other=float('-inf'))
m = tl.max(m_i, axis=0)
l_i = tl.load(l_i_ptr + ml_offset, mask=mask, other=0.0)
l_i *= tl.exp(m_i - m[None, :])
l = tl.sum(l_i, axis=0)
r = l_i / l[None, :]
r = tl.reshape(r, (NUM_PARTITIONS, PADDED_QUERY_GROUP_SIZE, 1))
tmp_out_offset = (seq_idx * NUM_KV_HEADS + kv_head_idx
) * max_num_partitions * QUERY_GROUP_SIZE * HEAD_SIZE + tl.arange(0,
NUM_PARTITIONS)[:, None, None
] * QUERY_GROUP_SIZE * HEAD_SIZE + tl.arange(0, PADDED_QUERY_GROUP_SIZE
)[None, :, None] * HEAD_SIZE + tl.arange(0, HEAD_SIZE)[None, None, :]
tmp_out = tl.load(tmp_out_ptr + tmp_out_offset, mask=mask[:, :, None],
other=0.0)
out = tl.sum((tmp_out * r).to(tl.float32), axis=0)
out_offset = (seq_idx * stride_o0 + kv_head_idx * QUERY_GROUP_SIZE *
stride_o1 + group_head_offset * stride_o2)
tl.store(out_ptr + out_offset, out, mask=group_mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Softmax"
],
"Memory Access Pattern": [
"Tiled",
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"Apache"
] | https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/paged_attn.py |
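In tensor form, the reduction over partitions performed above is the usual log-sum-exp merge of partial softmax results. A hedged PyTorch sketch for one (sequence, kv-head) pair, with illustrative names:

import torch

def merge_partitions_reference(m_i, l_i, tmp_out):
    # m_i, l_i: [num_partitions, query_group]; tmp_out: [num_partitions, query_group, head_size]
    m = m_i.max(dim=0).values                      # global running max per query
    w = l_i * torch.exp(m_i - m[None, :])          # rescaled partial softmax denominators
    r = w / w.sum(dim=0, keepdim=True)             # per-partition mixing weights
    return (tmp_out * r.unsqueeze(-1)).sum(dim=0)  # [query_group, head_size]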
fbbb8e30-59c7-4345-a0af-c5932ca05a42 | hello_triton.py | gmgu/study-triton | 1_hello_triton/hello_triton.py | 3a9a24fd3f1de3e7465535ffe72f6deac8a419bd | 0 | @triton.jit
def hello_kernel():
print('Hello Triton Kernel!')
| {
"Data Type": [],
"Functionality": [],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"Apache"
] | https://github.com/gmgu/study-triton/blob/3a9a24fd3f1de3e7465535ffe72f6deac8a419bd/1_hello_triton/hello_triton.py |
89417c21-0b2b-4b0f-bb94-3113c88d8895 | adam.py | ServiceNow/Fast-LLM | fast_llm/functional/triton/adam.py | 8b46289079da67cba99628448a6b6083dac083cf | 0 | @triton.jit
def triton_adam_kernel(params_ptr, grads_ptr, exp_avgs_ptr, exp_avg_sqs_ptr,
noop_flag_ptr, scale_ptr, step_size, beta1, beta2, bias_correction,
decay_factor, epsilon, numel: tl.constexpr, block_size: tl.constexpr):
noop_flag = tl.load(noop_flag_ptr)
if noop_flag != 0:
return
scale = tl.load(scale_ptr)
block_start = tl.program_id(axis=0).to(tl.int64) * block_size
offsets = block_start + tl.arange(0, block_size)
mask = offsets < numel
params = tl.load(params_ptr + offsets, mask=mask)
grads = tl.load(grads_ptr + offsets, mask=mask)
grads = scale * grads
exp_avgs = tl.load(exp_avgs_ptr + offsets, mask=mask)
exp_avgs = beta1 * exp_avgs + (1 - beta1) * grads
tl.store(exp_avgs_ptr + offsets, exp_avgs, mask=mask)
exp_avg_sqs = tl.load(exp_avg_sqs_ptr + offsets, mask=mask)
exp_avg_sqs = beta2 * exp_avg_sqs + (1 - beta2) * grads * grads
tl.store(exp_avg_sqs_ptr + offsets, exp_avg_sqs, mask=mask)
params = decay_factor * params - step_size * exp_avgs / (tl.sqrt(
exp_avg_sqs) / bias_correction + epsilon)
tl.store(params_ptr + offsets, params, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access",
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"Apache"
] | https://github.com/ServiceNow/Fast-LLM/blob/8b46289079da67cba99628448a6b6083dac083cf/fast_llm/functional/triton/adam.py |
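A hedged launcher sketch for the Adam kernel above. The definitions of step_size, bias_correction and decay_factor below are assumptions about the calling convention; the kernel itself only applies params = decay_factor*params - step_size*m / (sqrt(v)/bias_correction + eps).

import torch
import triton

def adam_step(params, grads, exp_avgs, exp_avg_sqs, noop_flag, scale,
              lr=1e-3, beta1=0.9, beta2=0.999, eps=1e-8, weight_decay=0.0, step=1):
    # Hypothetical launcher, not Fast-LLM's actual optimizer wrapper.
    step_size = lr / (1 - beta1 ** step)
    bias_correction = (1 - beta2 ** step) ** 0.5
    decay_factor = 1 - lr * weight_decay
    numel = params.numel()
    block_size = 1024  # illustrative
    grid = (triton.cdiv(numel, block_size),)
    triton_adam_kernel[grid](
        params, grads, exp_avgs, exp_avg_sqs, noop_flag, scale,
        step_size, beta1, beta2, bias_correction, decay_factor, eps,
        numel=numel, block_size=block_size,
    )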
b21323e3-f171-4003-9eda-bd4fcfee5aff | flash_attention.py | falkaer/multi-scale-music | seq/flash_attention.py | a7794ddfb3bbd95b70acf3fe72a08d8a1d47564d | 0 | @triton.jit
def make_bounds(offs_m, offs_n, M, N, EVEN_M: tl.constexpr, EVEN_N: tl.
constexpr):
if EVEN_M:
mask = offs_n[None, :] < N
elif EVEN_N:
mask = offs_m[:, None] < M
else:
mask = (offs_m[:, None] < M) & (offs_n[None, :] < N)
return mask
| {
"Data Type": [],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/falkaer/multi-scale-music/blob/a7794ddfb3bbd95b70acf3fe72a08d8a1d47564d/seq/flash_attention.py |
5f6f1215-91df-4856-935d-ad21674c7526 | rwkv_log.py | berlino/seq_icl | src/models/sequence/rnn/scan_triton/rwkv_log.py | 9b9223d15348b5a415fb453ed988ed5f7ab9fbdc | 0 | @triton.jit
def logaddexp(a, b):
max_ab = tl.maximum(a, b)
return max_ab + tl.log(tl.exp(a - max_ab) + tl.exp(b - max_ab))
| {
"Data Type": [],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"Apache"
] | https://github.com/berlino/seq_icl/blob/9b9223d15348b5a415fb453ed988ed5f7ab9fbdc/src/models/sequence/rnn/scan_triton/rwkv_log.py |
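The max-subtraction in the helper above is the standard trick for a numerically stable log-sum-exp of two terms: exp() only ever sees non-positive arguments, so the result stays finite even when both inputs are large. A plain-Python illustration (not part of the repository):

import math

def logaddexp_reference(a: float, b: float) -> float:
    m = max(a, b)
    return m + math.log(math.exp(a - m) + math.exp(b - m))

print(logaddexp_reference(1000.0, 999.0))  # ~1000.3133, where naive log(exp(a)+exp(b)) overflows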
aeaaa009-521f-43fd-884c-f286d78d2d44 | fused_linear_cross_entropy.py | sustcsonglin/flash-linear-attention | fla/modules/fused_linear_cross_entropy.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.jit
def cross_entropy_kernel(logits, lse, target, loss, total, ignore_index,
label_smoothing: tl.constexpr, logit_scale: tl.constexpr, reduction: tl
.constexpr, V: tl.constexpr, BV: tl.constexpr):
"""
This kernel computes both cross entropy loss and the gradient of the input.
We only consider hard label + mean reduction for now.
Please refer to https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html for the math.
Args:
logits:
Pointer to logits tensor.
lse:
Pointer to logsumexp tensor.
target: Pointer to target tensor.
loss:
Pointer to tensor to store the loss.
V (int):
The number of columns in the input tensor.
total (int):
The number of non-ignored classes.
ignore_index (int):
The index to ignore in the target.
label_smoothing (float):
The amount of smoothing when computing the loss, where 0.0 means no smoothing.
reduction (str):
The string for the reduction to apply
BV (int):
The block size for vocab.
"""
i_n = tl.program_id(0).to(tl.int64)
NV = tl.cdiv(V, BV)
b_y = tl.load(target + i_n)
logits += i_n * V
if b_y == ignore_index:
for i in range(0, V, BV):
o_v = i + tl.arange(0, BV)
tl.store(logits + o_v, 0.0, mask=o_v < V)
return
b_l = tl.load(logits + b_y) * logit_scale
b_lse = tl.load(lse + i_n)
b_loss = b_lse - b_l
b_z = 0.0
eps = label_smoothing / V
tl.debug_barrier()
for iv in range(0, NV):
o_v = iv * BV + tl.arange(0, BV)
b_logits = tl.load(logits + o_v, mask=o_v < V, other=float('-inf')
) * logit_scale
if label_smoothing > 0:
b_z += tl.sum(tl.where(o_v < V, -eps * b_logits, 0.0))
b_p = (tl.exp(b_logits - b_lse) - eps) * logit_scale
if reduction == 'mean':
b_p = b_p / total
tl.store(logits + o_v, b_p, mask=o_v < V)
tl.debug_barrier()
if label_smoothing > 0:
b_loss = b_loss * (1 - label_smoothing) + (b_z + label_smoothing *
b_lse)
b_l = tl.load(logits + b_y)
if reduction == 'mean':
b_loss = b_loss / total
b_l += (label_smoothing - 1) / total * logit_scale
else:
b_l += (label_smoothing - 1) * logit_scale
tl.store(loss + i_n, b_loss)
tl.store(logits + b_y, b_l)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Softmax",
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Memory-Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/modules/fused_linear_cross_entropy.py |
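A loss-only PyTorch reference for the kernel above, as a sketch: the kernel additionally overwrites `logits` with the gradient, which is omitted here, and `lse` corresponds to the logsumexp of the scaled logits computed by an earlier kernel.

import torch

def cross_entropy_reference(logits, target, logit_scale=1.0, label_smoothing=0.0):
    # logits: [N, V]; target: [N] class indices (ignore_index handling omitted).
    z = logits.float() * logit_scale
    lse = torch.logsumexp(z, dim=-1)
    loss = lse - z.gather(-1, target.unsqueeze(-1)).squeeze(-1)
    if label_smoothing > 0:
        smooth = -(label_smoothing / z.shape[-1]) * z.sum(dim=-1)
        loss = (1 - label_smoothing) * loss + smooth + label_smoothing * lse
    return loss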
0259cfec-6015-444e-944d-75eaa64eb07f | y_4.py | IntelLabs/EquiTriton | src/equitriton/sph_harm/direct/y_4.py | 1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c | 0 | @triton.jit
def fourth_order_fwd(coord_ptr: tl.tensor, output_ptr: tl.tensor,
block_size: tl.constexpr, coord_numel: tl.constexpr, output_numel: tl.
constexpr, col_offset: tl.constexpr, output_stride: tl.constexpr):
coord_stride = 3
block_id = tl.program_id(0)
coord_striding = tl.arange(0, block_size) * coord_stride
coord_row_offset = coord_striding + block_size * coord_stride * block_id
x = tl.load(coord_ptr + coord_row_offset, mask=coord_row_offset <
coord_numel)
y = tl.load(coord_ptr + coord_row_offset + 1, mask=coord_row_offset + 1 <
coord_numel)
z = tl.load(coord_ptr + coord_row_offset + 2, mask=coord_row_offset + 2 <
coord_numel)
CONST000 = 1.125
CONST001 = 2.25
CONST002 = 3.0
CONST005 = 2.21852991866236
CONST007 = 9.48683298050514
CONST010 = 20.1246117974981
CONST011 = -18.8248505970167
CONST012 = -13.3111795119741
CONST013 = -10.0623058987491
CONST014 = -9.0
CONST015 = -8.87411967464942
CONST016 = -7.11512473537885
CONST017 = -6.27495019900557
CONST018 = -3.35410196624968
CONST019 = -1.67705098312484
VAR06 = x * x * x * x
VAR07 = x * x * x
VAR08 = x * x
VAR15 = y * y * y * y
VAR16 = y * y * y
VAR17 = y * y
VAR24 = z * z * z * z
VAR25 = z * z * z
VAR26 = z * z
Y00 = CONST015 * VAR07 * z - CONST015 * VAR25 * x
Y01 = y * (-CONST011 * VAR26 * x + CONST017 * VAR07)
Y02 = CONST018 * VAR07 * z + x * (CONST010 * VAR17 * z + CONST018 * VAR25)
Y03 = CONST016 * VAR07 * y + x * (CONST007 * VAR16 + CONST016 * VAR26 * y)
Y04 = (CONST000 * VAR06 + CONST000 * VAR24 + CONST002 * VAR15 +
CONST014 * VAR17 * VAR26 + VAR08 * (CONST001 * VAR26 + CONST014 *
VAR17))
Y05 = CONST016 * VAR25 * y + z * (CONST007 * VAR16 + CONST016 * VAR08 * y)
Y06 = -CONST019 * VAR06 + CONST019 * VAR24 + VAR17 * (CONST013 * VAR08 -
CONST013 * VAR26)
Y07 = y * (CONST011 * VAR08 * z - CONST017 * VAR25)
Y08 = CONST005 * VAR06 + CONST005 * VAR24 + CONST012 * VAR08 * VAR26
output_striding = tl.arange(0, block_size) * output_stride
output_row_offset = (output_striding + block_size * output_stride *
block_id + col_offset)
tl.store(output_ptr + output_row_offset, Y00, mask=output_row_offset <
output_numel)
tl.store(output_ptr + output_row_offset + 1, Y01, mask=
output_row_offset + 1 < output_numel)
tl.store(output_ptr + output_row_offset + 2, Y02, mask=
output_row_offset + 2 < output_numel)
tl.store(output_ptr + output_row_offset + 3, Y03, mask=
output_row_offset + 3 < output_numel)
tl.store(output_ptr + output_row_offset + 4, Y04, mask=
output_row_offset + 4 < output_numel)
tl.store(output_ptr + output_row_offset + 5, Y05, mask=
output_row_offset + 5 < output_numel)
tl.store(output_ptr + output_row_offset + 6, Y06, mask=
output_row_offset + 6 < output_numel)
tl.store(output_ptr + output_row_offset + 7, Y07, mask=
output_row_offset + 7 < output_numel)
tl.store(output_ptr + output_row_offset + 8, Y08, mask=
output_row_offset + 8 < output_numel)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"Apache"
] | https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/direct/y_4.py |
99554c85-0a2d-42e8-ab1a-65744f560890 | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/gla/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({}, num_warps=1), triton.Config({},
num_warps=2), triton.Config({}, num_warps=4), triton.Config({},
num_warps=8)], key=['BC'])
@triton.jit
def chunk_gla_fwd_A_kernel_intra_sub_intra_merge(A, A2, offsets, indices, B:
tl.constexpr, T: tl.constexpr, H: tl.constexpr, BT: tl.constexpr, BC:
tl.constexpr, NK: tl.constexpr, USE_OFFSETS: tl.constexpr, HEAD_FIRST:
tl.constexpr):
i_t, i_c, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_b, i_h = i_bh // H, i_bh % H
if USE_OFFSETS:
i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices +
i_t * 2 + 1).to(tl.int32)
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
all = T
T = eos - bos
else:
bos, eos = i_b * T, i_b * T + T
all = B * T
if i_t * BT + i_c * BC >= T:
return
b_A = tl.zeros([BC, BC], dtype=tl.float32)
for i_k in range(0, NK):
if HEAD_FIRST:
p_A = tl.make_block_ptr(A + (i_k * B * H + i_bh) * T * BC, (T,
BC), (BC, 1), (i_t * BT + i_c * BC, 0), (BC, BC), (1, 0))
else:
p_A = tl.make_block_ptr(A + (i_k * all + bos) * H * BC + i_h *
BC, (T, BC), (H * BC, 1), (i_t * BT + i_c * BC, 0), (BC, BC
), (1, 0))
b_A += tl.load(p_A, boundary_check=(0, 1))
if HEAD_FIRST:
p_A2 = tl.make_block_ptr(A2 + i_bh * T * BT, (T, BT), (BT, 1), (i_t *
BT + i_c * BC, i_c * BC), (BC, BC), (1, 0))
else:
p_A2 = tl.make_block_ptr(A2 + (bos * H + i_h) * BT, (T, BT), (H *
BT, 1), (i_t * BT + i_c * BC, i_c * BC), (BC, BC), (1, 0))
tl.store(p_A2, b_A.to(A2.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gla/chunk.py |
58b3463b-151f-4a0e-bc35-134133839e16 | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/abc/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.jit
def chunk_abc_fwd_kernel_V(q, v, z, h, o, A, s_k_h, s_k_t, s_k_d, s_v_h,
s_v_t, s_v_d, s_h_h, s_h_t, s_h_d, scale, T: tl.constexpr, K: tl.
constexpr, V: tl.constexpr, BT: tl.constexpr, BK: tl.constexpr, BV: tl.
constexpr):
i_v, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_p = tl.maximum(i_t * BT - 1, 0)
b_o = tl.zeros([BT, BV], dtype=tl.float32)
for i_k in range(tl.cdiv(K, BK)):
p_q = tl.make_block_ptr(q + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), (
i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_z = tl.make_block_ptr(z + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), (
i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_h = tl.make_block_ptr(h + i_bh * s_h_h + i_t * K * V, (K, V), (
s_h_t, s_h_d), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
p_zp = tl.make_block_ptr(z + i_bh * s_k_h, (T * K,), (s_k_d,), (i_p *
K + i_k * BK,), (BK,), (0,))
b_q = tl.load(p_q, boundary_check=(0, 1))
b_q = (b_q * scale).to(b_q.dtype)
b_z = tl.load(p_z, boundary_check=(0, 1))
b_zp = tl.load(p_zp, boundary_check=(0,))
b_q = (b_q * tl.exp(b_zp[None, :] - b_z)).to(b_q.dtype)
b_h = tl.load(p_h, boundary_check=(0, 1))
if i_k >= 0:
b_o += tl.dot(b_q, b_h, allow_tf32=False)
p_v = tl.make_block_ptr(v + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), (i_t *
BT, i_v * BV), (BT, BV), (1, 0))
p_o = tl.make_block_ptr(o + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), (i_t *
BT, i_v * BV), (BT, BV), (1, 0))
p_A = tl.make_block_ptr(A + i_bh * T * BT, (T, BT), (BT, 1), (i_t * BT,
0), (BT, BT), (1, 0))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_A = tl.load(p_A, boundary_check=(0, 1))
b_o += tl.dot(b_A, b_v, allow_tf32=False)
tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/abc/chunk.py |
466944e0-8df6-43af-8e5a-f9a4513cce97 | wy_fast.py | sustcsonglin/flash-linear-attention | fla/ops/gated_delta_rule/wy_fast.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({}, num_warps=num_warps) for
num_warps in [2, 4, 8]], key=['BT', 'BK', 'BV'])
@triton.jit
def fwd_recompute_w_u_kernel(k, v, beta, w, u, Aw, Au, offsets, indices, T:
tl.constexpr, H: tl.constexpr, K: tl.constexpr, V: tl.constexpr, BT: tl
.constexpr, BK: tl.constexpr, BV: tl.constexpr, HEAD_FIRST: tl.
constexpr, USE_OFFSETS: tl.constexpr):
i_t, i_bh = tl.program_id(0), tl.program_id(1)
i_b, i_h = i_bh // H, i_bh % H
if USE_OFFSETS:
i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices +
i_t * 2 + 1).to(tl.int32)
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
T = eos - bos
else:
bos, eos = i_b * T, i_b * T + T
if HEAD_FIRST:
p_beta = tl.make_block_ptr(beta + i_bh * T, (T,), (1,), (i_t * BT,),
(BT,), (0,))
p_Au = tl.make_block_ptr(Au + i_bh * T * BT, (T, BT), (BT, 1), (i_t *
BT, 0), (BT, BT), (1, 0))
else:
p_beta = tl.make_block_ptr(beta + bos * H + i_h, (T,), (H,), (i_t *
BT,), (BT,), (0,))
p_Au = tl.make_block_ptr(Au + (bos * H + i_h) * BT, (T, BT), (H *
BT, 1), (i_t * BT, 0), (BT, BT), (1, 0))
b_beta = tl.load(p_beta, boundary_check=(0,))
b_Au = tl.load(p_Au, boundary_check=(0, 1))
for i_v in range(tl.cdiv(V, BV)):
if HEAD_FIRST:
p_v = tl.make_block_ptr(v + i_bh * T * V, (T, V), (V, 1), (i_t *
BT, i_v * BV), (BT, BV), (1, 0))
p_u = tl.make_block_ptr(u + i_bh * T * V, (T, V), (V, 1), (i_t *
BT, i_v * BV), (BT, BV), (1, 0))
else:
p_v = tl.make_block_ptr(v + (bos * H + i_h) * V, (T, V), (H * V,
1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_u = tl.make_block_ptr(u + (bos * H + i_h) * V, (T, V), (H * V,
1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_vb = (b_v * b_beta[:, None]).to(b_v.dtype)
b_u = tl.dot(b_Au, b_vb, allow_tf32=False)
tl.store(p_u, b_u.to(p_u.dtype.element_ty), boundary_check=(0, 1))
tl.debug_barrier()
b_Au = None
if HEAD_FIRST:
p_Aw = tl.make_block_ptr(Aw + i_bh * T * BT, (T, BT), (BT, 1), (i_t *
BT, 0), (BT, BT), (1, 0))
else:
p_Aw = tl.make_block_ptr(Aw + (bos * H + i_h) * BT, (T, BT), (H *
BT, 1), (i_t * BT, 0), (BT, BT), (1, 0))
b_Aw = tl.load(p_Aw, boundary_check=(0, 1))
for i_k in range(tl.cdiv(K, BK)):
if HEAD_FIRST:
p_k = tl.make_block_ptr(k + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
p_w = tl.make_block_ptr(w + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
else:
p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_w = tl.make_block_ptr(w + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
b_k = tl.load(p_k, boundary_check=(0, 1))
b_kb = (b_k * b_beta[:, None]).to(b_k.dtype)
b_w = tl.dot(b_Aw, b_kb)
tl.store(p_w, b_w.to(p_w.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gated_delta_rule/wy_fast.py |
b7a78b7c-edba-48ab-88b8-0d8b4fa84948 | normalization.py | ServiceNow/Fast-LLM | fast_llm/functional/triton/normalization.py | 8b46289079da67cba99628448a6b6083dac083cf | 0 | @triton.jit
def triton_normalization_backward_kernel_2(grad_weight_partial_ptr,
grad_bias_partial_ptr, grad_weight_ptr, grad_bias_ptr, m, n_cols,
has_bias: tl.constexpr, accumulate_grad: tl.constexpr, block_size_m: tl
.constexpr, block_size_n: tl.constexpr):
pid = tl.program_id(0)
cols = pid * block_size_n + tl.arange(0, block_size_n)
grad_weight_partial_sum = tl.zeros((block_size_m, block_size_n), dtype=
tl.float32)
if has_bias:
grad_bias_partial_sum = tl.zeros((block_size_m, block_size_n),
dtype=tl.float32)
col_mask = cols < n_cols
for i in range(0, m, block_size_m):
rows = i + tl.arange(0, block_size_m)
mask = (rows[:, None] < m) & (cols[None, :] < n_cols)
offsets = rows[:, None] * n_cols + cols[None, :]
grad_weight_partial_sum += tl.load(grad_weight_partial_ptr +
offsets, mask=mask, other=0.0)
if has_bias:
grad_bias_partial_sum += tl.load(grad_bias_partial_ptr +
offsets, mask=mask, other=0.0)
grad_weight = tl.sum(grad_weight_partial_sum, axis=0)
if accumulate_grad:
grad_weight = tl.load(grad_weight_ptr + cols, mask=col_mask
) + grad_weight
tl.store(grad_weight_ptr + cols, grad_weight, mask=col_mask)
if has_bias:
grad_bias = tl.sum(grad_bias_partial_sum, axis=0)
if accumulate_grad:
grad_bias = tl.load(grad_bias_ptr + cols, mask=col_mask
) + grad_bias
tl.store(grad_bias_ptr + cols, grad_bias, mask=col_mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization",
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"Apache"
] | https://github.com/ServiceNow/Fast-LLM/blob/8b46289079da67cba99628448a6b6083dac083cf/fast_llm/functional/triton/normalization.py |
26e4d111-f4aa-445c-91fa-fcdaff284554 | k_layer_norm.py | cpuhrsch/torchfused | torchfused/triton/k_layer_norm.py | 6c40ed160dcecbe7825f268f7c86bccd359e0ebf | 0 | @triton.jit
def _layer_norm_no_affine_bwd(DX, DY, Y, V, stride, N, **META):
row = tl.program_id(0)
cols = tl.arange(0, META['BLOCK_SIZE_N'])
y_ptrs = Y + row * stride + cols
dy_ptrs = DY + row * stride + cols
y = tl.load(y_ptrs, mask=cols < N, other=0).to(tl.float32)
dy = tl.load(dy_ptrs, mask=cols < N, other=0).to(tl.float32)
rstd = tl.load(V + row)
xhat = tl.where(cols < N, y, 0.0)
wdy = tl.where(cols < N, dy, 0.0)
mean1 = tl.sum(xhat * wdy, axis=0) / N
mean2 = tl.sum(wdy, axis=0) / N
dx = (wdy - (xhat * mean1 + mean2)) * rstd
_store(dx, DX, stride, N, META)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization",
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"BSD"
] | https://github.com/cpuhrsch/torchfused/blob/6c40ed160dcecbe7825f268f7c86bccd359e0ebf/torchfused/triton/k_layer_norm.py |
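A per-row PyTorch reference for the backward computed above, sketched under the kernel's convention that `y` is the normalized output (xhat), `dy` the upstream gradient, and `rstd` the saved reciprocal standard deviation.

import torch

def layer_norm_no_affine_bwd_reference(y, dy, rstd):
    # Mirrors dx = (dy - (xhat * mean1 + mean2)) * rstd, computed row-wise.
    n = y.shape[-1]
    y, dy = y.float(), dy.float()
    mean1 = (y * dy).sum(dim=-1, keepdim=True) / n
    mean2 = dy.sum(dim=-1, keepdim=True) / n
    return (dy - (y * mean1 + mean2)) * rstd.unsqueeze(-1)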
8cfb5a84-099f-41cc-9d64-f30e70e6e39b | quantization.py | neuro-ml/kerops | kerops/kernels/quantization.py | 735336775e825d5cb06b8850d25423661b12d1ac | 0 | @triton.jit
def _QuantUint8Window_impl(input_ptr, output_ptr, numel, window, BLOCK_SIZE:
tl.constexpr):
tid = tl.program_id(0)
input_ptr += tid * BLOCK_SIZE
output_ptr += tid * BLOCK_SIZE
offset = tl.arange(0, BLOCK_SIZE)
mask = offset < numel - tid * BLOCK_SIZE
input = tl.load(input_ptr + offset, mask=mask).to(tl.float32)
input = tl.minimum(tl.maximum(input, -window), window)
input = (input + window) / (2 * window)
input *= 255
input = input.to(tl.uint8)
tl.store(output_ptr + offset, input, mask=mask)
| {
"Data Type": [
"uint8"
],
"Functionality": [
"Quantization"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/neuro-ml/kerops/blob/735336775e825d5cb06b8850d25423661b12d1ac/kerops/kernels/quantization.py |
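A hedged launcher sketch for the windowed uint8 quantizer above: values are clamped to [-window, window] and mapped affinely onto [0, 255], so dequantization would be out/255 * 2*window - window. The block size is an illustrative choice.

import torch
import triton

def quant_uint8_window(x: torch.Tensor, window: float) -> torch.Tensor:
    # Hypothetical launcher, not the library's public wrapper.
    out = torch.empty(x.shape, dtype=torch.uint8, device=x.device)
    numel = x.numel()
    block_size = 1024  # illustrative
    grid = (triton.cdiv(numel, block_size),)
    _QuantUint8Window_impl[grid](x, out, numel, window, BLOCK_SIZE=block_size)
    return out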
ecb649cc-99a1-47b3-abec-69aff2b74328 | kernel_benchmark.py | ruikangliu/FlatQuant | benchmarks/kernel_benchmark.py | 9d3032065f1688cb3f71ebc8166df6d91440e871 | 0 | @triton.autotune(configs=[triton.Config({}, num_stages=2, num_warps=4),
triton.Config({}, num_stages=2, num_warps=2), triton.Config({},
num_stages=3, num_warps=4), triton.Config({}, num_stages=3, num_warps=2
), triton.Config({}, num_stages=4, num_warps=4), triton.Config({},
num_stages=4, num_warps=2)], key=['B', 'M', 'N'])
@triton.jit
def matmul_kernel(a_ptr, b_ptr, c_ptr, res_ptr, output_scale, B, M: tl.
constexpr, N: tl.constexpr, np2_M: tl.constexpr, np2_N: tl.constexpr,
stride_am, stride_ak, stride_bb, stride_bk, stride_bn, stride_ck,
stride_cn, stride_resb, stride_resm, stride_resn, BLOCK_SIZE_M: tl.
constexpr, is_split: tl.constexpr):
"""
a @ b @ c
a [M, M]
b [B, M, N]
c [N, N]
now only supports BLOCK_SIZE_M == triton.next_power_of_2(BLOCK_SIZE_M)
"""
pid = tl.program_id(axis=0)
batch_id = tl.program_id(axis=1) + tl.program_id(axis=2) * tl.num_programs(
axis=1)
pid_m = pid
offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
offs_bn = tl.arange(0, np2_N) % N
offs_k = tl.arange(0, np2_M)
a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] *
stride_ak)
b_ptrs = b_ptr + batch_id * stride_bb.to(tl.int64) + (offs_k[:, None] *
stride_bk + offs_bn[None, :] * stride_bn)
accumulator = tl.zeros((BLOCK_SIZE_M, np2_N), dtype=tl.float32)
a = tl.load(a_ptrs, mask=offs_k[None, :] < M, other=0.0)
b = tl.load(b_ptrs, mask=offs_k[:, None] < M, other=0.0)
accumulator += tl.dot(a, b)
tmp_ab = accumulator.to(tl.float16)
offs_cn = tl.arange(0, np2_N) % N
offs_k = tl.arange(0, np2_N)
c_ptrs = c_ptr + (offs_k[:, None] * stride_ck + offs_cn[None, :] *
stride_cn)
c = tl.load(c_ptrs, mask=offs_k[:, None] < N, other=0.0)
accumulator = 0
accumulator += tl.dot(tmp_ab, c)
if is_split:
res = accumulator.to(tl.float16)
offs_resm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_resn = tl.arange(0, np2_N)
res_ptrs = res_ptr + stride_resb.to(tl.int64
) * batch_id + stride_resm * offs_resm[:, None
] + stride_resn * offs_resn[None, :]
res_mask = (offs_resm[:, None] < M) & (offs_resn[None, :] < N)
tl.store(res_ptrs, res, mask=res_mask)
else:
abs_src_val = tl.abs(accumulator)
max_src_val = tl.max(abs_src_val)
scale = max_src_val / 7.0
quant_val = libdevice.llrint(accumulator / scale)
quant_val = max(-8, min(quant_val, 7))
quant_val = quant_val.reshape(BLOCK_SIZE_M, np2_N // 2, 2,
can_reorder=False)
quant_val_even, quant_val_odd = quant_val.split()
quant_val_odd = quant_val_odd << 4
res = tl.zeros((BLOCK_SIZE_M, np2_N // 2), dtype=tl.int8)
res = res | quant_val_odd & 240
res = res | quant_val_even & 15
offs_resm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_resn = tl.arange(0, np2_N // 2)
res_ptrs = res_ptr + stride_resb.to(tl.int64
) * batch_id + stride_resm * offs_resm[:, None
] + stride_resn * offs_resn[None, :]
res_mask = (offs_resm[:, None] < M) & (offs_resn[None, :] < N // 2)
tl.store(res_ptrs, res, mask=res_mask)
tl.store(output_scale + batch_id, scale.to(tl.float16))
| {
"Data Type": [
"fp32",
"fp16",
"int8"
],
"Functionality": [
"Matrix Multiplication",
"Quantization"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/ruikangliu/FlatQuant/blob/9d3032065f1688cb3f71ebc8166df6d91440e871/benchmarks/kernel_benchmark.py |
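The non-split branch above packs two signed 4-bit values into every int8, with even columns in the low nibble and odd columns in the high nibble. A plain-Python sketch of that packing and its inverse, on illustrative values:

even, odd = 7, -3                                # two signed int4 values in [-8, 7]
packed = (odd << 4) & 0xF0 | even & 0x0F         # mirrors `res | quant_val_odd & 240 | quant_val_even & 15`
# unpacking sign-extends each nibble back to a signed value
even_back = ((packed & 0x0F) ^ 0x08) - 0x08
odd_back = ((packed >> 4 & 0x0F) ^ 0x08) - 0x08
assert (even_back, odd_back) == (7, -3)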
06a01956-3562-48af-87d8-0ba21f8d29e7 | fused_moe_a8w8.py | AlibabaPAI/FLASHNN | flashnn/triton_kernels/fused_moe_a8w8.py | 528a9301587f5fb135b25d973a87ba0a40a703a7 | 0 | @triton.jit
def _fused_moe_a8w8_kernel(A, B, C, alpha_row_ptr, alpha_col_ptr,
topk_weights_ptr, sorted_token_ids_ptr, expert_ids_ptr,
num_tokens_post_padded_ptr, N, K, EM, num_valid_tokens, stride_am,
stride_ak, stride_be, stride_bn, stride_bk, stride_cm, stride_cn,
stride_scale_be, stride_scale_bn, BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M:
tl.constexpr, MUL_ROUTED_WEIGHT: tl.constexpr, top_k: tl.constexpr):
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(EM, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + pid % num_pid_in_group % group_size_m
pid_n = pid % num_pid_in_group // group_size_m
num_tokens_post_padded = tl.load(num_tokens_post_padded_ptr)
if pid_m * BLOCK_SIZE_M >= num_tokens_post_padded:
return
offs_token_id = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_token = tl.load(sorted_token_ids_ptr + offs_token_id)
token_mask = offs_token < num_valid_tokens
offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptrs = A + (offs_token[:, None] // top_k * stride_am + offs_k[None, :
] * stride_ak)
off_experts = tl.load(expert_ids_ptr + pid_m)
b_ptrs = B + off_experts * stride_be + (offs_bn[None, :] * stride_bn +
offs_k[:, None] * stride_bk)
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.int32)
_A0 = tl.zeros([1, 1], dtype=a_ptrs.dtype.element_ty)
_B0 = tl.zeros([1, 1], dtype=b_ptrs.dtype.element_ty)
lo = 0
hi = tl.cdiv(K, BLOCK_SIZE_K)
for k in range(lo, hi - 1):
a = tl.load(a_ptrs, mask=token_mask[:, None], other=_A0)
b = tl.load(b_ptrs)
accumulator += tl.dot(a, b)
a_ptrs += BLOCK_SIZE_K * stride_ak
b_ptrs += BLOCK_SIZE_K * stride_bk
for k in range(hi - 1, hi):
a = tl.load(a_ptrs, mask=token_mask[:, None] & (offs_k[None, :] < K -
k * BLOCK_SIZE_K), other=_A0)
b = tl.load(b_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K,
other=_B0)
accumulator += tl.dot(a, b)
a_ptrs += BLOCK_SIZE_K * stride_ak
b_ptrs += BLOCK_SIZE_K * stride_bk
offs_token_id = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_token = tl.load(sorted_token_ids_ptr + offs_token_id)
token_mask = offs_token < num_valid_tokens
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
alpha_row_ptrs = alpha_row_ptr + offs_token // top_k
alpha_col_ptrs = alpha_col_ptr + off_experts * stride_scale_be + offs_cn
_ALPHA0 = tl.zeros([1], dtype=alpha_row_ptr.dtype.element_ty)
alpha_row = tl.load(alpha_row_ptrs, mask=token_mask, other=_ALPHA0).to(tl
.float32)
alpha_col = tl.load(alpha_col_ptrs, mask=offs_cn < N, other=_ALPHA0).to(tl
.float32)
accumulator = accumulator * alpha_row[:, None]
accumulator = accumulator * alpha_col[None, :]
if MUL_ROUTED_WEIGHT:
moe_weight = tl.load(topk_weights_ptr + offs_token, mask=token_mask,
other=0)
accumulator = accumulator * moe_weight[:, None]
accumulator = accumulator.to(tl.float16)
c_ptrs = C + stride_cm * offs_token[:, None] + stride_cn * offs_cn[None, :]
c_mask = token_mask[:, None] & (offs_cn[None, :] < N)
tl.store(c_ptrs, accumulator, mask=c_mask)
| {
"Data Type": [
"fp16"
],
"Functionality": [
"Matrix Multiplication",
"Top-K Selection"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/fused_moe_a8w8.py |
18bdbd68-3013-4acb-9efe-c2827d61c4ee | y_7.py | IntelLabs/EquiTriton | src/equitriton/sph_harm/direct/y_7.py | 1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c | 0 | @triton.jit
def seventh_order_bwd(coord_ptr: tl.tensor, coord_grad_ptr: tl.tensor,
sph_grad_ptr: tl.tensor, block_size: tl.constexpr, coord_numel: tl.
constexpr, output_numel: tl.constexpr, col_offset: tl.constexpr,
output_stride: tl.constexpr):
block_id = tl.program_id(0)
coord_stride = 3
coord_striding = tl.arange(0, block_size) * coord_stride
coord_row_offset = coord_striding + block_size * coord_stride * block_id
x = tl.load(coord_ptr + coord_row_offset, mask=coord_row_offset <
coord_numel)
y = tl.load(coord_ptr + coord_row_offset + 1, mask=coord_row_offset + 1 <
coord_numel)
z = tl.load(coord_ptr + coord_row_offset + 2, mask=coord_row_offset + 2 <
coord_numel)
output_striding = tl.arange(0, block_size) * output_stride
output_row_offset = (output_striding + block_size * output_stride *
block_id + col_offset)
g_0 = tl.load(sph_grad_ptr + output_row_offset, mask=output_row_offset <
output_numel)
g_1 = tl.load(sph_grad_ptr + output_row_offset + 1, mask=
output_row_offset + 1 < output_numel)
g_2 = tl.load(sph_grad_ptr + output_row_offset + 2, mask=
output_row_offset + 2 < output_numel)
g_3 = tl.load(sph_grad_ptr + output_row_offset + 3, mask=
output_row_offset + 3 < output_numel)
g_4 = tl.load(sph_grad_ptr + output_row_offset + 4, mask=
output_row_offset + 4 < output_numel)
g_5 = tl.load(sph_grad_ptr + output_row_offset + 5, mask=
output_row_offset + 5 < output_numel)
g_6 = tl.load(sph_grad_ptr + output_row_offset + 6, mask=
output_row_offset + 6 < output_numel)
g_7 = tl.load(sph_grad_ptr + output_row_offset + 7, mask=
output_row_offset + 7 < output_numel)
g_8 = tl.load(sph_grad_ptr + output_row_offset + 8, mask=
output_row_offset + 8 < output_numel)
g_9 = tl.load(sph_grad_ptr + output_row_offset + 9, mask=
output_row_offset + 9 < output_numel)
g_10 = tl.load(sph_grad_ptr + output_row_offset + 10, mask=
output_row_offset + 10 < output_numel)
g_11 = tl.load(sph_grad_ptr + output_row_offset + 11, mask=
output_row_offset + 11 < output_numel)
g_12 = tl.load(sph_grad_ptr + output_row_offset + 12, mask=
output_row_offset + 12 < output_numel)
g_13 = tl.load(sph_grad_ptr + output_row_offset + 13, mask=
output_row_offset + 13 < output_numel)
g_14 = tl.load(sph_grad_ptr + output_row_offset + 14, mask=
output_row_offset + 14 < output_numel)
CONST000 = 1.66389743899677
CONST001 = 3.0
CONST003 = 5.0
CONST004 = 3.32779487799353
CONST009 = 11.7655316231354
CONST012 = 16.5555704843566
CONST014 = 20.4939015319192
CONST016 = 22.0740939791422
CONST018 = 23.5310632462709
CONST019 = 20.4939015319192
CONST020 = 27.1108834234519
CONST022 = 33.1111409687132
CONST024 = 36.7901566319036
CONST025 = 36.7901566319036
CONST026 = 38.4260653723485
CONST027 = 38.4260653723485
CONST029 = 38.4260653723485
CONST030 = 44.1481879582843
CONST032 = -4.9916923169903
CONST037 = 47.0621264925417
CONST039 = 56.2781179722634
CONST044 = -441.481879582843
CONST045 = -441.481879582843
CONST048 = 76.852130744697
CONST049 = 76.852130744697
CONST050 = -8.47215106982872
CONST054 = 110.370469895711
CONST055 = 110.370469895711
CONST056 = -399.335385359224
CONST057 = 117.655316231354
CONST058 = 122.963409191515
CONST059 = 122.963409191515
CONST061 = -376.497011940334
CONST062 = -376.497011940334
CONST064 = 141.186379477625
CONST066 = 147.160626527614
CONST067 = 153.704261489394
CONST069 = -350.955726374425
CONST072 = 203.331625675889
CONST073 = 203.331625675889
CONST074 = -307.408522978788
CONST075 = -9.60651634308713
CONST076 = -9.37968632871057
CONST079 = -281.390589861317
CONST080 = -1.66389743899677
CONST081 = -266.223590239483
CONST082 = -263.216794780819
CONST084 = -263.216794780818
CONST085 = -250.998007960223
CONST089 = 281.390589861317
CONST091 = -220.740939791422
CONST092 = -220.740939791422
CONST093 = -199.667692679612
CONST094 = -1.60108605718119
CONST095 = -187.593726574211
CONST096 = -177.482393492989
CONST097 = -9.60651634308712
CONST098 = -9.1975391579759
CONST100 = -153.704261489394
CONST101 = -147.160626527614
CONST102 = -140.695294930659
CONST104 = -133.111795119741
CONST105 = -133.111795119741
CONST106 = -125.499003980111
CONST107 = -125.499003980111
CONST109 = -105.286717912327
CONST110 = -101.665812837945
CONST111 = -99.833846339806
CONST112 = -101.665812837945
CONST113 = -4.80325817154356
CONST114 = -81.3326502703558
CONST115 = -81.3326502703557
CONST116 = -76.852130744697
CONST117 = -75.2994023880668
CONST119 = -70.5931897388126
CONST121 = -66.2222819374265
CONST122 = -66.5558975598707
CONST123 = -66.5558975598707
CONST124 = -62.7495019900557
CONST125 = -56.2781179722634
CONST126 = -55.1852349478554
CONST127 = -55.1852349478554
CONST128 = -50.8329064189723
CONST129 = -50.8329064189723
CONST130 = -562.781179722634
CONST131 = -47.0621264925418
CONST132 = -50.8329064189724
CONST133 = -44.1481879582843
CONST134 = -44.3705983732471
CONST135 = -40.6663251351779
CONST136 = -40.6663251351779
CONST137 = -8.31948719498384
CONST138 = -37.6497011940334
CONST139 = -33.2779487799353
CONST140 = -29.9501539019418
CONST141 = -25.4164532094862
CONST142 = -25.4164532094862
CONST143 = -23.5310632462709
CONST144 = -532.447180478965
CONST145 = -19.2130326861743
CONST146 = -17.5477863187212
CONST147 = -12.8765548211663
CONST148 = -11.6472820729774
CONST149 = -11.2076024002683
CONST150 = -9.1975391579759
CONST151 = -11.0370469895711
CONST152 = -11.7655316231354
CONST153 = -12.8765548211663
CONST154 = -4.80325817154356
CONST155 = -3.32779487799353
CONST156 = -1.60108605718119
VAR06 = x * x * x * x
VAR07 = x * x * x
VAR08 = x * x
VAR04 = VAR07 * VAR07
VAR05 = VAR07 * VAR08
VAR16 = y * y * y
VAR17 = y * y
VAR13 = VAR16 * VAR16
VAR14 = VAR16 * VAR17
VAR15 = VAR17 * VAR17
VAR25 = z * z * z
VAR26 = z * z
VAR22 = VAR25 * VAR25
VAR23 = VAR25 * VAR26
VAR24 = VAR26 * VAR26
g_x = tl.load(coord_grad_ptr + coord_row_offset, mask=coord_row_offset <
coord_numel)
g_y = tl.load(coord_grad_ptr + coord_row_offset + 1, mask=
coord_row_offset + 1 < coord_numel)
g_z = tl.load(coord_grad_ptr + coord_row_offset + 2, mask=
coord_row_offset + 2 < coord_numel)
g_x += g_0 * (CONST082 * VAR08 * VAR24 - CONST084 * VAR06 * VAR26 +
CONST146 * VAR04 - CONST146 * VAR22) + g_1 * y * (CONST039 * VAR23 +
CONST089 * VAR06 * z + CONST130 * VAR08 * VAR25) + g_10 * (CONST155 *
VAR23 * x + VAR25 * (-CONST105 * VAR17 * x + CONST139 * VAR07) + z *
(-CONST056 * VAR07 * VAR17 + CONST081 * VAR15 * x + CONST140 * VAR05)
) + g_11 * (VAR16 * (CONST044 * VAR26 * x - CONST101 * VAR07) + y *
(CONST054 * VAR24 * x - CONST091 * VAR07 * VAR26 + CONST121 * VAR05)
) + g_12 * (CONST022 * VAR23 * x + VAR25 * (CONST024 * VAR07 +
CONST045 * VAR17 * x) + z * (-CONST044 * VAR07 * VAR17 + CONST126 *
VAR05)) + g_13 * y * (CONST079 * VAR24 * x + CONST125 * VAR05 -
CONST130 * VAR07 * VAR26) + g_14 * (-CONST069 * VAR07 * VAR25 +
CONST109 * VAR05 * z + CONST109 * VAR23 * x) + g_2 * (CONST001 *
VAR08 * (CONST091 * VAR17 * VAR26 - CONST150 * VAR24) + CONST003 *
VAR06 * (CONST012 * VAR26 + CONST016 * VAR17) + CONST055 * VAR17 *
VAR24 + CONST147 * VAR04 + CONST150 * VAR22) + g_3 * (VAR16 * (
CONST044 * VAR08 * z + CONST066 * VAR25) + y * (-CONST091 * VAR06 *
z + CONST133 * VAR23)) + g_4 * (CONST001 * VAR08 * (CONST122 *
VAR17 * VAR26 + CONST134 * VAR15 - CONST137 * VAR24) + CONST003 *
VAR06 * (CONST000 * VAR26 - CONST139 * VAR17) - CONST032 * VAR22 -
CONST105 * VAR15 * VAR26 + CONST111 * VAR17 * VAR24 + CONST148 * VAR04
) + g_5 * (CONST001 * VAR08 * (CONST106 * VAR16 * z - CONST131 *
VAR25 * y) + CONST057 * VAR06 * y * z + CONST107 * VAR16 * VAR25 -
CONST117 * VAR14 * z - CONST143 * VAR23 * y) + g_6 * (CONST001 *
VAR08 * (CONST116 * VAR15 - CONST116 * VAR17 * VAR26 + CONST154 *
VAR24) + CONST003 * VAR06 * (CONST026 * VAR17 + CONST113 * VAR26) +
CONST014 * VAR13 + CONST027 * VAR17 * VAR24 + CONST116 * VAR15 *
VAR26 + CONST149 * VAR04 + CONST156 * VAR22) + g_7 * (CONST114 *
VAR14 * x + VAR16 * (CONST072 * VAR07 + CONST073 * VAR26 * x) + y *
(CONST110 * VAR07 * VAR26 + CONST128 * VAR05 + CONST129 * VAR24 * x)
) + g_8 * (CONST075 * VAR23 * x + VAR25 * (-CONST100 * VAR17 * x +
CONST145 * VAR07) + z * (CONST067 * VAR07 * VAR17 + CONST097 *
VAR05 + CONST100 * VAR15 * x)) + g_9 * (-CONST085 * VAR07 * VAR16 +
CONST117 * VAR14 * x + y * (CONST018 * VAR24 * x + CONST119 * VAR05 +
CONST131 * VAR07 * VAR26))
g_y += g_1 * (CONST039 * VAR23 * x + CONST095 * VAR07 * VAR25 -
CONST125 * VAR05 * z) + g_10 * (CONST123 * VAR23 * y + VAR25 * (-
CONST096 * VAR16 - CONST105 * VAR08 * y) + z * (-CONST093 * VAR06 *
y + CONST144 * VAR08 * VAR16)) + g_11 * (CONST001 * VAR17 * (
CONST025 * VAR06 + CONST025 * VAR24 + CONST092 * VAR08 * VAR26) -
CONST126 * VAR06 * VAR26 - CONST126 * VAR08 * VAR24 + CONST151 *
VAR04 + CONST151 * VAR22) + g_12 * (CONST030 * VAR23 * y + CONST045 *
VAR08 * VAR25 * y - CONST092 * VAR06 * y * z) + g_13 * (CONST076 *
VAR04 - CONST076 * VAR22 - CONST102 * VAR06 * VAR26 + CONST102 *
VAR08 * VAR24) + g_2 * (CONST030 * VAR05 * y + CONST045 * VAR07 *
VAR26 * y - CONST092 * VAR24 * x * y) + g_3 * (CONST001 * VAR17 * (
CONST066 * VAR25 * x + CONST101 * VAR07 * z) - CONST133 * VAR05 * z +
CONST133 * VAR23 * x) + g_4 * (-CONST123 * VAR05 * y + VAR07 * (
CONST096 * VAR16 + CONST104 * VAR26 * y) + x * (CONST093 * VAR24 *
y - CONST144 * VAR16 * VAR26)) + g_5 * (-CONST143 * VAR05 * z +
VAR07 * (CONST062 * VAR17 * z - CONST131 * VAR25) + x * (CONST061 *
VAR17 * VAR25 - CONST062 * VAR15 * z - CONST143 * VAR23)) + g_6 * (
CONST048 * VAR05 * y + VAR07 * (CONST074 * VAR16 - CONST100 * VAR26 *
y) + x * (CONST058 * VAR14 + CONST074 * VAR16 * VAR26 - CONST116 *
VAR24 * y)) + g_7 * (CONST001 * VAR17 * (-CONST112 * VAR08 * VAR26 -
CONST128 * VAR06 - CONST128 * VAR24) + CONST003 * VAR15 * (CONST135 *
VAR08 + CONST136 * VAR26) + CONST020 * VAR13 + CONST050 * VAR04 +
CONST050 * VAR22 + CONST141 * VAR06 * VAR26 + CONST142 * VAR08 * VAR24
) + g_8 * (CONST048 * VAR23 * y + VAR25 * (CONST074 * VAR16 -
CONST100 * VAR08 * y) + z * (CONST049 * VAR06 * y + CONST059 *
VAR14 + CONST074 * VAR08 * VAR16)) + g_9 * (CONST001 * VAR17 * (-
CONST124 * VAR06 + CONST124 * VAR24) + CONST003 * VAR15 * (CONST138 *
VAR08 - CONST138 * VAR26) + CONST009 * VAR08 * VAR24 + CONST152 *
VAR04 + CONST152 * VAR06 * VAR26 - CONST152 * VAR22)
g_z += g_0 * (CONST069 * VAR07 * VAR25 - CONST109 * VAR05 * z -
CONST109 * VAR23 * x) + g_1 * y * (-CONST079 * VAR24 * x - CONST125 *
VAR05 + CONST130 * VAR07 * VAR26) + g_10 * (CONST001 * VAR26 * (-
CONST123 * VAR08 * VAR17 - CONST134 * VAR15 + CONST137 * VAR06) +
CONST003 * VAR24 * (CONST080 * VAR08 + CONST139 * VAR17) + CONST032 *
VAR04 + CONST105 * VAR08 * VAR15 - CONST111 * VAR06 * VAR17 -
CONST148 * VAR22) + g_11 * (VAR16 * (CONST044 * VAR08 * z -
CONST101 * VAR25) + y * (CONST054 * VAR06 * z - CONST091 * VAR08 *
VAR25 + CONST121 * VAR23)) + g_12 * (CONST001 * VAR26 * (CONST091 *
VAR08 * VAR17 - CONST098 * VAR06) + CONST003 * VAR24 * (CONST012 *
VAR08 + CONST016 * VAR17) + CONST055 * VAR06 * VAR17 + CONST098 *
VAR04 + CONST153 * VAR22) + g_13 * y * (-CONST079 * VAR06 * z -
CONST125 * VAR23 + CONST130 * VAR08 * VAR25) + g_14 * (-CONST082 *
VAR06 * VAR26 + CONST084 * VAR08 * VAR24 + CONST146 * VAR04 -
CONST146 * VAR22) + g_2 * (CONST022 * VAR05 * z + VAR07 * (CONST025 *
VAR25 + CONST045 * VAR17 * z) + x * (-CONST044 * VAR17 * VAR25 +
CONST127 * VAR23)) + g_3 * (VAR16 * (-CONST045 * VAR26 * x +
CONST101 * VAR07) + y * (CONST091 * VAR24 * x - CONST133 * VAR05)
) + g_4 * (CONST004 * VAR05 * z + VAR07 * (CONST104 * VAR17 * z -
CONST139 * VAR25) + x * (CONST056 * VAR17 * VAR25 - CONST081 *
VAR15 * z - CONST140 * VAR23)) + g_5 * (-CONST143 * VAR05 * y +
VAR07 * (CONST064 * VAR26 * y + CONST106 * VAR16) + x * (CONST057 *
VAR24 * y + CONST061 * VAR16 * VAR26 - CONST117 * VAR14)) + g_6 * (
CONST097 * VAR05 * z + VAR07 * (-CONST100 * VAR17 * z + CONST145 *
VAR25) + x * (CONST075 * VAR23 + CONST100 * VAR15 * z - CONST100 *
VAR17 * VAR25)) + g_7 * (CONST115 * VAR14 * z + VAR16 * (CONST072 *
VAR25 + CONST073 * VAR08 * z) + y * (CONST112 * VAR08 * VAR25 +
CONST128 * VAR23 + CONST132 * VAR06 * z)) + g_8 * (CONST001 * VAR26 *
(-CONST116 * VAR08 * VAR17 + CONST116 * VAR15 + CONST154 * VAR06) +
CONST003 * VAR24 * (CONST026 * VAR17 + CONST154 * VAR08) + CONST019 *
VAR13 + CONST029 * VAR06 * VAR17 + CONST094 * VAR04 + CONST116 *
VAR08 * VAR15 + CONST149 * VAR22) + g_9 * (CONST085 * VAR16 * VAR25 -
CONST117 * VAR14 * z + y * (CONST037 * VAR08 * VAR25 - CONST119 *
VAR23 + CONST143 * VAR06 * z))
tl.store(coord_grad_ptr + coord_row_offset, g_x, mask=coord_row_offset <
coord_numel)
tl.store(coord_grad_ptr + coord_row_offset + 1, g_y, mask=
coord_row_offset + 1 < coord_numel)
tl.store(coord_grad_ptr + coord_row_offset + 2, g_z, mask=
coord_row_offset + 2 < coord_numel)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/direct/y_7.py |
cf434cf1-eaa7-43a8-af57-ed65644e78a1 | tritonFun.py | microsoft/Givens-Orthogonal-Backprop | rotMat/triton/tritonFun.py | 3040fa287aacbf07be56eb12ddd7c513f7800191 | 0 | @triton.jit
def _forward_kernel(c_ptr, s_ptr, u_ptr, col_stride, row_stride, **meta):
n, n_tilde, dead_index, d_max, tournament_step, BLOCK_SIZE = meta['N'
], meta['N_TILDE'], meta['DEAD_INDEX'], meta['D_MAX'], meta['STEP'
], meta['BLOCK_SIZE']
pid_x = tl.program_id(axis=0)
temp = n_tilde - 1
i = pid_x + tournament_step
if pid_x == 0:
i = 0
if i >= n_tilde:
i -= temp
j = temp - pid_x + tournament_step
if j >= n_tilde:
j -= temp
if i > j:
i, j = j, i
if (j == dead_index) | (j > d_max) & (i > d_max):
return
theta_offset = i * n - (i + 2) * (i + 1) // 2 + j
c = tl.load(c_ptr + theta_offset)
s = tl.load(s_ptr + theta_offset)
offsets = tl.program_id(axis=1) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
output_offsets_i = i * row_stride + offsets * col_stride
output_offsets_j = j * row_stride + offsets * col_stride
maximum = n * row_stride + n * col_stride
maski = output_offsets_i < maximum
maskj = output_offsets_j < maximum
ui = tl.load(u_ptr + output_offsets_i, mask=maski)
uj = tl.load(u_ptr + output_offsets_j, mask=maskj)
ioutput = ui * c - uj * s
joutput = uj * c + ui * s
ui = tl.store(u_ptr + output_offsets_i, ioutput, mask=maski)
uj = tl.store(u_ptr + output_offsets_j, joutput, mask=maskj)
| {
"Data Type": [],
"Functionality": [],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/microsoft/Givens-Orthogonal-Backprop/blob/3040fa287aacbf07be56eb12ddd7c513f7800191/rotMat/triton/tritonFun.py |
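A dense PyTorch reference for the update each program performs above: rows i and j of U are mixed by one Givens rotation with cosine c and sine s. The matrix size, indices, and angle here are illustrative.

import torch

U = torch.randn(6, 5)
i, j = 1, 3
c, s = torch.cos(torch.tensor(0.3)), torch.sin(torch.tensor(0.3))
ui, uj = U[i].clone(), U[j].clone()
U[i] = ui * c - uj * s      # matches ioutput in the kernel
U[j] = uj * c + ui * s      # matches joutput in the kernel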
ba747cf4-08d3-4408-bf74-1154ad010718 | triton_chunk.py | NX-AI/xlstm-jax | xlstm_jax/models/xlstm_pytorch/blocks/mlstm/backend/triton_chunk.py | 6615e620ba4ecdbe4fd9cc4e9a5a313b133e84a7 | 0 | @triton.jit
def chunk_mlstm_fwd_kernel_h(q, k, v, C, n, m, m_total, i, f, h, norm,
s_qk_h, s_qk_t, s_qk_d, s_vh_h, s_vh_t, s_vh_d, s_C_h, s_C_t, s_n_h,
scale, H: tl.constexpr, T: tl.constexpr, K: tl.constexpr, V: tl.
constexpr, BT: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr, NT: tl
.constexpr):
i_v, i_t, i_bC = tl.program_id(0), tl.program_id(1), tl.program_id(2)
h_i = tl.arange(0, BT)
m_s = h_i[:, None] >= h_i[None, :]
b_h = tl.zeros([BT, BV], dtype=tl.float32)
b_s = tl.zeros([BT, BT], dtype=tl.float32)
b_norm = tl.zeros([BT, BV], dtype=tl.float32)
for i_k in range(tl.cdiv(K, BK)):
p_q = tl.make_block_ptr(q + i_bC * s_qk_h, (T, K), (s_qk_t, s_qk_d),
(i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_k = tl.make_block_ptr(k + i_bC * s_qk_h, (K, T), (s_qk_d, s_qk_t),
(i_k * BK, i_t * BT), (BK, BT), (0, 1))
p_C = tl.make_block_ptr(C + i_bC * s_C_h + i_t * K * V, (K, V), (
s_C_t, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
p_n = tl.make_block_ptr(n + i_bC * s_n_h + i_t * K, (K, BV), (1, 0),
(i_k * BK, 0), (BK, BV), (0, 1))
b_q = tl.load(p_q, boundary_check=(0, 1))
b_k = tl.load(p_k, boundary_check=(0, 1))
b_C = tl.load(p_C, boundary_check=(0, 1))
b_n = tl.load(p_n, boundary_check=(0,))
b_h += tl.dot(b_q, b_C, allow_tf32=False)
b_s += tl.dot(b_q, b_k, allow_tf32=False)
b_n2 = tl.dot(b_q, b_n, allow_tf32=False)
b_norm += b_n2
p_f = f + i_bC * T + i_t * BT + tl.arange(0, BT)
b_f = tl.load(p_f)
p_i = i + i_bC * T + i_t * BT + tl.arange(0, BT)
b_i = tl.load(p_i)
b_m = tl.load(m + i_bC * (NT + 1) + i_t)
b_logD = b_i[None, :] + b_f[:, None] - b_f[None, :]
b_logD = tl.where(m_s, b_logD, -float('inf'))
b_mlogD = tl.max(b_logD, axis=1)
b_m_total = tl.maximum(b_f + b_m, b_mlogD)
p_m_total = tl.make_block_ptr(m_total + T * i_bC, (T,), (1,), (i_t * BT
,), (BT,), (0,))
tl.store(p_m_total, b_m_total.to(p_m_total.dtype.element_ty),
boundary_check=(0,))
b_D = tl.math.exp2(b_logD - b_m_total[:, None])
b_h = b_h * tl.math.exp2(b_f + b_m - b_m_total)[:, None] * scale
b_s = b_s * b_D * scale
b_norm = b_norm * tl.math.exp2(b_f + b_m - b_m_total)[:, None] * scale
b_s = tl.where(m_s, b_s, 0)
b_norm += tl.sum(b_s, axis=1)[:, None]
b_norm = tl.abs(b_norm)
b_norm = tl.maximum(b_norm, tl.math.exp2(-b_m_total)[:, None])
tl.store(norm + i_bC * T + i_t * BT + tl.arange(0, BT), tl.max(b_norm,
axis=1))
p_v = tl.make_block_ptr(v + i_bC * s_vh_h, (T, V), (s_vh_t, s_vh_d), (
i_t * BT, i_v * BV), (BT, BV), (1, 0))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_h = (b_h + tl.dot(b_s.to(b_v.dtype), b_v, allow_tf32=False)) / b_norm
p_h = tl.make_block_ptr(h + i_bC * s_vh_h, (T, V), (s_vh_t, s_vh_d), (
i_t * BT, i_v * BV), (BT, BV), (1, 0))
tl.store(p_h, b_h.to(p_h.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Recurrent Neural Networks",
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access",
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"Apache",
"BSD"
] | https://github.com/NX-AI/xlstm-jax/blob/6615e620ba4ecdbe4fd9cc4e9a5a313b133e84a7/xlstm_jax/models/xlstm_pytorch/blocks/mlstm/backend/triton_chunk.py |
185656ec-3bff-4006-8c3e-b0f32117c386 | triton_sll.py | pytorch/FBGEMM | fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py | fe980ab54a6e28818d81c8694b6564e7f804418b | 0 | @triton.jit
def array_jagged_bmm_kernel(a_ptr, b_ptr, c_ptr, a_offsets_ptr,
b_offsets_ptr, c_offsets_ptr, D, stride_bk, stride_bn, stride_cm,
stride_cn, transpose, max_seq_len, BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, allow_tf32: tl.
constexpr):
pid_batch = tl.program_id(2)
pid_m = tl.program_id(1)
pid_n = tl.program_id(0)
batch_offset_am = tl.load(a_offsets_ptr + pid_batch)
batch_offset_bk = tl.load(b_offsets_ptr + pid_batch)
batch_offset_cm = tl.load(c_offsets_ptr + pid_batch)
batch_K = tl.load(b_offsets_ptr + pid_batch + 1) - batch_offset_bk
batch_M = tl.load(c_offsets_ptr + pid_batch + 1) - batch_offset_cm
stride_am = batch_M * (1 - transpose) + 1 * transpose
stride_ak = batch_M * transpose + 1 * (1 - transpose)
batch_K = tl.minimum(batch_K, max_seq_len)
batch_M = tl.minimum(batch_M, max_seq_len)
if batch_K == 0:
return
batch_N = D
if pid_m * BLOCK_SIZE_M >= batch_M or pid_n * BLOCK_SIZE_N >= batch_N:
return
offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % batch_M
offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % batch_N
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptrs = a_ptr + batch_offset_am + (offs_am[:, None] * stride_am +
offs_k[None, :] * stride_ak)
b_ptrs = b_ptr + batch_offset_bk * stride_bk + (offs_k[:, None] *
stride_bk + offs_bn[None, :] * stride_bn)
c = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for k in range(0, tl.cdiv(batch_K, BLOCK_SIZE_K)):
a = tl.load(a_ptrs, mask=offs_k[None, :] < batch_K - k *
BLOCK_SIZE_K, other=0.0)
b = tl.load(b_ptrs, mask=offs_k[:, None] < batch_K - k *
BLOCK_SIZE_K, other=0.0)
c += tl.dot(a, b, allow_tf32=allow_tf32)
a_ptrs += BLOCK_SIZE_K * stride_ak
b_ptrs += BLOCK_SIZE_K * stride_bk
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptrs = c_ptr + stride_cm * batch_offset_cm + stride_cm * offs_cm[:, None
] + stride_cn * offs_cn[None, :]
c_mask = (offs_cm[:, None] < batch_M) & (offs_cn[None, :] < batch_N)
tl.store(c_ptrs, c, mask=c_mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Blocked Access",
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"High Throughput"
]
} | [
"BSD",
"MIT"
] | https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py |
022c4cc0-b308-49be-b921-b32509712645 | empty.py | triton-lang/triton | python/examples/empty.py | a2b398e0bb1b120f31cf386d6ae3261c3ab84207 | 0 | @triton.jit
def kernel(X, stride_xm, stride_xn, BLOCK: tl.constexpr):
pass
| {
"Data Type": [],
"Functionality": [],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/triton-lang/triton/blob/a2b398e0bb1b120f31cf386d6ae3261c3ab84207/python/examples/empty.py |
e008b2d4-e5d7-4904-adbb-d6d877357da0 | gemm_a16w8.py | AlibabaPAI/FLASHNN | flashnn/triton_kernels/gemm_a16w8.py | 528a9301587f5fb135b25d973a87ba0a40a703a7 | 0 | @triton.jit
def _triton_gemm_a16w8_per_channel_kernel(A, B, C, scale_b, bias,
zero_points, M, N, K, stride_am, stride_ak, stride_bn, stride_bk,
stride_cm, stride_cn, stride_zpk, stride_zpn, stride_scalek,
stride_scalen, add_bias: tl.constexpr, add_zero_points: tl.constexpr,
BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr,
GROUP_M: tl.constexpr, SPLIT_K: tl.constexpr):
pid = tl.program_id(0)
pid_z = tl.program_id(1)
grid_m = tl.cdiv(M, BLOCK_M)
grid_n = tl.cdiv(N, BLOCK_N)
width = GROUP_M * grid_n
group_id = pid // width
group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
pid_m = group_id * GROUP_M + pid % group_size
pid_n = pid % width // group_size
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
rk = pid_z * BLOCK_K + tl.arange(0, BLOCK_K)
A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
B = B + (rbn[:, None] * stride_bn + rk[None, :] * stride_bk)
acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.float32)
if add_zero_points:
offs_zero_points = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
zero_points_ptrs = zero_points + offs_zero_points
_ZERO_POINT0 = tl.zeros([1], dtype=zero_points.dtype.element_ty)
zero_points_vals = tl.load(zero_points_ptrs, mask=offs_zero_points <
N, other=_ZERO_POINT0)
for k in range(0, tl.cdiv(K, BLOCK_K * SPLIT_K)):
k_remaining = K - k * (BLOCK_K * SPLIT_K)
_A0 = tl.zeros((1, 1), dtype=A.dtype.element_ty)
a = tl.load(A, mask=rk[None, :] < k_remaining, other=_A0)
_B0 = tl.zeros((1, 1), dtype=B.dtype.element_ty)
b = tl.load(B, mask=rk[None, :] < k_remaining, other=_B0)
if add_zero_points:
b = b - zero_points_vals[:, None]
b_fp = b.to(A.dtype.element_ty)
b_fp = tl.trans(b_fp)
acc += tl.dot(a, b_fp, out_dtype=tl.float32, allow_tf32=True)
A += BLOCK_K * SPLIT_K * stride_ak
B += BLOCK_K * SPLIT_K * stride_bk
offs_scale = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
scale_ptrs = scale_b + offs_scale
_SCALE0 = tl.zeros([1], dtype=scale_b.dtype.element_ty)
scales = tl.load(scale_ptrs, mask=offs_scale < N, other=_SCALE0)
acc *= scales
acc = acc.to(C.dtype.element_ty)
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
C = C + (rm[:, None] * stride_cm + rn[None, :] * stride_cn)
mask = (rm < M)[:, None] & (rn < N)[None, :]
if add_bias:
offs_bias = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
bias_ptrs = bias + offs_bias
_BIAS0 = tl.zeros([1], dtype=bias.dtype.element_ty)
bias_vals = tl.load(bias_ptrs, mask=offs_bias < N, other=_BIAS0)
if pid_z == 0:
acc += bias_vals[None, :]
if SPLIT_K == 1:
tl.store(C, acc, mask=mask)
else:
tl.atomic_add(C, acc, mask=mask)
| {
"Data Type": [
"int8"
],
"Functionality": [
"Matrix Multiplication",
"Quantization"
],
"Memory Access Pattern": [
"Strided Access",
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/gemm_a16w8.py |
0193e573-d1a1-4efc-bba9-db1a33827ad0 | z_order.py | Kitsunetic/space-filling-pytorch | space_filling_pytorch/functional/z_order.py | 0de955ad1036973ee7506c5a0124c208acec722d | 0 | @triton.jit
def _encode_z_unpadded_kernel(xyz_ptr, batch_idx_ptr, code_ptr, space_size,
x_offset, y_offset, z_offset, str_xyz_n, str_xyz_c, N, BLK: tl.
constexpr, ASSIGN_BATCH_INDEX: tl.constexpr):
pid = tl.program_id(0)
offs_n = pid * BLK + tl.arange(0, BLK)
mask = offs_n < N
xyz_ptrs = xyz_ptr + offs_n * str_xyz_n
fx = tl.load(xyz_ptrs + x_offset * str_xyz_c, mask=mask)
fy = tl.load(xyz_ptrs + y_offset * str_xyz_c, mask=mask)
fz = tl.load(xyz_ptrs + z_offset * str_xyz_c, mask=mask)
ret = _calculate_zorder(fx, fy, fz, space_size)
if ASSIGN_BATCH_INDEX:
batch_idx_ptrs = batch_idx_ptr + offs_n
batch_idx = tl.load(batch_idx_ptrs, mask=mask).to(tl.int64)
ret |= batch_idx << 48
code_ptrs = code_ptr + offs_n
tl.store(code_ptrs, ret, mask=mask)
| {
"Data Type": [],
"Functionality": [],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/Kitsunetic/space-filling-pytorch/blob/0de955ad1036973ee7506c5a0124c208acec722d/space_filling_pytorch/functional/z_order.py |
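_calculate_zorder is defined elsewhere in that file, so the bit-interleaving below (16 bits per axis, x in the most significant position) is an assumption used only to illustrate what a z-order/Morton code is; the batch index is packed into the high bits exactly as `ret |= batch_idx << 48` does above.

def morton3d(ix: int, iy: int, iz: int, bits: int = 16) -> int:
    code = 0
    for b in range(bits):                    # interleave one bit of each axis per step
        code |= ((ix >> b) & 1) << (3 * b + 2)
        code |= ((iy >> b) & 1) << (3 * b + 1)
        code |= ((iz >> b) & 1) << (3 * b)
    return code

code = morton3d(3, 5, 9)
code |= 2 << 48                              # batch index 2 in bits 48 and up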
86d2fa4b-e481-4a17-9385-cbf6f0389011 | triton_sll.py | pytorch/FBGEMM | fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py | fe980ab54a6e28818d81c8694b6564e7f804418b | 0 | @triton.jit
def jagged_dense_flash_attention_fwd_kernel(q_ptr, k_ptr, v_ptr, ab_ptr,
o_ptr, lse_ptr, jagged_offsets_ptr, max_seq_len, stride_ql, stride_qd,
stride_kb, stride_kd, stride_kt, stride_vn, stride_vd, stride_ab_b,
stride_ab_n, stride_ab_t, stride_ob, stride_ot, stride_od, D: tl.
constexpr, T: tl.constexpr, allow_tf32: tl.constexpr, BLOCK_T: tl.
constexpr, BLOCK_L: tl.constexpr, BLOCK_D: tl.constexpr):
pid_t = tl.program_id(0)
pid_batch = tl.program_id(1)
begin = tl.load(jagged_offsets_ptr + pid_batch)
end = tl.load(jagged_offsets_ptr + pid_batch + 1)
length = end - begin
length = tl.minimum(length, max_seq_len)
if length == 0:
return
q_start_ptr = q_ptr + begin * stride_ql
k_start_ptr = k_ptr + pid_batch * stride_kb
ab_start_ptr = ab_ptr + pid_batch * stride_ab_b
v_start_ptr = v_ptr + begin * stride_vn
offs_t = pid_t * BLOCK_T + tl.arange(0, BLOCK_T)
offs_d = tl.arange(0, BLOCK_D)
ki_ptrs = k_start_ptr + offs_d[:, None] * stride_kd + offs_t[None, :
] * stride_kt
ki = tl.load(ki_ptrs, mask=(offs_d[:, None] < D) & (offs_t[None, :] < T
), other=0.0)
mi = tl.zeros([BLOCK_T], dtype=tl.float32) - float('inf')
li = tl.zeros([BLOCK_T], dtype=tl.float32)
oi = tl.zeros([BLOCK_T, BLOCK_D], dtype=tl.float32)
for start_l in range(0, length, BLOCK_L):
offs_l = start_l + tl.arange(0, BLOCK_L)
qj_ptrs = q_start_ptr + offs_l[:, None] * stride_ql + offs_d[None, :
] * stride_qd
qj = tl.load(qj_ptrs, mask=(offs_l[:, None] < length) & (offs_d[
None, :] < D), other=0.0)
qk = tl.dot(qj, ki, allow_tf32=allow_tf32)
ab_ptrs = ab_start_ptr + offs_l[:, None] * stride_ab_n + offs_t[None, :
] * stride_ab_t
abij = tl.load(ab_ptrs, mask=(offs_l[:, None] < length) & (offs_t[
None, :] < T), other=0.0)
qk = qk + abij
mij_hat = tl.max(qk, axis=0)
mi_new = tl.maximum(mi, mij_hat)
pij_hat = tl.exp(qk - mi_new[None, :])
pij_hat = tl.where((offs_l[:, None] < length) & (offs_t[None, :] <
T), pij_hat, 0.0)
lij_hat = tl.sum(pij_hat, axis=0)
alpha = tl.exp(mi - mi_new)
li_new = alpha * li + lij_hat
oi = alpha[:, None] * oi
vj_ptrs = v_start_ptr + offs_l[:, None] * stride_vn + offs_d[None, :
] * stride_vd
vj = tl.load(vj_ptrs, mask=(offs_l[:, None] < length) & (offs_d[
None, :] < D), other=0.0)
pij_hat = pij_hat.to(v_ptr.dtype.element_ty)
oi = oi + tl.dot(tl.trans(pij_hat), vj, allow_tf32=allow_tf32)
mi = mi_new
li = li_new
oi = oi / li[:, None]
lse_ptrs = lse_ptr + pid_batch * T + offs_t
lse_i = mi + tl.log(li)
tl.store(lse_ptrs, lse_i, mask=offs_t < T)
attn_out_ptrs = o_ptr + pid_batch * stride_ob + offs_t[:, None
] * stride_ot + offs_d[None, :] * stride_od
tl.store(attn_out_ptrs, oi, mask=(offs_t[:, None] < T) & (offs_d[None,
:] < D))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access",
"Blocked Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"BSD",
"MIT"
] | https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py |
25c4864d-4f48-498d-bd92-b326c89bc547 | math.py | BobMcDear/attorch | attorch/math.py | da06cb6236bb47195e33fe3986ed21c675ed94cc | 0 | @triton.jit
def nll_loss(input, size, reduction: tl.constexpr):
"""
    Measures the negative log likelihood loss given the log-probabilities of the target class.
Args:
        input: Input containing the predicted log-probabilities corresponding to the target class.
The input can have arbitrary shape.
size: Number of elements in the input.
This value is used only if reduction is 'mean'.
reduction: Reduction strategy for the output.
Options are 'none' for no reduction, 'mean' for averaging the loss
across all entries, and 'sum' for summing the loss across all entries.
Returns:
Loss.
"""
input = input.to(tl.float32)
if reduction == 'none':
output = -input
elif reduction == 'mean':
output = -tl.sum(input) / size
elif reduction == 'sum':
output = -tl.sum(input)
return output
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Low Latency",
"Single Instance"
]
} | [
"MIT"
] | https://github.com/BobMcDear/attorch/blob/da06cb6236bb47195e33fe3986ed21c675ed94cc/attorch/math.py |
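A small numerical illustration of the three reduction modes, written with plain PyTorch on values already gathered for the target class, which is exactly the kind of input the helper above receives.

import torch

logp = torch.tensor([0.7, 0.2, 0.9]).log()   # log-probabilities of the target class
loss_none = -logp                            # reduction='none': elementwise negation
loss_mean = -logp.sum() / logp.numel()       # reduction='mean': sum divided by `size`
loss_sum = -logp.sum()                       # reduction='sum'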
f9b8875f-1555-4a1e-a553-4f8289afd403 | triton_fused_local_attn.py | LouChao98/vqtree | ops/triton_fused_local_attn.py | 27a53274df7a804bce27dffcce5f5be73f64b6f3 | 0 | @triton.heuristics({'EVEN_M': lambda args: args['seqlen_q'] % args[
'BLOCK_M'] == 0, 'EVEN_N': lambda args: args['seqlen_k'] % args[
'BLOCK_N'] == 0})
@triton.jit
def _fwd_kernel(Q, K, V, Out, softmax_scale, stride_qb, stride_qh,
stride_qm, stride_kb, stride_kh, stride_kn, stride_vb, stride_vh,
stride_vn, stride_ob, stride_oh, stride_om, nheads, seqlen_q, seqlen_k,
CACHE_KEY_SEQLEN_Q, CACHE_KEY_SEQLEN_K, WINDOW_SIZE: tl.constexpr,
BLOCK_HEADDIM: tl.constexpr, EVEN_M: tl.constexpr, EVEN_N: tl.constexpr,
BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr):
start_m = tl.program_id(0)
off_hb = tl.program_id(1)
off_b = off_hb // nheads
off_h = off_hb % nheads
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
offs_d = tl.arange(0, BLOCK_HEADDIM)
Q_block_ptr = tl.make_block_ptr(base=Q + (off_b * stride_qb + off_h *
stride_qh), shape=(seqlen_q, BLOCK_HEADDIM), strides=(stride_qm, 1),
offsets=(start_m * BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_HEADDIM
), order=(1, 0))
K_block_ptr = tl.make_block_ptr(base=K + (off_b * stride_kb + off_h *
stride_kh), shape=(BLOCK_HEADDIM, seqlen_k), strides=(1, stride_kn),
offsets=(0, 0), block_shape=(BLOCK_HEADDIM, BLOCK_N), order=(0, 1))
V_block_ptr = tl.make_block_ptr(base=V + (off_b * stride_vb + off_h *
stride_vh), shape=(seqlen_k, BLOCK_HEADDIM), strides=(stride_vn, 1),
offsets=(0, 0), block_shape=(BLOCK_N, BLOCK_HEADDIM), order=(1, 0))
l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + 1.0
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) + NEGINF
acc = tl.zeros([BLOCK_M, BLOCK_HEADDIM], dtype=tl.float32)
if EVEN_M:
q = tl.load(Q_block_ptr)
else:
q = tl.load(Q_block_ptr, boundary_check=(0,), padding_option='zero')
acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, softmax_scale,
K_block_ptr, V_block_ptr, start_m, offs_m, offs_n, seqlen_k,
WINDOW_SIZE, BLOCK_M, BLOCK_N, EVEN_M & EVEN_N, 1)
acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, softmax_scale,
K_block_ptr, V_block_ptr, start_m, offs_m, offs_n, seqlen_k,
WINDOW_SIZE, BLOCK_M, BLOCK_N, EVEN_M & EVEN_N, 2)
acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, softmax_scale,
K_block_ptr, V_block_ptr, start_m, offs_m, offs_n, seqlen_k,
WINDOW_SIZE, BLOCK_M, BLOCK_N, EVEN_M & EVEN_N, 3)
acc = acc / l_i[:, None]
start_m = tl.program_id(0)
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_d = tl.arange(0, BLOCK_HEADDIM)
out_ptrs = Out + off_b * stride_ob + off_h * stride_oh + (offs_m[:,
None] * stride_om + offs_d[None, :])
if EVEN_M:
tl.store(out_ptrs, acc)
else:
tl.store(out_ptrs, acc, mask=offs_m[:, None] < seqlen_q)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Matrix Multiplication",
"Softmax"
],
"Memory Access Pattern": [
"Tiled",
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/LouChao98/vqtree/blob/27a53274df7a804bce27dffcce5f5be73f64b6f3/ops/triton_fused_local_attn.py |
d220be62-ce65-4110-8341-90c6b7412373 | scatter_reduce.py | pyg-team/pyg-lib | pyg_lib/ops/scatter_reduce.py | bdd392a7093c5016f42ec7ae1945ca77dbdd97db | 0 | @triton.jit
def _fused_scatter_reduce_forward_kernel(inputs_ptr, index_ptr, out_ptr,
num_feats, num_reductions, numel, REDUCE0, REDUCE1, REDUCE2, REDUCE3,
BLOCK_SIZE: tl.constexpr):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < numel
inputs = tl.load(inputs_ptr + offsets, mask=mask)
index_offsets = offsets // num_feats
index = tl.load(index_ptr + index_offsets, mask=mask)
if REDUCE0 > 0:
out_offsets = num_feats * num_reductions * index
out_offsets = out_offsets + offsets % num_feats
if REDUCE0 == 1:
tl.atomic_add(out_ptr + out_offsets, inputs, mask=mask)
elif REDUCE0 == 2:
tl.atomic_add(out_ptr + out_offsets, inputs, mask=mask)
elif REDUCE0 == 3:
tl.atomic_min(out_ptr + out_offsets, inputs, mask=mask)
elif REDUCE0 == 4:
tl.atomic_max(out_ptr + out_offsets, inputs, mask=mask)
if REDUCE1 > 0:
out_offsets = num_feats * num_reductions * index
out_offsets = out_offsets + num_feats
out_offsets = out_offsets + offsets % num_feats
if REDUCE1 == 1:
tl.atomic_add(out_ptr + out_offsets, inputs, mask=mask)
elif REDUCE1 == 2:
tl.atomic_add(out_ptr + out_offsets, inputs, mask=mask)
        elif REDUCE1 == 3:
            tl.atomic_min(out_ptr + out_offsets, inputs, mask=mask)
        elif REDUCE1 == 4:
            tl.atomic_max(out_ptr + out_offsets, inputs, mask=mask)
if REDUCE2 > 0:
out_offsets = num_feats * num_reductions * index
out_offsets = out_offsets + 2 * num_feats
out_offsets = out_offsets + offsets % num_feats
if REDUCE2 == 1:
tl.atomic_add(out_ptr + out_offsets, inputs, mask=mask)
elif REDUCE2 == 2:
tl.atomic_add(out_ptr + out_offsets, inputs, mask=mask)
elif REDUCE2 == 3:
tl.atomic_min(out_ptr + out_offsets, inputs, mask=mask)
elif REDUCE2 == 4:
tl.atomic_max(out_ptr + out_offsets, inputs, mask=mask)
if REDUCE3 > 0:
out_offsets = num_feats * num_reductions * index
out_offsets = out_offsets + 3 * num_feats
out_offsets = out_offsets + offsets % num_feats
if REDUCE3 == 1:
tl.atomic_add(out_ptr + out_offsets, inputs, mask=mask)
elif REDUCE3 == 2:
tl.atomic_add(out_ptr + out_offsets, inputs, mask=mask)
elif REDUCE3 == 3:
tl.atomic_min(out_ptr + out_offsets, inputs, mask=mask)
elif REDUCE3 == 4:
tl.atomic_max(out_ptr + out_offsets, inputs, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Memory-Bound"
]
} | [
"MIT"
] | https://github.com/pyg-team/pyg-lib/blob/bdd392a7093c5016f42ec7ae1945ca77dbdd97db/pyg_lib/ops/scatter_reduce.py |
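A hedged host-side launch sketch for the kernel above. The reduce-code convention (1 and 2 accumulate by addition, 3 takes the minimum, 4 the maximum, 0 disables a slot) is read off the atomics in the kernel, and the zero-initialised output only suits the additive cases, so this is an illustration rather than the pyg-lib wrapper.

import torch
import triton

def fused_scatter_reduce(inputs, index, dim_size, num_feats, reduce_codes=(1, 0, 0, 0), block_size=256):
    # inputs is a flattened (num_rows, num_feats) tensor; index holds one target row per input row.
    num_reductions = len(reduce_codes)        # the kernel lays out four reduction slots per target index
    out = torch.zeros(dim_size * num_reductions * num_feats, device=inputs.device, dtype=inputs.dtype)
    numel = inputs.numel()
    grid = (triton.cdiv(numel, block_size),)
    _fused_scatter_reduce_forward_kernel[grid](inputs, index, out, num_feats, num_reductions,
                                               numel, *reduce_codes, BLOCK_SIZE=block_size)
    return out.view(dim_size, num_reductions, num_feats)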
04221a8d-0acd-483c-9226-c62cc41c67fe | layer_norm.py | chengzeyi/stable-fast | src/sfast/triton/ops/layer_norm.py | 3a6f35c7045f8f6812515957ca62ef37260ff080 | 0 | @triton.jit
def _layer_norm_fwd_fused(X, Y, W, B, Mean, Rstd, stride: tl.constexpr, N:
tl.constexpr, eps, BLOCK_SIZE: tl.constexpr):
row = tl.program_id(0)
Y += row * stride
X += row * stride
if BLOCK_SIZE >= N:
cols = tl.arange(0, BLOCK_SIZE)
x = tl.load(X + cols, mask=cols < N).to(tl.float32)
m2_ = tl.zeros((BLOCK_SIZE,), dtype=tl.float32)
weight_ = (cols < N).to(tl.float32)
_mean, _m2, _weight = x, m2_, weight_
else:
_mean = tl.zeros((BLOCK_SIZE,), dtype=tl.float32)
_m2 = tl.zeros((BLOCK_SIZE,), dtype=tl.float32)
_weight = tl.zeros((BLOCK_SIZE,), dtype=tl.float32)
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
x = tl.load(X + cols, mask=cols < N).to(tl.float32)
m2_ = tl.zeros((BLOCK_SIZE,), dtype=tl.float32)
weight_ = (cols < N).to(tl.float32)
if off == 0:
_mean, _m2, _weight = x, m2_, weight_
else:
_mean, _m2, _weight = welford_combine(_mean, _m2, _weight,
x, m2_, weight_)
mean, m2, weight = tl.reduce((_mean, _m2, _weight), 0, welford_combine)
var = m2 / weight
rstd = 1 / tl.sqrt(var + eps)
mean = mean.to(x.dtype)
rstd = rstd.to(x.dtype)
if Mean is not None:
tl.store(Mean + row, mean)
if Rstd is not None:
tl.store(Rstd + row, rstd)
if BLOCK_SIZE >= N:
cols = tl.arange(0, BLOCK_SIZE)
mask = cols < N
if W is None:
w = tl.full((BLOCK_SIZE,), 1.0, dtype=x.dtype)
else:
w = tl.load(W + cols, mask=mask)
if B is None:
b = tl.zeros((BLOCK_SIZE,), dtype=x.dtype)
else:
b = tl.load(B + cols, mask=mask)
x_hat = (x - mean) * rstd
y = x_hat * w + b
tl.store(Y + cols, y, mask=mask)
else:
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
mask = cols < N
if W is None:
w = tl.full((BLOCK_SIZE,), 1.0, dtype=x.dtype)
else:
w = tl.load(W + cols, mask=mask)
if B is None:
b = tl.zeros((BLOCK_SIZE,), dtype=x.dtype)
else:
b = tl.load(B + cols, mask=mask)
x = tl.load(X + cols, mask=mask)
x_hat = (x - mean) * rstd
y = x_hat * w + b
tl.store(Y + cols, y, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound",
"Memory-Bound"
]
} | [
"MIT"
] | https://github.com/chengzeyi/stable-fast/blob/3a6f35c7045f8f6812515957ca62ef37260ff080/src/sfast/triton/ops/layer_norm.py |
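A minimal launch sketch for the fused forward pass above, one program per row. Choosing BLOCK_SIZE as the next power of two of N is an assumption about the intended launch, not necessarily the stable-fast wrapper.

import torch
import triton

def layer_norm_fwd(x, weight=None, bias=None, eps=1e-5):
    M, N = x.shape
    y = torch.empty_like(x)
    mean = torch.empty(M, device=x.device, dtype=x.dtype)
    rstd = torch.empty(M, device=x.device, dtype=x.dtype)
    BLOCK_SIZE = triton.next_power_of_2(N)
    _layer_norm_fwd_fused[(M,)](x, y, weight, bias, mean, rstd,
                                x.stride(0), N, eps, BLOCK_SIZE=BLOCK_SIZE)
    return y, mean, rstd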
747983d8-2740-48f1-a5b4-0b2b362c5601 | qkv_concat.py | ai-compiler-study/triton-kernels | triton_kernels/ops/qkv_concat.py | 2308e5e9d965059fe2d19b4d535debac4970b69e | 0 | @triton.jit
def triton_qkv_concat(txt_qkv, img_qkv, out_q_ptr, out_k_ptr, out_v_ptr,
seq_len, num_heads, head_dim, hidden_dim, seq_txt_len, stride_txt_a,
stride_txt_b, stride_img_a, stride_img_b, stride_output_a,
stride_output_b, stride_output_c, XBLOCK: tl.constexpr):
pid = tl.program_id(0)
xoffset = pid * XBLOCK + tl.arange(0, XBLOCK)[:]
seq_idx = xoffset // hidden_dim % seq_len
batch_idx = xoffset // stride_output_a
hidden_dim_idx = xoffset % hidden_dim
headdim_idx = xoffset % head_dim
head_idx = xoffset // head_dim % num_heads
txt_seq_end = tl.full([1], seq_txt_len, tl.int64)
txt_mask = seq_idx < txt_seq_end
img_mask = seq_idx >= txt_seq_end
txt_q_data = tl.load(txt_qkv + (hidden_dim * 0 + hidden_dim_idx +
stride_txt_b * seq_idx + stride_txt_a * batch_idx), txt_mask, other=0.0
).to(tl.float32)
zero_mask = tl.full(txt_q_data.shape, 0.0, txt_q_data.dtype)
masked_txt_q = tl.where(txt_mask, txt_q_data, zero_mask)
img_q_data = tl.load(img_qkv + (-stride_txt_a + hidden_dim * 0 +
hidden_dim_idx + stride_img_b * seq_idx + stride_img_a * batch_idx),
img_mask, other=0.0).to(tl.float32)
zero_mask = tl.full(img_q_data.shape, 0.0, img_q_data.dtype)
masked_img_q = tl.where(img_mask, img_q_data, zero_mask)
out_q = tl.where(txt_mask, masked_txt_q, masked_img_q)
tl.store(out_q_ptr + (headdim_idx + stride_output_c * seq_idx +
stride_output_b * head_idx + stride_output_a * batch_idx), out_q, None)
txt_k_data = tl.load(txt_qkv + (hidden_dim * 1 + hidden_dim_idx +
stride_txt_b * seq_idx + stride_txt_a * batch_idx), txt_mask, other=0.0
).to(tl.float32)
zero_mask = tl.full(txt_k_data.shape, 0.0, txt_k_data.dtype)
masked_txt_q = tl.where(txt_mask, txt_k_data, zero_mask)
img_k_data = tl.load(img_qkv + (-stride_txt_a + hidden_dim * 1 +
hidden_dim_idx + stride_img_b * seq_idx + stride_img_a * batch_idx),
img_mask, other=0.0).to(tl.float32)
zero_mask = tl.full(img_k_data.shape, 0.0, img_k_data.dtype)
masked_img_k = tl.where(img_mask, img_k_data, zero_mask)
out_k = tl.where(txt_mask, masked_txt_q, masked_img_k)
tl.store(out_k_ptr + (headdim_idx + stride_output_c * seq_idx +
stride_output_b * head_idx + stride_output_a * batch_idx), out_k, None)
txt_v_data = tl.load(txt_qkv + (hidden_dim * 2 + hidden_dim_idx +
stride_txt_b * seq_idx + stride_txt_a * batch_idx), txt_mask, other=0.0
).to(tl.float32)
zero_mask = tl.full(txt_v_data.shape, 0.0, txt_v_data.dtype)
masked_txt_v = tl.where(txt_mask, txt_v_data, zero_mask)
img_v_data = tl.load(img_qkv + (-stride_txt_a + hidden_dim * 2 +
hidden_dim_idx + stride_img_b * seq_idx + stride_img_a * batch_idx),
img_mask, other=0.0).to(tl.float32)
zero_mask = tl.full(img_v_data.shape, 0.0, img_v_data.dtype)
masked_img_q = tl.where(img_mask, img_v_data, zero_mask)
output_v = tl.where(txt_mask, masked_txt_v, masked_img_q)
tl.store(out_v_ptr + (headdim_idx + stride_output_c * seq_idx +
stride_output_b * head_idx + stride_output_a * batch_idx), output_v,
None)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/ai-compiler-study/triton-kernels/blob/2308e5e9d965059fe2d19b4d535debac4970b69e/triton_kernels/ops/qkv_concat.py |
5d836130-3356-4d7b-9797-41b5b20290c8 | fp8_matmul.py | drisspg/transformer_nuggets | transformer_nuggets/fp8/fp8_matmul.py | a4c66bbeebaa479ad8b6ed82d7efbafa41b17260 | 0 | @triton.jit
def apply_scaling(accumulator, a_scale, b_scale, ROW_WISE_SCALING: tl.
constexpr, offs_cm, offs_cn, M, N, stride_a_scale_m, stride_b_scale_n):
if ROW_WISE_SCALING:
a_scales = tl.load(a_scale + offs_cm * stride_a_scale_m, mask=
offs_cm < M, other=0.0)
b_scales = tl.load(b_scale + offs_cn * stride_b_scale_n, mask=
offs_cn < N, other=0.0)
acc_scale = a_scales[:, None] * b_scales[None, :]
else:
acc_scale = a_scale * b_scale
return accumulator * acc_scale
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"BSD"
] | https://github.com/drisspg/transformer_nuggets/blob/a4c66bbeebaa479ad8b6ed82d7efbafa41b17260/transformer_nuggets/fp8/fp8_matmul.py |
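A dense PyTorch reference for the two scaling modes above, with illustrative tile sizes and scales.

import torch

acc = torch.randn(4, 8)                       # a BLOCK_M x BLOCK_N accumulator tile
a_scales = torch.rand(4)                      # one scale per output row of the tile
b_scales = torch.rand(8)                      # one scale per output column of the tile

row_wise = acc * (a_scales[:, None] * b_scales[None, :])    # ROW_WISE_SCALING=True
tensor_wise = acc * (0.5 * 0.25)                            # ROW_WISE_SCALING=False with scalar scales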
da11f109-9ed7-4901-b57c-0a34d0fda019 | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/gsa/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.jit
def chunk_gsa_bwd_k_kernel_intra_dvg(v, g, o, A, do, dv, dg, offsets,
indices, T: tl.constexpr, HQ: tl.constexpr, H: tl.constexpr, V: tl.
constexpr, BT: tl.constexpr, BC: tl.constexpr, BV: tl.constexpr, NC: tl
.constexpr, NG: tl.constexpr, USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl
.constexpr):
i_v, i_c, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_bg = i_bh // NG
i_b, i_hq = i_bh // HQ, i_bh % HQ
i_h = i_hq // NG
i_t, i_i = i_c // NC, i_c % NC
if USE_OFFSETS:
i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices +
i_t * 2 + 1).to(tl.int32)
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
T = eos - bos
else:
bos, eos = i_b * T, i_b * T + T
o_v = i_v * BV + tl.arange(0, BV)
m_v = o_v < V
if i_t * BT + i_i * BC > T:
return
if HEAD_FIRST:
p_gv = tl.make_block_ptr(g + i_bg * T * V, (T, V), (V, 1), (i_t *
BT + i_i * BC, i_v * BV), (BC, BV), (1, 0))
p_gn = tl.max_contiguous(tl.multiple_of(g + i_bg * T * V + (min(i_t *
BT + i_i * BC + BC, T) - 1) * V + o_v, BV), BV)
else:
p_gv = tl.make_block_ptr(g + (bos * H + i_h) * V, (T, V), (H * V, 1
), (i_t * BT + i_i * BC, i_v * BV), (BC, BV), (1, 0))
p_gn = tl.max_contiguous(tl.multiple_of(g + (bos + min(i_t * BT +
i_i * BC + BC, T) - 1) * H * V + i_h * V + o_v, BV), BV)
b_gn = tl.load(p_gn, mask=m_v, other=0)
b_gv = tl.load(p_gv, boundary_check=(0, 1))
b_dv = tl.zeros([BC, BV], dtype=tl.float32)
for i_j in range(i_i + 1, NC):
if HEAD_FIRST:
p_g = tl.make_block_ptr(g + i_bg * T * V, (T, V), (V, 1), (i_t *
BT + i_j * BC, i_v * BV), (BC, BV), (1, 0))
p_A = tl.make_block_ptr(A + i_bh * T * BT, (BT, T), (1, BT), (
i_i * BC, i_t * BT + i_j * BC), (BC, BC), (0, 1))
p_do = tl.make_block_ptr(do + i_bh * T * V, (T, V), (V, 1), (
i_t * BT + i_j * BC, i_v * BV), (BC, BV), (1, 0))
else:
p_g = tl.make_block_ptr(g + (bos * H + i_h) * V, (T, V), (H * V,
1), (i_t * BT + i_j * BC, i_v * BV), (BC, BV), (1, 0))
p_A = tl.make_block_ptr(A + (bos * HQ + i_hq) * BT, (BT, T), (1,
HQ * BT), (i_i * BC, i_t * BT + i_j * BC), (BC, BC), (0, 1))
p_do = tl.make_block_ptr(do + (bos * HQ + i_hq) * V, (T, V), (
HQ * V, 1), (i_t * BT + i_j * BC, i_v * BV), (BC, BV), (1, 0))
b_g = tl.load(p_g, boundary_check=(0, 1))
b_do = tl.load(p_do, boundary_check=(0, 1))
b_do = (b_do * tl.exp(b_g - b_gn[None, :])).to(b_do.dtype)
b_A = tl.load(p_A, boundary_check=(0, 1))
b_dv += tl.dot(b_A, b_do)
b_dv *= tl.exp(b_gn[None, :] - b_gv)
o_i = tl.arange(0, BC)
o_c = i_i * BC + tl.arange(0, BC)
if HEAD_FIRST:
p_g = tl.max_contiguous(tl.multiple_of(g + i_bg * T * V + (i_t * BT +
i_i * BC) * V + o_v, BV), BV)
p_A = tl.max_contiguous(tl.multiple_of(A + i_bh * T * BT + (i_t *
BT + i_i * BC) * BT + o_c, BC), BC)
p_do = tl.max_contiguous(tl.multiple_of(do + i_bh * T * V + (i_t *
BT + i_i * BC) * V + o_v, BV), BV)
else:
p_g = tl.max_contiguous(tl.multiple_of(g + (bos + i_t * BT + i_i *
BC) * H * V + i_h * V + o_v, BV), BV)
p_A = tl.max_contiguous(tl.multiple_of(A + (bos + i_t * BT + i_i *
BC) * HQ * BT + i_hq * BT + o_c, BC), BC)
p_do = tl.max_contiguous(tl.multiple_of(do + (bos + i_t * BT + i_i *
BC) * HQ * V + i_hq * V + o_v, BV), BV)
for j in range(0, min(BC, T - i_t * BT - i_i * BC)):
b_A = tl.load(p_A)
b_g = tl.load(p_g, mask=m_v, other=0)
b_do = tl.load(p_do, mask=m_v, other=0)
m_i = o_i[:, None] <= j
b_dv += tl.where(m_i, tl.exp(b_g[None, :] - b_gv) * b_A[:, None] *
b_do[None, :], 0.0)
p_g += (1 if HEAD_FIRST else H) * V
p_A += (1 if HEAD_FIRST else HQ) * BT
p_do += (1 if HEAD_FIRST else HQ) * V
if HEAD_FIRST:
p_o = tl.make_block_ptr(o + i_bh * T * V, (T, V), (V, 1), (i_t * BT +
i_i * BC, i_v * BV), (BC, BV), (1, 0))
p_v = tl.make_block_ptr(v + i_bg * T * V, (T, V), (V, 1), (i_t * BT +
i_i * BC, i_v * BV), (BC, BV), (1, 0))
p_do = tl.make_block_ptr(do + i_bh * T * V, (T, V), (V, 1), (i_t *
BT + i_i * BC, i_v * BV), (BC, BV), (1, 0))
p_dv = tl.make_block_ptr(dv + i_bh * T * V, (T, V), (V, 1), (i_t *
BT + i_i * BC, i_v * BV), (BC, BV), (1, 0))
p_dg = tl.make_block_ptr(dg + i_bh * T * V, (T, V), (V, 1), (i_t *
BT + i_i * BC, i_v * BV), (BC, BV), (1, 0))
else:
p_o = tl.make_block_ptr(o + (bos * HQ + i_hq) * V, (T, V), (HQ * V,
1), (i_t * BT + i_i * BC, i_v * BV), (BC, BV), (1, 0))
p_v = tl.make_block_ptr(v + (bos * H + i_h) * V, (T, V), (H * V, 1),
(i_t * BT + i_i * BC, i_v * BV), (BC, BV), (1, 0))
p_do = tl.make_block_ptr(do + (bos * HQ + i_hq) * V, (T, V), (HQ *
V, 1), (i_t * BT + i_i * BC, i_v * BV), (BC, BV), (1, 0))
p_dv = tl.make_block_ptr(dv + (bos * HQ + i_hq) * V, (T, V), (HQ *
V, 1), (i_t * BT + i_i * BC, i_v * BV), (BC, BV), (1, 0))
p_dg = tl.make_block_ptr(dg + (bos * HQ + i_hq) * V, (T, V), (HQ *
V, 1), (i_t * BT + i_i * BC, i_v * BV), (BC, BV), (1, 0))
b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32)
b_v = tl.load(p_v, boundary_check=(0, 1)).to(tl.float32)
b_do = tl.load(p_do, boundary_check=(0, 1)).to(tl.float32)
b_dv = b_dv + tl.load(p_dv, boundary_check=(0, 1)).to(tl.float32)
b_dg = b_o * b_do - b_v * b_dv
tl.store(p_dv, b_dv.to(p_dv.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_dg, b_dg.to(p_dg.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Tiled",
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops",
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gsa/chunk.py |
8dbbd600-4b51-4d60-9081-c24273378c71 | y_0.py | IntelLabs/EquiTriton | src/equitriton/sph_harm/direct/y_0.py | 1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c | 0 | @triton.jit
def zeroth_order_fwd(coord_ptr: tl.tensor, output_ptr: tl.tensor,
block_size: tl.constexpr, coord_numel: tl.constexpr, output_numel: tl.
constexpr, col_offset: tl.constexpr, output_stride: tl.constexpr):
block_id = tl.program_id(0)
output_striding = tl.arange(0, block_size) * output_stride
output_row_offset = (output_striding + block_size * output_stride *
block_id + col_offset)
tl.store(output_ptr + output_row_offset, 1.0, mask=output_row_offset <
output_numel)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Low Latency"
]
} | [
"Apache"
] | https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/direct/y_0.py |
98296604-5ff6-454d-b957-63e6735444c9 | fused_chunk.py | sustcsonglin/flash-linear-attention | fla/ops/gla/fused_chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.jit
def fused_chunk_gla_bwd_kernel(q, k, v, g, do, dq, dk, dv, h0, s_k_h, s_k_t,
s_k_d, s_v_h, s_v_t, s_v_d, scale, B: tl.constexpr, H: tl.constexpr, T:
tl.constexpr, K: tl.constexpr, V: tl.constexpr, BT: tl.constexpr, BK:
tl.constexpr, BV: tl.constexpr, USE_INITIAL_STATE: tl.constexpr, CHECK:
tl.constexpr):
i_v, i_k, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
b_h = tl.zeros([BV, BK], dtype=tl.float32)
if USE_INITIAL_STATE:
p_h = tl.make_block_ptr(h0 + i_bh * K * V, (V, K), (1, V), (i_v *
BV, i_k * BK), (BV, BK), (0, 1))
b_h += tl.load(p_h, boundary_check=(0, 1)).to(tl.float32)
mask = i_k * BK + tl.arange(0, BK) < K
for i in range(0, tl.cdiv(T, BT)):
p_k = tl.make_block_ptr(k + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), (
i * BT, i_k * BK), (BT, BK), (1, 0))
p_db = g + i_bh * s_k_h + ((i + 1) * BT - 1
) * s_k_t + i_k * BK + tl.arange(0, BK)
p_v = tl.make_block_ptr(v + i_bh * s_v_h, (V, T), (s_v_d, s_v_t), (
i_v * BV, i * BT), (BV, BT), (0, 1))
p_do = tl.make_block_ptr(do + i_bh * s_v_h, (T, V), (s_v_t, s_v_d),
(i * BT, i_v * BV), (BT, BV), (1, 0))
p_dq = tl.make_block_ptr(dq + (i_bh + i_v * B * H) * s_k_h, (T, K),
(s_k_t, s_k_d), (i * BT, i_k * BK), (BT, BK), (1, 0))
b_dq = tl.zeros([BT, BK], dtype=tl.float32)
b_k = tl.load(p_k, boundary_check=(0, 1))
d_b = tl.load(p_db, mask=mask, other=0).to(tl.float32)
b_v = tl.load(p_v, boundary_check=(0, 1))
b_do = tl.load(p_do, boundary_check=(0, 1))
if CHECK and i == 0:
b_dq += tl.dot(b_do, b_h.to(b_do.dtype), allow_tf32=False)
b_h = b_h * tl.exp(d_b)[None, :] + tl.dot(b_v, b_k.to(b_v.dtype
), allow_tf32=False)
else:
b_dq += tl.dot(b_do, b_h.to(b_do.dtype), allow_tf32=False)
b_h = b_h * tl.exp(d_b)[None, :] + tl.dot(b_v, b_k.to(b_v.dtype
), allow_tf32=False)
b_dq *= scale
tl.store(p_dq, b_dq.to(p_dq.dtype.element_ty), boundary_check=(0, 1))
b_h = None
tl.debug_barrier()
b_dh = tl.zeros([BK, BV], dtype=tl.float32)
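    # Phase 2: sweep chunks in reverse, maintaining the backward state b_dh that propagates dO
    # through the gated recurrence to produce dk and dv.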
for i in range(1, tl.cdiv(T, BT) + 1):
p_q = tl.make_block_ptr(q + i_bh * s_k_h, (K, T), (s_k_d, s_k_t), (
i_k * BK, T - i * BT), (BK, BT), (0, 1))
p_k = tl.make_block_ptr(k + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), (
T - i * BT, i_k * BK), (BT, BK), (1, 0))
p_db = g + i_bh * s_k_h + (T - (i - 1) * BT - 1
) * s_k_t + i_k * BK + tl.arange(0, BK)
p_v = tl.make_block_ptr(v + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), (
T - i * BT, i_v * BV), (BT, BV), (1, 0))
p_do = tl.make_block_ptr(do + i_bh * s_v_h, (T, V), (s_v_t, s_v_d),
(T - i * BT, i_v * BV), (BT, BV), (1, 0))
p_dk = tl.make_block_ptr(dk + (i_bh + i_v * B * H) * s_k_h, (T, K),
(s_k_t, s_k_d), (T - i * BT, i_k * BK), (BT, BK), (1, 0))
p_dv = tl.make_block_ptr(dv + (i_bh + i_k * B * H) * s_v_h, (T, V),
(s_v_t, s_v_d), (T - i * BT, i_v * BV), (BT, BV), (1, 0))
b_q = tl.load(p_q, boundary_check=(0, 1))
b_k = tl.load(p_k, boundary_check=(0, 1))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_do = tl.load(p_do, boundary_check=(0, 1))
b_db = tl.load(p_db, mask=mask, other=0).to(tl.float32)
if CHECK and i == 1:
b_dk = tl.trans(tl.dot(b_dh.to(b_v.dtype), tl.trans(b_v),
allow_tf32=False))
b_dv = tl.dot(b_k.to(b_v.dtype), b_dh.to(b_v.dtype), allow_tf32
=False)
b_dh = b_dh * tl.exp(b_db)[:, None] + tl.dot(b_q.to(b_do.dtype),
b_do, allow_tf32=False)
else:
b_dk = tl.trans(tl.dot(b_dh.to(b_v.dtype), tl.trans(b_v),
allow_tf32=False))
b_dv = tl.dot(b_k.to(b_v.dtype), b_dh.to(b_v.dtype), allow_tf32
=False)
b_dh = b_dh * tl.exp(b_db)[:, None] + tl.dot(b_q.to(b_do.dtype),
b_do, allow_tf32=False)
tl.store(p_dk, b_dk.to(p_dk.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_dv, b_dv.to(p_dv.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Tiled",
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gla/fused_chunk.py |
601dc13f-efe7-4189-8251-9423cd04334d | foward.py | Forkxz/TritonDeepLearningKernel | kernel/dropconnect/foward.py | add54b6318e8fa5fdbf8c7b47659de9fceaa5691 | 0 | @triton.jit
def dropconnect_fwd_kernel(x_ptr, w_ptr, y_ptr, seed, M, K, N, stride_xm,
stride_xk, stride_wk, stride_wn, stride_ym, stride_yn, stride_dm,
stride_dk, stride_dn, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.
constexpr, BLOCK_SIZE_K: tl.constexpr, ALLOWTF32: tl.constexpr):
pid_m = tl.program_id(0)
pid_n = tl.program_id(1)
offset_m = pid_m * BLOCK_SIZE_M
offset_n = pid_n * BLOCK_SIZE_N
offset_k = 0
x_offsets = block_offsets_2d(M, K, stride_xm, stride_xk, offset_m,
offset_k, BLOCK_SIZE_M, BLOCK_SIZE_K)
w_offsets = block_offsets_2d(K, N, stride_wk, stride_wn, offset_k,
offset_n, BLOCK_SIZE_K, BLOCK_SIZE_N)
d_offsets = block_offsets_3d(M, K, N, stride_dm, stride_dk, stride_dn,
offset_m, offset_k, offset_n, BLOCK_SIZE_M, BLOCK_SIZE_K, BLOCK_SIZE_N)
x_offsets = x_offsets.reshape(BLOCK_SIZE_M, BLOCK_SIZE_K, 1)
w_offsets = w_offsets.reshape(1, BLOCK_SIZE_K, BLOCK_SIZE_N)
offs_k = tl.arange(0, BLOCK_SIZE_K)
x_tile = x_ptr + x_offsets
w_tile = w_ptr + w_offsets
ASM: tl.constexpr = 'cvt.rna.tf32.f32 $0, $1;'
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)):
random_masks = tl.random.rand(seed, d_offsets) > 0.5
k_mask = offs_k[None, :, None] < K - k * BLOCK_SIZE_K
x_load = tl.load(x_tile, mask=k_mask, other=0.0)
w_load = tl.load(w_tile, mask=k_mask, other=0.0)
a = tl.where(random_masks, x_load, 0.0)
b = tl.where(random_masks, w_load, 0.0)
mul = a * b
accumulator += tl.sum(mul, axis=1)
x_tile += BLOCK_SIZE_K * stride_xk
w_tile += BLOCK_SIZE_K * stride_wk
d_offsets += BLOCK_SIZE_K * stride_dk
y_offset, y_mask = block_offsets_2d(M, N, stride_ym, stride_yn,
offset_m, offset_n, BLOCK_SIZE_M, BLOCK_SIZE_N, True)
y_tile = y_ptr + y_offset
y = accumulator.to(y_tile.dtype.element_ty)
tl.store(y_tile, y, mask=y_mask)
| {
"Data Type": [
"fp32",
"int8"
],
"Functionality": [
"Elementwise Operations",
"Quantization"
],
"Memory Access Pattern": [
"Tiled",
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/Forkxz/TritonDeepLearningKernel/blob/add54b6318e8fa5fdbf8c7b47659de9fceaa5691/kernel/dropconnect/foward.py |
eceee47a-7f5a-4c5d-a084-f79da7308114 | bwd_inner_dq.py | ROCm/aotriton | tritonsrc/bwd_inner_dq.py | 016f733e8ff746450e066f78bed68709ccd93e60 | 0 | @triton.jit
def bwd_inner_dq(dq, qk_scale, bias_scale, DB_block_ptr, store_db, q,
kt_ptrs, k_stride, vt_ptrs, v_stride, B_block_ptr, do, Di, l_i,
seqlen_q, seqlen_k, head_dim, start_q, lo, hi, dropout_p, dropout_scale,
philox_seed, batch_philox_offset, max_seqlen_k, BLOCK_M: tl.constexpr,
BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr, FULL_BLOCKS: tl.
constexpr, CAUSAL: tl.constexpr, ENABLE_DROPOUT: tl.constexpr,
PADDED_HEAD: tl.constexpr, BIAS_TYPE: tl.constexpr):
offs_q = start_q + tl.arange(0, BLOCK_M)
offs_k = tl.arange(0, BLOCK_N)
offs_d = tl.arange(0, BLOCK_DMODEL)
ld_offs_d = None if not PADDED_HEAD else tl.arange(0, BLOCK_DMODEL)
kt_ptrs += lo * k_stride
vt_ptrs += lo * v_stride
if BIAS_TYPE == 1:
B_block_ptr = tl.advance(B_block_ptr, (0, lo))
DB_block_ptr = tl.advance(DB_block_ptr, (0, lo))
"""
K1 K2 (d)V dO
Q1 qk11 qk12 (d)v1 dO1
Q2 qk21 qk22 (d)v2 dO2
QK: (seqlen_q, seqlen_k)
dO: (seqlen_q, hdim)
dV: (seqlen_k, hdim)
"""
for start_k in range(lo, hi, BLOCK_N):
offs_k_curr = offs_k[None, :] + start_k
if not FULL_BLOCKS:
kt = load_fn(kt_ptrs, ld_offs_d, offs_k + start_k, head_dim,
seqlen_k)
else:
kt = load_fn(kt_ptrs, ld_offs_d, None, head_dim, seqlen_k)
if not FULL_BLOCKS:
vt = load_fn(vt_ptrs, ld_offs_d, offs_k + start_k, head_dim,
seqlen_k)
else:
vt = load_fn(vt_ptrs, ld_offs_d, None, head_dim, seqlen_k)
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
qk += dot(BLOCK_M, BLOCK_DMODEL, BLOCK_DMODEL, q, kt)
if not FULL_BLOCKS:
k_boundary = tl.full((BLOCK_M,), seqlen_k, dtype=tl.int32)
mask = offs_k_curr < k_boundary[:, None]
qk = tl.where(mask, qk, float('-inf'))
if CAUSAL:
qk = tl.where(offs_q[:, None] >= offs_k_curr, qk, float('-inf'))
if BIAS_TYPE == 0:
pass
elif BIAS_TYPE == 1:
bias = tl.load(B_block_ptr, boundary_check=(0, 1),
padding_option='zero')
qk += bias * bias_scale
else:
tl.static_assert(False, f'Unsupported BIAS_TYPE {BIAS_TYPE}')
p = tl.math.exp2(qk_scale * qk - l_i[:, None])
if not FULL_BLOCKS or CAUSAL:
if qk_scale == 0.0:
p = tl.where(libdevice.isnan(p), 0.0, p)
dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
dp += dot(BLOCK_M, BLOCK_DMODEL, BLOCK_DMODEL, do, vt)
if ENABLE_DROPOUT:
philox_offset = (batch_philox_offset + start_q * max_seqlen_k +
start_k)
keep = dropout_mask(philox_seed, philox_offset, dropout_p,
BLOCK_M, BLOCK_N, max_seqlen_k)
dp = tl.where(keep, dp * dropout_scale, 0)
ds = p * (dp - Di[:, None])
if BLOCK_M == 1:
dq += tl.view(kt, [BLOCK_DMODEL]) * ds.to(q.type.element_ty)
else:
dq = tl.dot(ds.to(q.type.element_ty), tl.trans(kt), acc=dq)
if BIAS_TYPE == 1:
if store_db:
tl.store(DB_block_ptr, ds.to(DB_block_ptr.type.element_ty),
boundary_check=(0, 1))
kt_ptrs += BLOCK_N * k_stride
vt_ptrs += BLOCK_N * v_stride
if BIAS_TYPE == 1:
B_block_ptr = tl.advance(B_block_ptr, (0, BLOCK_N))
DB_block_ptr = tl.advance(DB_block_ptr, (0, BLOCK_N))
return dq
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Strided Access",
"Tiled Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/ROCm/aotriton/blob/016f733e8ff746450e066f78bed68709ccd93e60/tritonsrc/bwd_inner_dq.py |
774b2df7-3308-4be6-9859-ced86bcaa8fc | gemm_a16w4.py | AlibabaPAI/FLASHNN | flashnn/triton_kernels/gemm_a16w4.py | 528a9301587f5fb135b25d973a87ba0a40a703a7 | 0 | @triton.jit
def _triton_gemm_a16w4_per_channel_kernel(A, B, C, scale_b, bias,
zero_points, M, N, K, rescale_m, rescale_n, rescale_k, stride_am,
stride_ak, stride_bn, stride_bk, stride_cm, stride_cn, stride_zpk,
stride_zpn, stride_scalek, stride_scalen, add_bias: tl.constexpr,
add_zero_points: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.
constexpr, BLOCK_K: tl.constexpr, GROUP_M: tl.constexpr, SPLIT_K: tl.
constexpr):
pid = tl.program_id(0)
pid_z = tl.program_id(1)
grid_m = tl.cdiv(M, BLOCK_M)
grid_n = tl.cdiv(N, BLOCK_N)
width = GROUP_M * grid_n
group_id = pid // width
group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
pid_m = group_id * GROUP_M + pid % group_size
pid_n = pid % width // group_size
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
rk = pid_z * BLOCK_K + tl.arange(0, BLOCK_K)
A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
B = B + (rbn[:, None] * stride_bn + rk[None, :] * stride_bk)
acc_l = tl.zeros((BLOCK_N, BLOCK_M), dtype=tl.float32)
acc_h = tl.zeros((BLOCK_N, BLOCK_M), dtype=tl.float32)
_A0 = tl.zeros((1, 1), dtype=A.dtype.element_ty)
_B0 = tl.zeros((1, 1), dtype=B.dtype.element_ty)
if add_zero_points:
offs_zero_points = pid_n * BLOCK_N * 2 + tl.arange(0, 2 * BLOCK_N)
zero_points_ptrs = zero_points + offs_zero_points
_ZERO_POINT0 = tl.zeros([1], dtype=zero_points.dtype.element_ty)
zero_points_vals = tl.load(zero_points_ptrs, mask=offs_zero_points <
2 * N, other=_ZERO_POINT0)
zero_points_vals = tl.reshape(zero_points_vals, (BLOCK_N, 2))
zp_l, zp_h = tl.split(zero_points_vals)
offs_scale = pid_n * BLOCK_N * 2 + tl.arange(0, 2 * BLOCK_N)
scale_ptrs = scale_b + offs_scale
_SCALE0 = tl.zeros([1], dtype=scale_b.dtype.element_ty)
scales = tl.load(scale_ptrs, mask=offs_scale < 2 * N, other=_SCALE0)
for k in range(0, tl.cdiv(K, BLOCK_K * SPLIT_K)):
k_remaining = K - k * (BLOCK_K * SPLIT_K)
b_int4_two = tl.load(B, mask=rk[None, :] < k_remaining, other=_B0)
b_int4_l = b_int4_two.__lshift__(4).to(tl.int8).__rshift__(4).to(A.
dtype.element_ty)
b_int4_h = b_int4_two.__rshift__(4).to(A.dtype.element_ty)
a = tl.load(A, mask=rk[None, :] < k_remaining, other=_A0)
a = tl.trans(a)
if add_zero_points:
b_int4_l -= zp_l[:, None]
b_int4_h -= zp_h[:, None]
acc_l += tl.dot(b_int4_l, a, out_dtype=tl.float32, allow_tf32=True)
acc_h += tl.dot(b_int4_h, a, out_dtype=tl.float32, allow_tf32=True)
A += BLOCK_K * SPLIT_K * stride_ak
B += BLOCK_K * SPLIT_K * stride_bk
acc_l = tl.trans(acc_l)
acc_h = tl.trans(acc_h)
acc = tl.interleave(acc_l, acc_h)
offs_scale = pid_n * BLOCK_N * 2 + tl.arange(0, 2 * BLOCK_N)
scale_ptrs = scale_b + offs_scale
_SCALE0 = tl.zeros([1], dtype=scale_b.dtype.element_ty)
scales = tl.load(scale_ptrs, mask=offs_scale < 2 * N, other=_SCALE0)
acc *= scales[None, :]
acc = acc.to(C.dtype.element_ty)
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N * 2 + tl.arange(0, 2 * BLOCK_N)
mask = (rm < M)[:, None] & (rn < 2 * N)[None, :]
if add_bias:
offs_bias = pid_n * BLOCK_N * 2 + tl.arange(0, 2 * BLOCK_N)
bias_ptrs = bias + offs_bias
_BIAS0 = tl.zeros([1], dtype=bias.dtype.element_ty)
bias_vals = tl.load(bias_ptrs, mask=offs_bias < 2 * N, other=_BIAS0)
if pid_z == 0:
acc += bias_vals[None, :]
if SPLIT_K == 1:
tl.store(C + rm[:, None] * stride_cm + rn[None, :], acc, mask=mask)
else:
tl.atomic_add(C + rm[:, None] * stride_cm + rn[None, :], acc, mask=mask
)
| {
"Data Type": [
"int8",
"fp32"
],
"Functionality": [
"Matrix Multiplication",
"Quantization"
],
"Memory Access Pattern": [
"Tiled",
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops",
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/gemm_a16w4.py |
32a86171-95db-478d-aace-264ce85654bc | chunk_h_parallel.py | sustcsonglin/flash-linear-attention | fla/ops/common/chunk_h_parallel.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'STORE_INITIAL_STATE_GRADIENT': lambda args: args['dh0'
] is not None, 'USE_FINAL_STATE_GRADIENT': lambda args: args['dht'] is not
None, 'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({'BK': BK, 'BV': BV}, num_warps=
num_warps, num_stages=num_stages) for BK in [32, 64, 128] for BV in [32,
64, 128] for num_warps in [2, 4, 8] for num_stages in [2, 3, 4]], key=[
'BT', 'USE_G', 'USE_GK', 'USE_GV'])
@triton.jit
def chunk_bwd_kernel_dh_parallel(q, g, gk, gv, do, dh, dht, dh0, offsets,
indices, scale, T: tl.constexpr, HQ: tl.constexpr, H: tl.constexpr, K:
tl.constexpr, V: tl.constexpr, BT: tl.constexpr, BK: tl.constexpr, BV:
tl.constexpr, NG: tl.constexpr, USE_G: tl.constexpr, USE_GK: tl.
constexpr, USE_GV: tl.constexpr, STORE_INITIAL_STATE_GRADIENT: tl.
constexpr, USE_FINAL_STATE_GRADIENT: tl.constexpr, USE_OFFSETS: tl.
constexpr, HEAD_FIRST: tl.constexpr):
i_kv, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
NV = tl.cdiv(V, BV)
i_k, i_v = i_kv // NV, i_kv % NV
i_b, i_hq, i_bg = i_bh // HQ, i_bh % HQ, i_bh // NG
i_h = i_hq // NG
if USE_OFFSETS:
i_tg = i_t
i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices +
i_t * 2 + 1).to(tl.int32)
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
T = eos - bos
NT = tl.cdiv(T, BT)
else:
bos, eos = i_b * T, i_b * T + T
NT = tl.cdiv(T, BT)
i_n, i_tg = i_b, i_b * NT + i_t
i_nh = i_n * HQ + i_hq
if HEAD_FIRST:
p_q = tl.make_block_ptr(q + i_bh * T * K, (K, T), (1, K), (i_k * BK,
i_t * BT), (BK, BT), (0, 1))
p_do = tl.make_block_ptr(do + i_bh * T * V, (T, V), (V, 1), (i_t *
BT, i_v * BV), (BT, BV), (1, 0))
p_dh = tl.make_block_ptr(dh + (i_bh * NT + i_t) * K * V, (K, V), (V,
1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
else:
p_q = tl.make_block_ptr(q + (bos * HQ + i_hq) * K, (K, T), (1, HQ *
K), (i_k * BK, i_t * BT), (BK, BT), (0, 1))
p_do = tl.make_block_ptr(do + (bos * HQ + i_hq) * V, (T, V), (HQ *
V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_dh = tl.make_block_ptr(dh + (i_tg * H + i_h) * K * V, (K, V), (V,
1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
if i_t == NT - 1:
if USE_FINAL_STATE_GRADIENT:
p_dht = tl.make_block_ptr(dht + i_nh * K * V, (K, V), (V, 1), (
i_k * BK, i_v * BV), (BK, BV), (1, 0))
b_dh = tl.load(p_dht, boundary_check=(0, 1)).to(tl.float32)
else:
b_dh = tl.zeros([BK, BV], dtype=tl.float32)
tl.store(p_dh, b_dh.to(p_dh.dtype.element_ty), boundary_check=(0, 1))
b_q = tl.load(p_q, boundary_check=(0, 1))
b_q = (b_q * scale).to(b_q.dtype)
b_do = tl.load(p_do, boundary_check=(0, 1))
if USE_G:
if HEAD_FIRST:
p_g = g + i_bg * T + i_t * BT + tl.arange(0, BT)
p_g = tl.max_contiguous(tl.multiple_of(p_g, BT), BT)
else:
p_g = g + (bos + i_t * BT + tl.arange(0, BT)) * H + i_h
b_g = tl.load(p_g, mask=i_t * BT + tl.arange(0, BT) < T, other=0.0)
b_q = (b_q * tl.exp(b_g)[None, :]).to(b_q.dtype)
if USE_GK:
if HEAD_FIRST:
p_gk = tl.make_block_ptr(gk + i_bg * T * K, (K, T), (1, K), (
i_k * BK, i_t * BT), (BK, BT), (0, 1))
else:
p_gk = tl.make_block_ptr(gk + (bos * H + i_h) * K, (K, T), (1,
H * K), (i_k * BK, i_t * BT), (BK, BT), (0, 1))
b_gk = tl.load(p_gk, boundary_check=(0, 1))
b_q = (b_q * tl.exp(b_gk)).to(b_q.dtype)
if USE_GV:
if HEAD_FIRST:
p_gv = tl.make_block_ptr(gv + i_bg * T * V, (T, V), (V, 1), (
i_t * BT, i_v * BV), (BT, BV), (1, 0))
else:
p_gv = tl.make_block_ptr(gv + (bos * H + i_h) * V, (T, V), (H *
V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
b_gv = tl.load(p_gv, boundary_check=(0, 1))
b_do = (b_do * tl.exp(b_gv)).to(b_do.dtype)
b_dh = tl.dot(b_q, b_do)
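    # This chunk's contribution dh_t = q_t^T dO_t (with scale and gating applied) is written into
    # the slot of the previous time chunk, or into dh0 for the first chunk, reflecting the
    # reverse-time recurrence for dh.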
if i_t > 0:
if HEAD_FIRST:
p_dh = tl.make_block_ptr(dh + (i_bh * NT + i_t - 1) * K * V, (K,
V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
else:
p_dh = tl.make_block_ptr(dh + ((i_tg - 1) * H + i_h) * K * V, (
K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
tl.store(p_dh, b_dh.to(p_dh.dtype.element_ty), boundary_check=(0, 1))
elif STORE_INITIAL_STATE_GRADIENT:
p_dh0 = tl.make_block_ptr(dh0 + i_nh * K * V, (K, V), (V, 1), (i_k *
BK, i_v * BV), (BK, BV), (1, 0))
tl.store(p_dh0, b_dh.to(p_dh0.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Strided Access",
"Tiled Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops",
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/common/chunk_h_parallel.py |
10461b26-96c5-475b-b85e-1317b069b740 | mlstm_scan.py | LukasBluebaum/xLSTM-Triton-CUDA-Implementation | mlstm_scan.py | 6fb49b89cc74e7dadd0f3d56db05684bb4e86f4b | 0 | @triton.jit
def scan_op(x1, y1, x2, y2):
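    # Associative combine for a first-order linear recurrence: composing h -> x1*h + y1 with
    # h -> x2*h + y2 gives h -> (x2*x1)*h + (x2*y1 + y2), so this op can drive an associative scan.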
z1 = x2 * x1
z2 = x2 * y1 + y2
return z1, z2
| {
"Data Type": [],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/LukasBluebaum/xLSTM-Triton-CUDA-Implementation/blob/6fb49b89cc74e7dadd0f3d56db05684bb4e86f4b/mlstm_scan.py |
e72ea98e-3167-44ec-923d-f412cbff14b8 | naive_associative_rnn_scan.py | TushaarGVS/linear-rnn | linear_rnn/triton/naive_associative_rnn_scan.py | 48320589b73154484be7d09a144923a2b9e56b85 | 0 | @triton.jit
def _associative_scan_op(a_l, x_l, a_r, x_r):
return a_r * a_l, a_r * x_l + x_r
| {
"Data Type": [],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"Apache"
] | https://github.com/TushaarGVS/linear-rnn/blob/48320589b73154484be7d09a144923a2b9e56b85/linear_rnn/triton/naive_associative_rnn_scan.py |
489248a3-bb60-40d3-a95f-13874bdd6bf8 | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/gated_delta_rule/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({}, num_warps=num_warps) for
num_warps in [2, 4]], key=['BT', 'BK', 'BV'])
@triton.jit
def chunk_gated_delta_rule_bwd_kernel_dqkw(q, k, v, w, g, h, do, dh, dq, dk,
dv, dw, dg, offsets, indices, scale, T: tl.constexpr, H: tl.constexpr,
K: tl.constexpr, V: tl.constexpr, BT: tl.constexpr, BK: tl.constexpr,
BV: tl.constexpr, NT: tl.constexpr, USE_OFFSETS: tl.constexpr,
HEAD_FIRST: tl.constexpr):
i_k, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_b, i_h = i_bh // H, i_bh % H
if USE_OFFSETS:
i_tg = i_t
i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices +
i_t * 2 + 1).to(tl.int32)
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
T = eos - bos
NT = tl.cdiv(T, BT)
else:
NT = tl.cdiv(T, BT)
i_tg = i_b * NT + i_t
bos, eos = i_b * T, i_b * T + T
o_i = tl.arange(0, BT)
if HEAD_FIRST:
p_q = tl.make_block_ptr(q + i_bh * T * K, (K, T), (1, K), (i_k * BK,
i_t * BT), (BK, BT), (0, 1))
p_k = tl.make_block_ptr(k + i_bh * T * K, (T, K), (K, 1), (i_t * BT,
i_k * BK), (BT, BK), (1, 0))
else:
p_q = tl.make_block_ptr(q + (bos * H + i_h) * K, (K, T), (1, H * K),
(i_k * BK, i_t * BT), (BK, BT), (0, 1))
p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (T, K), (H * K, 1),
(i_t * BT, i_k * BK), (BT, BK), (1, 0))
b_dq = tl.zeros([BT, BK], dtype=tl.float32)
b_dk = tl.zeros([BT, BK], dtype=tl.float32)
b_dw = tl.zeros([BT, BK], dtype=tl.float32)
b_ds = tl.zeros([BT, BT], dtype=tl.float32)
b_dg = tl.zeros([BT], dtype=tl.float32)
b_dg_last = tl.zeros([1], dtype=tl.float32)
last_idx = min((i_t + 1) * BT, T) - 1
if HEAD_FIRST:
b_g_last = tl.load(g + i_bh * T + last_idx)
else:
b_g_last = tl.load(g + (bos + last_idx) * H + i_h)
for i_v in range(tl.cdiv(V, BV)):
if HEAD_FIRST:
p_v = tl.make_block_ptr(v + i_bh * T * V, (T, V), (V, 1), (i_t *
BT, i_v * BV), (BT, BV), (1, 0))
p_do = tl.make_block_ptr(do + i_bh * T * V, (T, V), (V, 1), (
i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_dv = tl.make_block_ptr(dv + i_bh * T * V, (T, V), (V, 1), (
i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_h = tl.make_block_ptr(h + i_bh * NT * K * V + i_t * K * V, (V,
K), (1, V), (i_v * BV, i_k * BK), (BV, BK), (0, 1))
p_dh = tl.make_block_ptr(dh + i_bh * NT * K * V + i_t * K * V,
(V, K), (1, V), (i_v * BV, i_k * BK), (BV, BK), (0, 1))
else:
p_v = tl.make_block_ptr(v + (bos * H + i_h) * V, (T, V), (H * V,
1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_do = tl.make_block_ptr(do + (bos * H + i_h) * V, (T, V), (H *
V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_dv = tl.make_block_ptr(dv + (bos * H + i_h) * V, (T, V), (H *
V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_h = tl.make_block_ptr(h + (i_tg * H + i_h) * K * V, (V, K), (
1, V), (i_v * BV, i_k * BK), (BV, BK), (0, 1))
p_dh = tl.make_block_ptr(dh + (i_tg * H + i_h) * K * V, (V, K),
(1, V), (i_v * BV, i_k * BK), (BV, BK), (0, 1))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_do = tl.load(p_do, boundary_check=(0, 1))
b_h = tl.load(p_h, boundary_check=(0, 1))
b_dh = tl.load(p_dh, boundary_check=(0, 1))
b_dg_last += tl.sum(b_h * b_dh)
b_ds += tl.dot(b_do, tl.trans(b_v), allow_tf32=False)
b_dq += tl.dot(b_do, b_h, allow_tf32=False)
b_dk += tl.dot(b_v, b_dh, allow_tf32=False)
b_dv = tl.load(p_dv, boundary_check=(0, 1))
b_dw += tl.dot(b_dv.to(b_v.dtype), b_h.to(b_v.dtype), allow_tf32=False)
b_dg_last *= tl.exp(b_g_last)
if HEAD_FIRST:
p_q = tl.make_block_ptr(q + i_bh * T * K, (T, K), (K, 1), (i_t * BT,
i_k * BK), (BT, BK), (1, 0))
p_k = tl.make_block_ptr(k + i_bh * T * K, (T, K), (K, 1), (i_t * BT,
i_k * BK), (BT, BK), (1, 0))
p_w = tl.make_block_ptr(w + i_bh * T * K, (T, K), (K, 1), (i_t * BT,
i_k * BK), (BT, BK), (1, 0))
p_dq = tl.make_block_ptr(dq + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
p_dk = tl.make_block_ptr(dk + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
p_dw = tl.make_block_ptr(dw + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
p_g = tl.make_block_ptr(g + i_bh * T, (T,), (1,), (i_t * BT,), (BT,
), (0,))
p_dg = tl.make_block_ptr(dg + i_bh * T, (T,), (1,), (i_t * BT,), (
BT,), (0,))
else:
p_q = tl.make_block_ptr(q + (bos * H + i_h) * K, (T, K), (H * K, 1),
(i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (T, K), (H * K, 1),
(i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_w = tl.make_block_ptr(w + (bos * H + i_h) * K, (T, K), (H * K, 1),
(i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_dq = tl.make_block_ptr(dq + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_dk = tl.make_block_ptr(dk + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_dw = tl.make_block_ptr(dw + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_g = tl.make_block_ptr(g + bos * H + i_h, (T,), (H,), (i_t * BT,),
(BT,), (0,))
p_dg = tl.make_block_ptr(dg + bos * H + i_h, (T,), (H,), (i_t * BT,
), (BT,), (0,))
b_q = tl.load(p_q, boundary_check=(0, 1))
b_k = tl.load(p_k, boundary_check=(0, 1))
b_g = tl.load(p_g, boundary_check=(0,))
b_w = tl.load(p_w, boundary_check=(0, 1))
b_g_exp_qw = tl.exp(b_g)
b_dq *= b_g_exp_qw[:, None] * scale
b_dg += tl.sum(b_dq * b_q, axis=1)
b_dw *= b_g_exp_qw[:, None]
b_dg -= tl.sum(b_dw * b_w, axis=1)
b_dk *= tl.exp(b_g_last - b_g)[:, None]
b_dg -= tl.sum(b_dk * b_k, axis=1)
b_dg_last += tl.sum(b_dk * b_k)
b_g_exp_qw = None
b_ds = tl.where(o_i[:, None] >= o_i[None, :], b_ds * scale * tl.exp(b_g
[:, None] - b_g[None, :]), 0).to(b_q.dtype)
b_dg_mask = tl.dot(b_q, tl.trans(b_k), allow_tf32=False) * b_ds
b_dg += tl.sum(b_dg_mask, axis=1)
b_dg -= tl.sum(b_dg_mask, axis=0)
b_dq += tl.dot(b_ds, b_k, allow_tf32=False)
b_dk += tl.trans(tl.dot(tl.trans(b_q), b_ds, allow_tf32=False))
b_dg = tl.where(o_i < min(BT, T - i_t * BT) - 1, b_dg, b_dg + b_dg_last)
tl.store(p_dq, b_dq.to(p_dq.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_dk, b_dk.to(p_dk.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_dw, -b_dw.to(p_dw.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_dg, b_dg.to(p_dg.dtype.element_ty), boundary_check=(0,))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Tiled",
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"Memory-Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gated_delta_rule/chunk.py |
ba1125e7-41b5-4aee-9a38-9f0cb8548a00 | math.py | BobMcDear/attorch | attorch/math.py | da06cb6236bb47195e33fe3986ed21c675ed94cc | 0 | @triton.jit
def accum_linear(accum, input1, input2, fp16: tl.constexpr, tf32: tl.constexpr
):
"""
Accumulates matrix multiplications of input tensors for linear functions.
Args:
accum: Accumulator holding aggregation of matrix multiplications.
The accumulator must be of shape [BLOCK_SIZE1, BLOCK_SIZE3].
input1: First operand of matrix multiplication.
The operand must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
input2: Second operand of matrix multiplication.
The operand must be of shape [BLOCK_SIZE2, BLOCK_SIZE3].
fp16: Flag for converting operands to FP16.
tf32: Flag for performing matrix multiplication in TF32.
Returns:
Accumulator with the result of the new matrix multiplication added to it.
"""
if fp16:
input1 = input1.to(tl.float16)
input2 = input2.to(tl.float16)
return accum + tl.dot(input1, input2, allow_tf32=tf32)
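# Hypothetical usage inside a tiled GEMM inner loop (names are illustrative, not from this repo):
#   acc = accum_linear(acc, a_tile, b_tile, fp16=True, tf32=False)
# where acc is a [BLOCK_SIZE1, BLOCK_SIZE3] fp32 accumulator and a_tile / b_tile are the current K-slice tiles.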
| {
"Data Type": [
"fp16"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/BobMcDear/attorch/blob/da06cb6236bb47195e33fe3986ed21c675ed94cc/attorch/math.py |
cecb82d3-5254-4b2d-9ca3-137d666bad12 | tuned_bwd.py | ROCm/aotriton | tritonsrc/tuned_bwd.py | 016f733e8ff746450e066f78bed68709ccd93e60 | 0 | @triton.autotune(configs=TRITON_CONFIG_LIST_BWD, key=['BLOCK_DMODEL',
'max_seqlen_q', 'max_seqlen_k'])
@triton.jit
def tuned_bwd_kernel_dk_dv(Q, K, V, B, sm_scale, Out, DO, DK, DV, L, D,
stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh,
stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn,
stride_bz, stride_bh, stride_bm, stride_bn, stride_oz, stride_oh,
stride_om, stride_ok, stride_dkz, stride_dkh, stride_dkn, stride_dkk,
stride_dvz, stride_dvh, stride_dvk, stride_dvn, cu_seqlens_q,
cu_seqlens_k, num_seqlens, max_seqlen_q, max_seqlen_k, head_dim,
dropout_p, philox_seed, philox_offset_base, BLOCK_M: tl.constexpr,
BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr, CAUSAL: tl.constexpr,
ENABLE_DROPOUT: tl.constexpr, PADDED_HEAD: tl.constexpr, BIAS_TYPE: tl.
constexpr):
bare_bwd_kernel_dk_dv(Q, K, V, B, sm_scale, Out, DO, DK, DV, L, D,
stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh,
stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn,
stride_bz, stride_bh, stride_bm, stride_bn, stride_oz, stride_oh,
stride_om, stride_ok, stride_dkz, stride_dkh, stride_dkn,
stride_dkk, stride_dvz, stride_dvh, stride_dvk, stride_dvn,
cu_seqlens_q, cu_seqlens_k, num_seqlens, max_seqlen_q, max_seqlen_k,
head_dim, dropout_p, philox_seed, philox_offset_base, BLOCK_M,
BLOCK_DMODEL, BLOCK_N, CAUSAL, ENABLE_DROPOUT, PADDED_HEAD=
PADDED_HEAD, BIAS_TYPE=BIAS_TYPE)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Tiled",
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"Memory-Bound"
]
} | [
"MIT"
] | https://github.com/ROCm/aotriton/blob/016f733e8ff746450e066f78bed68709ccd93e60/tritonsrc/tuned_bwd.py |
0bb116ff-c023-4106-b0ff-399e5628a32e | layernorm.py | sustcsonglin/flash-linear-attention | fla/modules/layernorm.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'RECOMPUTE_OUTPUT': lambda args: args['Y'] is not None})
@triton.autotune(configs=[triton.Config({}, num_warps=1), triton.Config({},
num_warps=2), triton.Config({}, num_warps=4), triton.Config({},
num_warps=8), triton.Config({}, num_warps=16), triton.Config({},
num_warps=32)], key=['N', 'HAS_DRESIDUAL', 'STORE_DRESIDUAL',
'IS_RMS_NORM', 'HAS_BIAS'])
@triton.jit
def layer_norm_bwd_kernel(X, W, B, Y, DY, DX, DW, DB, DRESIDUAL,
DRESIDUAL_IN, Mean, Rstd, stride_x_row, stride_y_row, stride_dy_row,
stride_dx_row, stride_dres_row, stride_dres_in_row, M, N, G,
rows_per_program, programs_per_group, IS_RMS_NORM: tl.constexpr,
BLOCK_N: tl.constexpr, HAS_DRESIDUAL: tl.constexpr, STORE_DRESIDUAL: tl
.constexpr, HAS_WEIGHT: tl.constexpr, HAS_BIAS: tl.constexpr,
RECOMPUTE_OUTPUT: tl.constexpr):
row_block_id = tl.program_id(0)
group_id, program_id_in_group = (row_block_id // programs_per_group,
row_block_id % programs_per_group)
row_start = group_id + program_id_in_group * G * rows_per_program
row_end = min(row_start + G * rows_per_program, M)
cols = tl.arange(0, BLOCK_N)
mask = cols < N
if HAS_WEIGHT:
w = tl.load(W + group_id * stride_x_row + cols, mask=mask).to(tl.
float32)
dw = tl.zeros((BLOCK_N,), dtype=tl.float32)
if RECOMPUTE_OUTPUT and HAS_BIAS:
b = tl.load(B + group_id * stride_x_row + cols, mask=mask, other=0.0
).to(tl.float32)
if HAS_BIAS:
db = tl.zeros((BLOCK_N,), dtype=tl.float32)
for row in range(row_start, row_end, G):
x = tl.load(X + row * stride_x_row + cols, mask=mask, other=0).to(tl
.float32)
dy = tl.load(DY + row * stride_dy_row + cols, mask=mask, other=0).to(tl
.float32)
if not IS_RMS_NORM:
mean = tl.load(Mean + row)
rstd = tl.load(Rstd + row)
xhat = (x - mean) * rstd if not IS_RMS_NORM else x * rstd
xhat = tl.where(mask, xhat, 0.0)
if RECOMPUTE_OUTPUT:
y = xhat * w if HAS_WEIGHT else xhat
if HAS_BIAS:
y = y + b
tl.store(Y + row * stride_y_row + cols, y, mask=mask)
wdy = dy
if HAS_WEIGHT:
wdy = dy * w
dw += dy * xhat
if HAS_BIAS:
db += dy
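        # LayerNorm backward: dx = (wdy - (xhat * mean(xhat * wdy) + mean(wdy))) * rstd; the
        # RMSNorm branch omits the mean(wdy) term since no mean is subtracted in the forward.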
if not IS_RMS_NORM:
c1 = tl.sum(xhat * wdy, axis=0) / N
c2 = tl.sum(wdy, axis=0) / N
dx = (wdy - (xhat * c1 + c2)) * rstd
else:
c1 = tl.sum(xhat * wdy, axis=0) / N
dx = (wdy - xhat * c1) * rstd
if HAS_DRESIDUAL:
dres = tl.load(DRESIDUAL + row * stride_dres_row + cols, mask=
mask, other=0).to(tl.float32)
dx += dres
if STORE_DRESIDUAL:
tl.store(DRESIDUAL_IN + row * stride_dres_in_row + cols, dx,
mask=mask)
tl.store(DX + row * stride_dx_row + cols, dx, mask=mask)
if HAS_WEIGHT:
tl.store(DW + row_block_id * N + cols, dw, mask=mask)
if HAS_BIAS:
tl.store(DB + row_block_id * N + cols, db, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization",
"Backpropagation"
],
"Memory Access Pattern": [
"Tiled",
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/modules/layernorm.py |
bbfcb74d-d097-408e-89dc-20d7bdf56c4b | 09-experimental-tma-matrix-multiplication.py | hgl71964/SIP | benchmarks/09-experimental-tma-matrix-multiplication.py | 767ed720d4bd5cee21670b125b62c434258c532b | 0 | @triton.autotune(configs=[triton.Config({'BLOCK_SIZE_M': 128,
'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 64, 'GROUP_SIZE_M': 8}, num_stages
=7, num_warps=4)], key=['M', 'N', 'K'])
@triton.jit
def matmul_kernel(a_ptr, b_ptr, z_ptr, M, N, K, stride_am, stride_ak,
stride_bk, stride_bn, stride_zm, stride_zn, BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M:
tl.constexpr, A_ORDER_0: tl.constexpr, A_ORDER_1: tl.constexpr,
B_ORDER_0: tl.constexpr, B_ORDER_1: tl.constexpr):
pid = tl.program_id(axis=0)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + pid % group_size_m
pid_n = pid % num_pid_in_group // group_size_m
block_offset_m = pid_m * BLOCK_SIZE_M
block_offset_n = pid_n * BLOCK_SIZE_N
a_tile_ptr = tl.make_block_ptr(base=a_ptr, shape=(M, K), strides=(
stride_am, stride_ak), offsets=(block_offset_m, 0), block_shape=(
BLOCK_SIZE_M, BLOCK_SIZE_K), order=(A_ORDER_0, A_ORDER_1))
b_tile_ptr = tl.make_block_ptr(base=b_ptr, shape=(K, N), strides=(
stride_bk, stride_bn), offsets=(0, block_offset_n), block_shape=(
BLOCK_SIZE_K, BLOCK_SIZE_N), order=(B_ORDER_0, B_ORDER_1))
z = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
offs_m = block_offset_m + tl.arange(0, BLOCK_SIZE_M)
offs_n = block_offset_n + tl.arange(0, BLOCK_SIZE_N)
z_ptrs = z_ptr + offs_m[:, None] * stride_zm + offs_n[None, :] * stride_zn
mask = (offs_m < M)[:, None] & (offs_n < N)[None, :]
for k in range(0, K, BLOCK_SIZE_K):
a = tl.load(a_tile_ptr)
b = tl.load(b_tile_ptr)
z += tl.dot(a, b)
a_tile_ptr = tl.advance(a_tile_ptr, [0, BLOCK_SIZE_K])
b_tile_ptr = tl.advance(b_tile_ptr, [BLOCK_SIZE_K, 0])
z = z.to(tl.float16)
tl.store(z_ptrs, z, mask=mask)
| {
"Data Type": [
"fp16"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled",
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/hgl71964/SIP/blob/767ed720d4bd5cee21670b125b62c434258c532b/benchmarks/09-experimental-tma-matrix-multiplication.py |
6901e5d7-7826-4fb5-b04f-6731b4bdf651 | lstm_bw.py | NX-AI/flashrnn | flashrnn/flashrnn/triton_fused/lstm_bw.py | 3fca666a81c8740af4878d7bc5e2a51900e4fe14 | 0 | @triton.jit
def _backward_sequence_kernel(delta_states_all_outside,
delta_states_last_outside, R, states_all, gates_all,
delta_states_initial, delta_Wx, delta_R, delta_b, T: tl.constexpr, NS:
tl.constexpr, B: tl.constexpr, NH: tl.constexpr, DH: tl.constexpr, NGI:
tl.constexpr, NGR: tl.constexpr, siz_B: tl.constexpr, DTYPE: tl.
constexpr=tl.float32, backward_recurrent_clip_val: tl.constexpr=-1.0):
idx_b_NH, idx_b_B = tl.program_id(0), tl.program_id(1)
str_matR_B = NH * NGR * DH * DH
str_matR_NH = NGR * DH * DH
str_matR_NGR = DH * DH
str_matStatesAll_NH = (T + 1) * NS * B * DH
str_matStatesAll_T = NS * B * DH
str_matGatesAll_NH = T * NGI * B * DH
str_matGatesAll_T = NGI * B * DH
str_delta_states_all_outside_NH = T * NS * B * DH
str_delta_states_all_outside_T = NS * B * DH
str_matDeltaWx_NH = T * NGI * B * DH
str_matDeltaWx_T = NGI * B * DH
matDeltaHtrans_last_ptr = tl.make_block_ptr(base=
delta_states_last_outside + idx_b_NH * NS * B * DH + 0 * B * DH,
shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
block_shape=(siz_B, DH), order=(0, 1))
matDeltaH_tplus1 = tl.load(matDeltaHtrans_last_ptr).to(tl.float32)
matDeltaCtrans_last_ptr = tl.make_block_ptr(base=
delta_states_last_outside + idx_b_NH * NS * B * DH + 1 * B * DH,
shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
block_shape=(siz_B, DH), order=(0, 1))
matDeltaC_tplus1 = tl.load(matDeltaCtrans_last_ptr).to(tl.float32)
matR_i_ptr = tl.make_block_ptr(base=R + idx_b_NH * str_matR_NH + 0 *
str_matR_NGR, shape=(DH, DH), strides=(DH, 1), offsets=(0, 0),
block_shape=(DH, DH), order=(0, 1))
matR_i = tl.load(matR_i_ptr)
matR_f_ptr = tl.make_block_ptr(base=R + idx_b_NH * str_matR_NH + 1 *
str_matR_NGR, shape=(DH, DH), strides=(DH, 1), offsets=(0, 0),
block_shape=(DH, DH), order=(0, 1))
matR_f = tl.load(matR_f_ptr)
matR_z_ptr = tl.make_block_ptr(base=R + idx_b_NH * str_matR_NH + 2 *
str_matR_NGR, shape=(DH, DH), strides=(DH, 1), offsets=(0, 0),
block_shape=(DH, DH), order=(0, 1))
matR_z = tl.load(matR_z_ptr)
matR_o_ptr = tl.make_block_ptr(base=R + idx_b_NH * str_matR_NH + 3 *
str_matR_NGR, shape=(DH, DH), strides=(DH, 1), offsets=(0, 0),
block_shape=(DH, DH), order=(0, 1))
matR_o = tl.load(matR_o_ptr)
matDeltaR_i = tl.zeros((DH, DH), dtype=tl.float32)
matDeltaR_f = tl.zeros((DH, DH), dtype=tl.float32)
matDeltaR_z = tl.zeros((DH, DH), dtype=tl.float32)
matDeltaR_o = tl.zeros((DH, DH), dtype=tl.float32)
vecDeltaB_i = tl.zeros((DH,), dtype=tl.float32)
vecDeltaB_f = tl.zeros((DH,), dtype=tl.float32)
vecDeltaB_z = tl.zeros((DH,), dtype=tl.float32)
vecDeltaB_o = tl.zeros((DH,), dtype=tl.float32)
for idx_t in range(T - 1, -1, -1):
matG_i_ptr = tl.make_block_ptr(base=gates_all + idx_b_NH *
str_matGatesAll_NH + idx_t * str_matGatesAll_T + 0 * B * DH,
shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
block_shape=(siz_B, DH), order=(0, 1))
matG_i = tl.load(matG_i_ptr)
matG_f_ptr = tl.make_block_ptr(base=gates_all + idx_b_NH *
str_matGatesAll_NH + idx_t * str_matGatesAll_T + 1 * B * DH,
shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
block_shape=(siz_B, DH), order=(0, 1))
matG_f = tl.load(matG_f_ptr)
matG_z_ptr = tl.make_block_ptr(base=gates_all + idx_b_NH *
str_matGatesAll_NH + idx_t * str_matGatesAll_T + 2 * B * DH,
shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
block_shape=(siz_B, DH), order=(0, 1))
matG_z = tl.load(matG_z_ptr)
matG_o_ptr = tl.make_block_ptr(base=gates_all + idx_b_NH *
str_matGatesAll_NH + idx_t * str_matGatesAll_T + 3 * B * DH,
shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
block_shape=(siz_B, DH), order=(0, 1))
matG_o = tl.load(matG_o_ptr)
matC_t_ptr = tl.make_block_ptr(base=states_all + idx_b_NH *
str_matStatesAll_NH + (idx_t + 1) * str_matStatesAll_T + 1 * B *
DH, shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0
), block_shape=(siz_B, DH), order=(0, 1))
matC_t = tl.load(matC_t_ptr)
matC_tminus1_ptr = tl.make_block_ptr(base=states_all + idx_b_NH *
str_matStatesAll_NH + idx_t * str_matStatesAll_T + 1 * B * DH,
shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
block_shape=(siz_B, DH), order=(0, 1))
matC_tminus1 = tl.load(matC_tminus1_ptr)
matH_tminus1_ptr = tl.make_block_ptr(base=states_all + idx_b_NH *
str_matStatesAll_NH + idx_t * str_matStatesAll_T + 0 * B * DH,
shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
block_shape=(siz_B, DH), order=(0, 1))
matH_tminus1 = tl.load(matH_tminus1_ptr)
matDeltaCtrans_out_t_ptr = tl.make_block_ptr(base=
delta_states_all_outside + idx_b_NH *
str_delta_states_all_outside_NH + idx_t *
str_delta_states_all_outside_T + 1 * B * DH, shape=(B, DH),
strides=(DH, 1), offsets=(idx_b_B * siz_B, 0), block_shape=(
siz_B, DH), order=(0, 1))
matDeltaCtrans_out_t = tl.load(matDeltaCtrans_out_t_ptr)
matDeltaHtrans_out_t_ptr = tl.make_block_ptr(base=
delta_states_all_outside + idx_b_NH *
str_delta_states_all_outside_NH + idx_t *
str_delta_states_all_outside_T + 0 * B * DH, shape=(B, DH),
strides=(DH, 1), offsets=(idx_b_B * siz_B, 0), block_shape=(
siz_B, DH), order=(0, 1))
matDeltaHtrans_out_t = tl.load(matDeltaHtrans_out_t_ptr)
matDeltaH_t = matDeltaHtrans_out_t + matDeltaH_tplus1
matDeltaC_t = matDeltaCtrans_out_t + matDeltaC_tplus1
matCtrans_t_tanh = triton_tanh(matC_t)
matDeltaC_t = matDeltaC_t + matDeltaH_t * matG_o * (1 -
matCtrans_t_tanh * matCtrans_t_tanh)
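        # Gate gradients: sigmoid'(s) = s * (1 - s) for the i/f/o gates and tanh'(z) = 1 - z^2 for
        # the cell input, applied to the already-activated gate values loaded above.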
matDeltaGI = matDeltaC_t * matG_z * (1 - matG_i) * matG_i
matDeltaGF = matDeltaC_t * matC_tminus1 * (1 - matG_f) * matG_f
matDeltaGZ = matDeltaC_t * matG_i * (1 - matG_z * matG_z)
matDeltaGO = matDeltaH_t * matCtrans_t_tanh * (1 - matG_o) * matG_o
matDeltaC_tminus1 = matDeltaC_t * matG_f
matDeltaH_tminus1 = tl.dot(matDeltaGI.to(DTYPE), matR_i)
matDeltaH_tminus1 += tl.dot(matDeltaGF.to(DTYPE), matR_f)
matDeltaH_tminus1 += tl.dot(matDeltaGZ.to(DTYPE), matR_z)
matDeltaH_tminus1 += tl.dot(matDeltaGO.to(DTYPE), matR_o)
matDeltaR_i += tl.dot(tl.trans(matDeltaGI.to(DTYPE)), matH_tminus1)
matDeltaR_f += tl.dot(tl.trans(matDeltaGF.to(DTYPE)), matH_tminus1)
matDeltaR_z += tl.dot(tl.trans(matDeltaGZ.to(DTYPE)), matH_tminus1)
matDeltaR_o += tl.dot(tl.trans(matDeltaGO.to(DTYPE)), matH_tminus1)
vecDeltaB_i += tl.sum(matDeltaGI, axis=0)
vecDeltaB_f += tl.sum(matDeltaGF, axis=0)
vecDeltaB_z += tl.sum(matDeltaGZ, axis=0)
vecDeltaB_o += tl.sum(matDeltaGO, axis=0)
matDeltaGI_ptr = tl.make_block_ptr(base=delta_Wx + idx_b_NH *
str_matDeltaWx_NH + idx_t * str_matDeltaWx_T + 0 * B * DH,
shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
block_shape=(siz_B, DH), order=(0, 1))
tl.store(matDeltaGI_ptr, matDeltaGI.to(DTYPE))
matDeltaGF_ptr = tl.make_block_ptr(base=delta_Wx + idx_b_NH *
str_matDeltaWx_NH + idx_t * str_matDeltaWx_T + 1 * B * DH,
shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
block_shape=(siz_B, DH), order=(0, 1))
tl.store(matDeltaGF_ptr, matDeltaGF.to(DTYPE))
matDeltaGZ_ptr = tl.make_block_ptr(base=delta_Wx + idx_b_NH *
str_matDeltaWx_NH + idx_t * str_matDeltaWx_T + 2 * B * DH,
shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
block_shape=(siz_B, DH), order=(0, 1))
tl.store(matDeltaGZ_ptr, matDeltaGZ.to(DTYPE))
matDeltaGO_ptr = tl.make_block_ptr(base=delta_Wx + idx_b_NH *
str_matDeltaWx_NH + idx_t * str_matDeltaWx_T + 3 * B * DH,
shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
block_shape=(siz_B, DH), order=(0, 1))
tl.store(matDeltaGO_ptr, matDeltaGO.to(DTYPE))
matDeltaH_tplus1 = matDeltaH_tminus1
matDeltaC_tplus1 = matDeltaC_tminus1
matDeltaHtrans_initial_ptr = tl.make_block_ptr(base=
delta_states_initial + idx_b_NH * NS * B * DH + 0 * B * DH, shape=(
B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0), block_shape=
(siz_B, DH), order=(0, 1))
tl.store(matDeltaHtrans_initial_ptr, matDeltaH_tplus1.to(DTYPE))
matDeltaCtrans_initial_ptr = tl.make_block_ptr(base=
delta_states_initial + idx_b_NH * NS * B * DH + 1 * B * DH, shape=(
B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0), block_shape=
(siz_B, DH), order=(0, 1))
tl.store(matDeltaCtrans_initial_ptr, matDeltaC_tplus1.to(DTYPE))
matDeltaR_i_ptr = tl.make_block_ptr(base=delta_R + idx_b_B * str_matR_B +
idx_b_NH * str_matR_NH + 0 * str_matR_NGR, shape=(DH, DH), strides=
(DH, 1), offsets=(0, 0), block_shape=(DH, DH), order=(0, 1))
tl.store(matDeltaR_i_ptr, matDeltaR_i.to(DTYPE))
matDeltaR_f_ptr = tl.make_block_ptr(base=delta_R + idx_b_B * str_matR_B +
idx_b_NH * str_matR_NH + 1 * str_matR_NGR, shape=(DH, DH), strides=
(DH, 1), offsets=(0, 0), block_shape=(DH, DH), order=(0, 1))
tl.store(matDeltaR_f_ptr, matDeltaR_f.to(DTYPE))
matDeltaR_z_ptr = tl.make_block_ptr(base=delta_R + idx_b_B * str_matR_B +
idx_b_NH * str_matR_NH + 2 * str_matR_NGR, shape=(DH, DH), strides=
(DH, 1), offsets=(0, 0), block_shape=(DH, DH), order=(0, 1))
tl.store(matDeltaR_z_ptr, matDeltaR_z.to(DTYPE))
matDeltaR_o_ptr = tl.make_block_ptr(base=delta_R + idx_b_B * str_matR_B +
idx_b_NH * str_matR_NH + 3 * str_matR_NGR, shape=(DH, DH), strides=
(DH, 1), offsets=(0, 0), block_shape=(DH, DH), order=(0, 1))
tl.store(matDeltaR_o_ptr, matDeltaR_o.to(DTYPE))
vecDeltaB_i_ptr = (delta_b + idx_b_B * NH * NGI * DH + idx_b_NH * NGI *
DH + 0 * DH + tl.arange(0, DH))
tl.store(vecDeltaB_i_ptr, vecDeltaB_i.to(DTYPE))
vecDeltaB_f_ptr = (delta_b + idx_b_B * NH * NGI * DH + idx_b_NH * NGI *
DH + 1 * DH + tl.arange(0, DH))
tl.store(vecDeltaB_f_ptr, vecDeltaB_f.to(DTYPE))
vecDeltaB_z_ptr = (delta_b + idx_b_B * NH * NGI * DH + idx_b_NH * NGI *
DH + 2 * DH + tl.arange(0, DH))
tl.store(vecDeltaB_z_ptr, vecDeltaB_z.to(DTYPE))
vecDeltaB_o_ptr = (delta_b + idx_b_B * NH * NGI * DH + idx_b_NH * NGI *
DH + 3 * DH + tl.arange(0, DH))
tl.store(vecDeltaB_o_ptr, vecDeltaB_o.to(DTYPE))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Recurrent Neural Networks",
"Backpropagation"
],
"Memory Access Pattern": [
"Tiled",
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"Memory-Bound"
]
} | [
"MIT",
"BSD"
] | https://github.com/NX-AI/flashrnn/blob/3fca666a81c8740af4878d7bc5e2a51900e4fe14/flashrnn/flashrnn/triton_fused/lstm_bw.py |
67e19a21-a90d-4dc1-acd6-5034e67c1d5c | FleetAttention_triton.py | Computational-Machine-Intelligence/LeetDecoding | leetDecoding/methods/FleetAttention_triton.py | 1b545c2f5bacc155255250d1f70ac9484744559a | 0 | @triton.jit
def FleetAttention_kernel(B_ptr, C_ptr, V_ptr, ans_ptr, seqlen: tl.
constexpr, dim: tl.constexpr, rank: tl.constexpr, stride_vbh: tl.
constexpr, stride_bbh: tl.constexpr, dim_BLOCK: tl.constexpr):
rank_idx = tl.program_id(axis=0)
bz = tl.program_id(axis=1)
dim_block_idx = tl.program_id(axis=2)
off_b = tl.arange(0, 1)
off_dim = tl.arange(0, dim_BLOCK)
cv = tl.zeros([1, dim_BLOCK], dtype=tl.float32)
o = tl.zeros([1, dim_BLOCK], dtype=tl.float32)
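    # Linear-attention style recurrence per rank component: cv accumulates c_t * v_t over time
    # and each step adds b_t * cv to the output slice handled by this program.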
for seq_idx in range(seqlen):
offs_bc = bz * stride_bbh + seq_idx * rank + rank_idx + off_b[None, :]
offs_v = (bz * stride_vbh + seq_idx * dim + dim_block_idx *
dim_BLOCK + off_dim[None, :])
ans_ptrs = (ans_ptr + bz * stride_vbh + seq_idx * dim +
dim_block_idx * dim_BLOCK + off_dim[None, :])
v_ptrs = V_ptr + offs_v
b_ptr = B_ptr + offs_bc
c_ptr = C_ptr + offs_bc
b = tl.load(b_ptr, mask=off_b[None, :] < 1, other=0)
c = tl.load(c_ptr, mask=off_b[None, :] < 1, other=0)
v = tl.load(v_ptrs, mask=off_dim[None, :] < dim, other=0)
cv = c * v + cv
o = b * cv
ans = tl.load(ans_ptrs, mask=off_dim[None, :] < dim, other=0)
tl.store(ans_ptrs, ans + o, mask=off_dim[None, :] < dim)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled",
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/Computational-Machine-Intelligence/LeetDecoding/blob/1b545c2f5bacc155255250d1f70ac9484744559a/leetDecoding/methods/FleetAttention_triton.py |
1352a7a5-ebd7-4fa6-b706-8efe63475295 | fwd_kernel.py | ROCm/aotriton | test/fwd_kernel.py | 016f733e8ff746450e066f78bed68709ccd93e60 | 0 | @triton.jit
def attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, start_m,
seqlen_q, seqlen_k, dropout_p, philox_seed, batch_philox_offset,
encoded_softmax_block_ptr, BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.
constexpr, BLOCK_N: tl.constexpr, STAGE: tl.constexpr, offs_m: tl.
constexpr, offs_n: tl.constexpr, pre_load_v: tl.constexpr,
ENABLE_DROPOUT: tl.constexpr, RETURN_ENCODED_SOFTMAX: tl.constexpr):
if STAGE == 1:
lo, hi = 0, min(seqlen_k, start_m * BLOCK_M)
elif STAGE == 2:
lo, hi = start_m * BLOCK_M, min(seqlen_k, start_m * BLOCK_M + BLOCK_M)
lo = tl.multiple_of(lo, BLOCK_M)
K_block_ptr = tl.advance(K_block_ptr, (0, lo))
V_block_ptr = tl.advance(V_block_ptr, (lo, 0))
if RETURN_ENCODED_SOFTMAX:
encoded_softmax_block_ptr = tl.advance(encoded_softmax_block_ptr,
(0, lo))
else:
lo, hi = 0, seqlen_k
for start_n in range(lo, hi, BLOCK_N):
if STAGE == 1 or STAGE == 3:
start_n = tl.multiple_of(start_n, BLOCK_N)
k = tl.load(K_block_ptr)
if pre_load_v:
v = tl.load(V_block_ptr)
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
if STAGE == 2:
mask = offs_m[:, None] >= start_n + offs_n[None, :]
qk = tl.where(mask, qk, float('-inf'))
if BLOCK_M == 1:
qk += tl.sum(tl.view(q, [BLOCK_DMODEL]) * tl.view(k, [
BLOCK_DMODEL]))
else:
qk += tl.dot(q, k)
m_ij = tl.maximum(m_i, tl.max(qk, 1))
qk = qk - m_ij[:, None]
p = tl.math.exp2(qk)
l_ij = tl.sum(p, 1)
if ENABLE_DROPOUT:
philox_offset = (batch_philox_offset + start_m * BLOCK_M *
seqlen_k + start_n)
keep = dropout_mask(philox_seed, philox_offset, dropout_p,
BLOCK_M, BLOCK_N, seqlen_k)
if RETURN_ENCODED_SOFTMAX:
tl.store(encoded_softmax_block_ptr, tl.where(keep, p, -p).
to(encoded_softmax_block_ptr.type.element_ty))
p = tl.where(keep, p, 0.0)
elif RETURN_ENCODED_SOFTMAX:
tl.store(encoded_softmax_block_ptr, p.to(
encoded_softmax_block_ptr.type.element_ty))
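        # Online-softmax rescale (base 2): shrink the running accumulator and normalizer by
        # exp2(m_old - m_new) before folding in this key block's contribution.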
alpha = tl.math.exp2(m_i - m_ij)
acc = acc * alpha[:, None]
if not pre_load_v:
v = tl.load(V_block_ptr)
"""
if ENABLE_DROPOUT:
v = (v / (1.0 - dropout_p)).to(V_block_ptr.type.element_ty)
"""
l_i = l_i * alpha + l_ij
m_i = m_ij
if BLOCK_M == 1:
acc += tl.view(p.to(V_block_ptr.type.element_ty), [1]) * tl.view(v,
[BLOCK_DMODEL])
else:
acc += tl.dot(p.to(V_block_ptr.type.element_ty), v)
V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
if RETURN_ENCODED_SOFTMAX:
encoded_softmax_block_ptr = tl.advance(encoded_softmax_block_ptr,
(0, BLOCK_N))
return acc, l_i, m_i
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Softmax"
],
"Memory Access Pattern": [
"Tiled",
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/ROCm/aotriton/blob/016f733e8ff746450e066f78bed68709ccd93e60/test/fwd_kernel.py |
1310b43a-9ced-4062-b861-4e7f7c35f090 | cumsum.py | sustcsonglin/flash-linear-attention | fla/ops/utils/cumsum.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({'BS': BS}, num_warps=num_warps) for
BS in [16, 32, 64] for num_warps in [2, 4, 8]], key=['S', 'BT'])
@triton.jit
def chunk_local_cumsum_vector_kernel(s, o, offsets, indices, T: tl.
constexpr, H: tl.constexpr, S: tl.constexpr, BT: tl.constexpr, BS: tl.
constexpr, HEAD_FIRST: tl.constexpr, USE_OFFSETS: tl.constexpr):
i_s, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_b, i_h = i_bh // H, i_bh % H
if USE_OFFSETS:
i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices +
i_t * 2 + 1).to(tl.int32)
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
T = eos - bos
else:
bos, eos = i_b * T, i_b * T + T
o_i = tl.arange(0, BT)
m_s = tl.where(o_i[:, None] >= o_i[None, :], 1.0, 0.0)
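    # m_s is a lower-triangular matrix of ones, so tl.dot(m_s, b_s) yields the inclusive
    # cumulative sum of b_s over the time dimension within this BT-sized chunk.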
if HEAD_FIRST:
p_s = tl.make_block_ptr(s + i_bh * T * S, (T, S), (S, 1), (i_t * BT,
i_s * BS), (BT, BS), (1, 0))
p_o = tl.make_block_ptr(o + i_bh * T * S, (T, S), (S, 1), (i_t * BT,
i_s * BS), (BT, BS), (1, 0))
else:
p_s = tl.make_block_ptr(s + (bos * H + i_h) * S, (T, S), (H * S, 1),
(i_t * BT, i_s * BS), (BT, BS), (1, 0))
p_o = tl.make_block_ptr(o + (bos * H + i_h) * S, (T, S), (H * S, 1),
(i_t * BT, i_s * BS), (BT, BS), (1, 0))
b_s = tl.load(p_s, boundary_check=(0, 1)).to(tl.float32)
b_o = tl.dot(m_s, b_s, allow_tf32=False)
tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Tiled",
"Blocked Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/utils/cumsum.py |
5f884ba7-afb8-4c7b-b7f8-8c0bdc99e57f | gemm_streamk_benchmark.py | intel/intel-xpu-backend-for-triton | benchmarks/triton_kernels_benchmark/gemm_streamk_benchmark.py | 6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2 | 0 | @triton.autotune(configs=[triton.Config({'BLOCK_SIZE_M': 256,
'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 4, 'grf_mode':
'large'}, num_stages=2, num_warps=32)], key=['M', 'N', 'K'])
@triton.jit
def first_wave(a_ptr, b_ptr, c_ptr, M: tl.constexpr, N: tl.constexpr, K: tl
.constexpr, stride_am: tl.constexpr, stride_ak: tl.constexpr, stride_bk:
tl.constexpr, stride_bn: tl.constexpr, stride_cm: tl.constexpr,
stride_cn: tl.constexpr, full_tiles, partial_tiles, iters_per_tile,
BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K:
tl.constexpr, GROUP_SIZE_M: tl.constexpr):
pid = tl.program_id(axis=0)
start_iter = pid * full_tiles + tl.minimum(pid, partial_tiles)
last_iter = (pid + 1) * full_tiles + tl.minimum(pid + 1, partial_tiles)
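    # Stream-K partitioning: this program owns a contiguous span of K-iterations that may cross
    # output-tile boundaries; mac_loop is called once per (possibly partial) tile in the span.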
while start_iter < last_iter:
end_iter = start_iter + (iters_per_tile - start_iter % iters_per_tile)
end_iter = tl.minimum(end_iter, last_iter)
mac_loop(a_ptr, b_ptr, c_ptr, M, N, K, stride_am, stride_ak,
stride_bk, stride_bn, stride_cm, stride_cn, iters_per_tile,
start_iter, end_iter, BLOCK_SIZE_M, BLOCK_SIZE_N, BLOCK_SIZE_K,
GROUP_SIZE_M)
start_iter = end_iter
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled",
"Blocked Access"
],
"Parallelization Strategy": [
"Persistent Kernels"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/intel/intel-xpu-backend-for-triton/blob/6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2/benchmarks/triton_kernels_benchmark/gemm_streamk_benchmark.py |
76e5f84e-91fd-40d6-b973-5c5374ec218d | triton_fused_attn.py | LouChao98/vqtree | ops/triton_fused_attn.py | 27a53274df7a804bce27dffcce5f5be73f64b6f3 | 0 | @triton.heuristics({'EVEN_M': lambda args: args['seqlen_q'] % args[
'BLOCK_M'] == 0, 'EVEN_N': lambda args: args['seqlen_k'] % args[
'BLOCK_N'] == 0})
@triton.jit
def _fwd_kernel(Q, K, V, Out, softmax_scale, stride_qb, stride_qh,
stride_qm, stride_kb, stride_kh, stride_kn, stride_vb, stride_vh,
stride_vn, stride_ob, stride_oh, stride_om, nheads, seqlen_q, seqlen_k,
CACHE_KEY_SEQLEN_Q, CACHE_KEY_SEQLEN_K, IS_CAUSAL: tl.constexpr,
BLOCK_HEADDIM: tl.constexpr, EVEN_M: tl.constexpr, EVEN_N: tl.constexpr,
BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr):
start_m = tl.program_id(0)
off_hb = tl.program_id(1)
off_b = off_hb // nheads
off_h = off_hb % nheads
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
offs_d = tl.arange(0, BLOCK_HEADDIM)
q_ptrs = Q + off_b * stride_qb + off_h * stride_qh + (offs_m[:, None] *
stride_qm + offs_d[None, :])
k_ptrs = K + off_b * stride_kb + off_h * stride_kh + (offs_n[:, None] *
stride_kn + offs_d[None, :])
v_ptrs = V + off_b * stride_vb + off_h * stride_vh + (offs_n[:, None] *
stride_vn + offs_d[None, :])
lse_i = tl.zeros([BLOCK_M], dtype=tl.float32) + NEGINF
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) + NEGINF
acc_o = tl.zeros([BLOCK_M, BLOCK_HEADDIM], dtype=tl.float32)
if EVEN_M:
q = tl.load(q_ptrs)
else:
q = tl.load(q_ptrs, mask=offs_m[:, None] < seqlen_q, other=0.0)
end_n = seqlen_k if not IS_CAUSAL else tl.minimum((start_m + 1) *
BLOCK_M, seqlen_k)
for start_n in range(0, end_n, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
if EVEN_N & EVEN_M:
k = tl.load(k_ptrs + start_n * stride_kn)
else:
k = tl.load(k_ptrs + start_n * stride_kn, mask=(start_n +
offs_n)[:, None] < seqlen_k, other=0.0)
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
qk += tl.dot(q, tl.trans(k))
if not EVEN_N:
qk += tl.where((start_n + offs_n)[None, :] < seqlen_k, 0, NEGINF)
if IS_CAUSAL:
qk += tl.where(offs_m[:, None] >= (start_n + offs_n)[None, :],
0, NEGINF)
m_ij = tl.maximum(tl.max(qk, 1) * softmax_scale, lse_i)
p = tl.exp(qk * softmax_scale - m_ij[:, None])
l_ij = tl.sum(p, 1)
acc_o_scale = tl.exp(m_i - m_ij)
acc_o = acc_o * acc_o_scale[:, None]
if EVEN_N & EVEN_M:
v = tl.load(v_ptrs + start_n * stride_vn)
else:
v = tl.load(v_ptrs + start_n * stride_vn, mask=(start_n +
offs_n)[:, None] < seqlen_k, other=0.0)
p = p.to(v.dtype)
acc_o += tl.dot(p, v)
m_i = m_ij
l_i_new = tl.exp(lse_i - m_ij) + l_ij
lse_i = m_ij + tl.log(l_i_new)
o_scale = tl.exp(m_i - lse_i)
acc_o = acc_o * o_scale[:, None]
start_m = tl.program_id(0)
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
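    # Note: Lse and seqlen_q_rounded are referenced below but are not in this kernel's signature;
    # in the upstream flash-attention kernel they are passed as arguments, so this excerpt appears
    # to omit them.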
lse_ptrs = Lse + off_hb * seqlen_q_rounded + offs_m
tl.store(lse_ptrs, lse_i)
offs_d = tl.arange(0, BLOCK_HEADDIM)
out_ptrs = Out + off_b * stride_ob + off_h * stride_oh + (offs_m[:,
None] * stride_om + offs_d[None, :])
if EVEN_M:
tl.store(out_ptrs, acc_o)
else:
tl.store(out_ptrs, acc_o, mask=offs_m[:, None] < seqlen_q)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Softmax"
],
"Memory Access Pattern": [
"Tiled",
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/LouChao98/vqtree/blob/27a53274df7a804bce27dffcce5f5be73f64b6f3/ops/triton_fused_attn.py |
fbe7dd6a-e642-49e2-85ae-30c92f1c7dd6 | gemm_preop_exp_benchmark.py | intel/intel-xpu-backend-for-triton | benchmarks/triton_kernels_benchmark/gemm_preop_exp_benchmark.py | 6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2 | 0 | @triton.autotune(configs=[triton.Config({'BLOCK_SIZE_M': 256,
'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 4, 'grf_mode':
'large'}, num_stages=2, num_warps=32), triton.Config({'BLOCK_SIZE_M':
256, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 4,
'grf_mode': 'large'}, num_stages=3, num_warps=32), triton.Config({
'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32,
'GROUP_SIZE_M': 4, 'grf_mode': 'large'}, num_stages=2, num_warps=32),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K':
32, 'GROUP_SIZE_M': 4, 'grf_mode': 'large'}, num_stages=2, num_warps=32
), triton.Config({'BLOCK_SIZE_M': 8, 'BLOCK_SIZE_N': 512,
'BLOCK_SIZE_K': 64, 'GROUP_SIZE_M': 1, 'grf_mode': 'large'}, num_stages
=2, num_warps=32)], key=['M', 'N', 'K'])
@triton.jit
def matmul_kernel_with_block_pointers(a_ptr, b_ptr, c_ptr, M: tl.constexpr,
N: tl.constexpr, K: tl.constexpr, stride_am: tl.constexpr, stride_ak:
tl.constexpr, stride_bk: tl.constexpr, stride_bn: tl.constexpr,
stride_cm: tl.constexpr, stride_cn: tl.constexpr, BLOCK_SIZE_M: tl.
constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
GROUP_SIZE_M: tl.constexpr):
pid = tl.program_id(axis=0)
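# Map the linear program id to a (pid_m, pid_n) tile using grouped ordering
# (GROUP_SIZE_M row-tiles per group) to improve L2 cache reuse.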
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + pid % group_size_m
pid_n = pid % num_pid_in_group // group_size_m
a_block_ptr = tl.make_block_ptr(base=a_ptr, shape=(M, K), strides=(
stride_am, stride_ak), offsets=(pid_m * BLOCK_SIZE_M, 0),
block_shape=(BLOCK_SIZE_M, BLOCK_SIZE_K), order=(1, 0))
b_block_ptr = tl.make_block_ptr(base=b_ptr, shape=(K, N), strides=(
stride_bk, stride_bn), offsets=(0, pid_n * BLOCK_SIZE_N),
block_shape=(BLOCK_SIZE_K, BLOCK_SIZE_N), order=(1, 0))
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for _ in range(0, K, BLOCK_SIZE_K):
a = tl.load(a_block_ptr, boundary_check=(0, 1))
a = a.to(tl.float32)
a = tl.math.exp(a)
a = a.to(tl.bfloat16)
b = tl.load(b_block_ptr, boundary_check=(0, 1))
accumulator += tl.dot(a, b)
a_block_ptr = tl.advance(a_block_ptr, (0, BLOCK_SIZE_K))
b_block_ptr = tl.advance(b_block_ptr, (BLOCK_SIZE_K, 0))
c = accumulator.to(tl.float32)
c_block_ptr = tl.make_block_ptr(base=c_ptr, shape=(M, N), strides=(
stride_cm, stride_cn), offsets=(pid_m * BLOCK_SIZE_M, pid_n *
BLOCK_SIZE_N), block_shape=(BLOCK_SIZE_M, BLOCK_SIZE_N), order=(1, 0))
tl.store(c_block_ptr, c, boundary_check=(0, 1))
| {
"Data Type": [
"bf16"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled",
"Blocked Access",
"Transposed Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/intel/intel-xpu-backend-for-triton/blob/6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2/benchmarks/triton_kernels_benchmark/gemm_preop_exp_benchmark.py |
8fbd1eee-ae83-4f46-9244-6b0b3bf3bee6 | softmax_online_v1.py | iclementine/optimize_softmax | softmax_online_v1.py | 6ddeee3481dd5e63f4a30b946c417e97bc4494bf | 0 | @triton.jit
def softmax_kernel_online_v1(output_ptr, input_ptr, M, N, TILE_N: tl.constexpr
):
pid_m = tl.program_id(0)
m = tl.full((), value=-float('inf'), dtype=output_ptr.dtype.element_ty)
z = tl.full((), value=0, dtype=output_ptr.dtype.element_ty)
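# Pass 1: online reduction of the row maximum m and the normalizer
# z = sum(exp(x - m)) over tiles of width TILE_N.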
for start_n in range(0, N, TILE_N):
n_offsets = start_n + tl.arange(0, TILE_N)
offset = pid_m * N + n_offsets
input_ptrs = input_ptr + offset
mask = n_offsets < N
inp = tl.load(input_ptrs, mask=mask, other=-float('inf')).to(output_ptr
.dtype.element_ty)
new_m = tl.maximum(m, tl.max(inp, 0))
new_z = tl.exp(m - new_m) * z + tl.sum(tl.exp(inp - new_m), 0)
m = new_m
z = new_z
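# Pass 2: recompute exp(x - m) per tile and divide by the normalizer z.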
for start_n in range(0, N, TILE_N):
n_offsets = start_n + tl.arange(0, TILE_N)
offset = pid_m * N + n_offsets
input_ptrs = input_ptr + offset
mask = n_offsets < N
inp = tl.load(input_ptrs, mask=mask, other=-float('inf')).to(output_ptr
.dtype.element_ty)
e = tl.exp(inp - m)
out = e / z
output_ptrs = output_ptr + offset
tl.store(output_ptrs, out, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Softmax"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"BSD"
] | https://github.com/iclementine/optimize_softmax/blob/6ddeee3481dd5e63f4a30b946c417e97bc4494bf/softmax_online_v1.py |
1370ad45-ebfb-4938-8f50-14f7af621077 | fused_recurrent.py | sustcsonglin/flash-linear-attention | fla/ops/linear_attn/fused_recurrent.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.jit
def fused_recurrent_linear_attn_fwd_kernel(q, k, v, o, h0, ht, s_k_h, s_v_h,
scale, B, H, T, K: tl.constexpr, V: tl.constexpr, BK: tl.constexpr, BV:
tl.constexpr, USE_INITIAL_STATE: tl.constexpr, STORE_FINAL_STATE: tl.
constexpr):
i_v, i_k, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
p_q = q + i_bh * s_k_h + i_k * BK + tl.arange(0, BK)
p_k = k + i_bh * s_k_h + i_k * BK + tl.arange(0, BK)
p_v = v + i_bh * s_v_h + i_v * BV + tl.arange(0, BV)
p_o = o + (i_bh + i_k * B * H) * s_v_h + i_v * BV + tl.arange(0, BV)
mask_bk = i_k * BK + tl.arange(0, BK) < K
mask_bv = i_v * BV + tl.arange(0, BV) < V
mask_kv = mask_bk[None, :] & mask_bv[:, None]
b_h = tl.zeros([BV, BK], dtype=tl.float32)
if USE_INITIAL_STATE:
p_h0 = h0 + i_bh * K * V + (i_k * BK + tl.arange(0, BK)[None, :]
) * V + (i_v * BV + tl.arange(0, BV)[:, None])
b_h += tl.load(p_h0, mask=mask_kv, other=0).to(tl.float32)
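# Time-step recurrence: b_h accumulates the outer products k_t (x) v_t, and the
# output at each step is o_t = sum over the key dimension of b_h * (q_t * scale).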
for _ in range(0, T):
b_k = tl.load(p_k, mask=mask_bk, other=0).to(tl.float32)
b_v = tl.load(p_v, mask=mask_bv, other=0).to(tl.float32)
b_q = tl.load(p_q, mask=mask_bk, other=0).to(tl.float32) * scale
b_h += b_k[None, :] * b_v[:, None]
b_o = b_h * b_q[None, :]
b_o = tl.sum(b_o, axis=1)
tl.store(p_o, b_o.to(p_o.dtype.element_ty), mask=mask_bv)
p_q += K
p_k += K
p_o += V
p_v += V
if STORE_FINAL_STATE:
p_ht = ht + i_bh * K * V + (i_k * BK + tl.arange(0, BK)[None, :]
) * V + (i_v * BV + tl.arange(0, BV)[:, None])
tl.store(p_ht, b_h.to(p_ht.dtype.element_ty), mask=mask_kv)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Recurrent Neural Networks",
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Tiled",
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/linear_attn/fused_recurrent.py |
8bddc46d-3ecf-45eb-88d1-ea5dd55dfb8c | fp8_gemm.py | pytorch/FBGEMM | fbgemm_gpu/experimental/gemm/triton_gemm/fp8_gemm.py | fe980ab54a6e28818d81c8694b6564e7f804418b | 0 | @triton.jit
def _kernel_quantize_fp8_block(A, A_scale, A_fp8, scale_ub, M, K, stride_am,
stride_ak, stride_om, stride_ok, stride_a_scale_m, stride_a_scale_k,
TL_FP8_DTYPE: tl.constexpr, MAX_FP8: tl.constexpr, EPS: tl.constexpr,
CLAMP_MAX: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_K: tl.constexpr
) ->None:
"""Quantize and scale each [BLOCK_M, BLOCK_K] block.
Scale per block i, j is computed as 1 / (MAX_FP8 / max(abs(A[i:i+BLOCK_M, j:j+BLOCK_K])))
Kernel naively iterates through matrix with [BLOCK_M, BLOCK_K] tiles.
Todo:
* Better tiling and ordering schemes.
Args:
A (Tensor): [M, K] higher precision input tensor.
A_scale (Tensor): [cdiv(M, BLOCK_M), cdiv(K, BLOCK_K)] reciprocal scale tensor per block.
A_fp8 (Tensor): [M, K] fp8 scaled tensor. A_fp8 = A / A_scale (per block).
scale_ub (Tensor): [1] Maximum allowed value for scale.
M (int): Number of rows.
K (int): Number of columns.
stride_am (int): Stride of m dimension of A.
stride_ak (int): Stride of k dimension of A.
stride_om (int): Stride of m dimension of output.
stride_ok (int): Stride of k dimension of output.
stride_a_scale_m (int): Stride of m dimension of A_scale.
stride_a_scale_k (int): Stride of k dimension of A_scale.
TL_FP8_DTYPE (tl.dtype): Target fp8 datatype.
MAX_FP8 (float): Maximum expressible value for FP8.
EPS (float): Epsilon value for numerical stability.
CLAMP_MAX (bool): Whether to apply scale_ub.
BLOCK_M (int): Block size for M dimension of A_scale and kernel.
BLOCK_K (int): Block size for K dimension of A_scale and kernel.
"""
pid = tl.program_id(0)
grid_k = tl.cdiv(K, BLOCK_K)
block_m = pid // grid_k
block_k = pid % grid_k
rm = block_m * BLOCK_M + tl.arange(0, BLOCK_M)
rk = block_k * BLOCK_K + tl.arange(0, BLOCK_K)
a_offset = rm[:, None] * stride_am + rk[None, :] * stride_ak
out_offset = rm[:, None] * stride_om + rk[None, :] * stride_ok
a_mask = (rm < M)[:, None] & (rk < K)[None, :]
a_block = tl.load(A + a_offset, mask=a_mask, other=0.0)
block_max = tl.max(tl.abs(a_block))
if CLAMP_MAX:
ub = tl.load(scale_ub)
block_max = tl.clamp(block_max, EPS, ub)
else:
block_max = tl.maximum(block_max, EPS)
scale = MAX_FP8 / block_max
tl.store(A_scale + block_m * stride_a_scale_m + block_k *
stride_a_scale_k, 1.0 / scale)
a_fp8 = a_block * scale
a_fp8 = tl.clamp(a_fp8, -MAX_FP8, MAX_FP8)
a_fp8 = a_fp8.to(TL_FP8_DTYPE)
tl.store(A_fp8 + out_offset, a_fp8, mask=a_mask)
| {
"Data Type": [
"int8"
],
"Functionality": [
"Quantization"
],
"Memory Access Pattern": [
"Tiled",
"Blocked Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Memory-Bound",
"Compute Bound"
]
} | [
"BSD",
"MIT"
] | https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/experimental/gemm/triton_gemm/fp8_gemm.py |
26a51175-e022-44f8-9ec3-2d92fe596611 | parallel.py | sustcsonglin/flash-linear-attention | fla/ops/retention/parallel.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.jit
def parallel_retention_bwd_kernel_dq(i_bh, i_t, i_k, i_v, i_h, k, v, do, dq,
scale, B: tl.constexpr, H: tl.constexpr, T: tl.constexpr, K: tl.
constexpr, V: tl.constexpr, BT: tl.constexpr, BS: tl.constexpr, BK: tl.
constexpr, BV: tl.constexpr):
p_k = tl.make_block_ptr(k + i_bh * T * K, (T, K), (K, 1), (0, i_k * BK),
(BS, BK), (1, 0))
p_v = tl.make_block_ptr(v + i_bh * T * V, (V, T), (1, V), (i_v * BV, 0),
(BV, BS), (0, 1))
p_do = tl.make_block_ptr(do + i_bh * T * V, (T, V), (V, 1), (i_t * BT,
i_v * BV), (BT, BV), (1, 0))
b_do = tl.load(p_do, boundary_check=(0, 1))
b_dq = tl.zeros([BT, BK], dtype=tl.float32)
b_b = tl.math.log2(1 - tl.math.exp2(-5 - i_h * 1.0))
d_b = tl.math.exp2(b_b * BS)
d_h = tl.math.exp2((BS - tl.arange(0, BS)) * b_b)
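# Phase 1: accumulate dq over all earlier key/value chunks, applying the per-head
# exponential decay (rate 2^b_b) between chunks.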
for i in range(0, i_t * BT, BS):
b_k = tl.load(p_k, boundary_check=(0, 1))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_ds = tl.dot(b_do, b_v, allow_tf32=False) * d_h[None, :]
if i != 0:
b_dq *= d_b
b_dq += tl.dot(b_ds.to(b_v.dtype), b_k, allow_tf32=False)
p_k = tl.advance(p_k, (BS, 0))
p_v = tl.advance(p_v, (0, BS))
b_dq *= tl.math.exp2(tl.arange(0, BT) * b_b)[:, None] * scale
o_q = tl.arange(0, BT)
o_k = tl.arange(0, BS)
p_k = tl.make_block_ptr(k + i_bh * T * K, (T, K), (K, 1), (i_t * BT,
i_k * BK), (BS, BK), (1, 0))
p_v = tl.make_block_ptr(v + i_bh * T * V, (V, T), (1, V), (i_v * BV,
i_t * BT), (BV, BS), (0, 1))
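# Phase 2: the current chunk, with the causal decay mask d_s applied within the chunk.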
for _ in range(i_t * BT, (i_t + 1) * BT, BS):
b_k = tl.load(p_k, boundary_check=(0, 1))
b_v = tl.load(p_v, boundary_check=(0, 1))
m_s = o_q[:, None] >= o_k[None, :]
d_s = tl.where(m_s, tl.math.exp2((o_q[:, None] - o_k[None, :]) *
b_b), 0)
b_ds = tl.dot(b_do, b_v, allow_tf32=False) * d_s * scale
b_dq += tl.dot(b_ds.to(b_k.dtype), b_k, allow_tf32=False)
p_k = tl.advance(p_k, (BS, 0))
p_v = tl.advance(p_v, (0, BS))
o_k += BS
p_dq = tl.make_block_ptr(dq + (i_bh + B * H * i_v) * T * K, (T, K), (K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
tl.store(p_dq, b_dq.to(p_dq.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [
"Coalesced",
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Memory-Bound",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/retention/parallel.py |
96564198-2f7d-486f-97bf-e992926034e9 | paged_attn_v2.py | AlibabaPAI/FLASHNN | flashnn/triton_kernels/paged_attn_v2.py | 528a9301587f5fb135b25d973a87ba0a40a703a7 | 0 | @triton.jit
def _single_query_cached_kv_attention_v2(exp_sums, max_logits, out, q,
k_cache, v_cache, head_mapping, scale, block_tables, seq_lens,
partiton_size, max_num_blocks_per_seq, alibi_slopes, stride_qm,
stride_qn, stride_om, stride_on, stride_ok, stride_km, stride_kn,
stride_kk, stride_exp_m, stride_exp_n, BLOCK_SIZE: tl.constexpr,
HEAD_SIZE: tl.constexpr):
seq_idx = tl.program_id(axis=1)
par_idx = tl.program_id(axis=2)
seq_len = tl.load(seq_lens + seq_idx)
if par_idx * partiton_size >= seq_len:
return
num_context_blocks = tl.cdiv(seq_len, BLOCK_SIZE)
num_blocks_per_par = partiton_size // BLOCK_SIZE
start_block_idx = par_idx * num_blocks_per_par
end_block_idx = tl.minimum(start_block_idx + num_blocks_per_par,
num_context_blocks)
head_idx = tl.program_id(axis=0)
kv_head_idx = tl.load(head_mapping + head_idx)
if alibi_slopes is None:
alibi_slope = 0.0
else:
alibi_slope = tl.load(alibi_slopes + head_idx)
block_offs = tl.arange(0, BLOCK_SIZE)
head_size_offs = tl.arange(0, HEAD_SIZE)
q = tl.load(q + seq_idx * stride_qm + head_idx * stride_qn + head_size_offs
)
q = (q * scale).to(tl.float16)
qkv = tl.zeros([BLOCK_SIZE, HEAD_SIZE], dtype=tl.float32)
qk_max = float('-inf')
exp_sum = 0.0
fp16_0 = tl.zeros([1, 1], dtype=k_cache.dtype.element_ty)
base_offs_kv = kv_head_idx * stride_kn + block_offs[:, None
] * stride_kk + head_size_offs[None, :]
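# Scan the KV blocks assigned to this partition, keeping a numerically stable
# running softmax (qk_max, exp_sum) and rescaling the accumulated value sum qkv.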
for block_idx in range(start_block_idx, end_block_idx):
physical_block_idx = tl.load(block_tables + seq_idx *
max_num_blocks_per_seq + block_idx)
mask = (block_offs[:, None] < seq_len - block_idx * BLOCK_SIZE) & (
head_size_offs[None, :] < HEAD_SIZE)
offs_kv = physical_block_idx * stride_km + base_offs_kv
k = tl.load(k_cache + offs_kv, mask=mask, other=fp16_0)
v = tl.load(v_cache + offs_kv, mask=mask, other=fp16_0)
_qk = tl.sum((q[None, :] * k).to(tl.float32), axis=1)
_qk += alibi_slope * (block_idx * BLOCK_SIZE + block_offs - seq_len + 1
)
_qk_max = tl.maximum(tl.max(_qk, axis=0), qk_max)
qk = tl.where(block_offs[:, None] < seq_len - block_idx *
BLOCK_SIZE, _qk[:, None], float('-inf'))
_exp_sum = exp_sum * tl.exp(qk_max - _qk_max) + tl.sum(tl.exp(_qk -
_qk_max), axis=0)
qkv = qkv * (exp_sum * tl.exp(qk_max - _qk_max) / _exp_sum) + tl.exp(
qk - _qk_max) / _exp_sum * v
qk_max = _qk_max
exp_sum = _exp_sum
offs_exp = seq_idx * stride_exp_m + head_idx * stride_exp_n + par_idx
tl.store(exp_sums + offs_exp, exp_sum)
tl.store(max_logits + offs_exp, qk_max)
offs_out = (seq_idx * stride_om + head_idx * stride_on + par_idx *
stride_ok + head_size_offs)
tl.store(out + offs_out, tl.sum(qkv, axis=0))
| {
"Data Type": [
"fp16"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Tiled",
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/paged_attn_v2.py |
b70199cf-0316-4688-b762-de62481ca266 | bwd_split_kernel.py | ROCm/aotriton | test/bwd_split_kernel.py | 016f733e8ff746450e066f78bed68709ccd93e60 | 0 | @triton.jit
def bwd_kernel_dq(Q, K, V, sm_scale, Out, DO, DQ, L, D, stride_qz,
stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn,
stride_kk, stride_vz, stride_vh, stride_vk, stride_vn, seqlen_q,
seqlen_k, dropout_p, philox_seed, philox_offset_base, BLOCK_M: tl.
constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr, CAUSAL:
tl.constexpr, ENABLE_DROPOUT: tl.constexpr):
start_m = tl.program_id(0) * BLOCK_M
off_h = tl.program_id(1)
off_z = tl.program_id(2)
num_h = tl.num_programs(1)
num_z = tl.num_programs(2)
offs_m = start_m + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
q_offset = off_h * stride_qh + off_z * stride_qz
Q_block_ptr = tl.make_block_ptr(base=Q + q_offset, shape=(seqlen_q,
BLOCK_DMODEL), strides=(stride_qm, stride_qk), offsets=(start_m, 0),
block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0))
k_offset = off_h * stride_kh + off_z * stride_kz
K_block_ptr = tl.make_block_ptr(base=K + k_offset, shape=(BLOCK_DMODEL,
seqlen_k), strides=(stride_kk, stride_kn), offsets=(0, 0),
block_shape=(BLOCK_DMODEL, BLOCK_N), order=(0, 1))
v_offset = off_h * stride_vh + off_z * stride_vz
V_block_ptr = tl.make_block_ptr(base=V + v_offset, shape=(BLOCK_DMODEL,
seqlen_k), strides=(stride_vn, stride_vk), offsets=(0, 0),
block_shape=(BLOCK_DMODEL, BLOCK_N), order=(0, 1))
DO_block_ptr = tl.make_block_ptr(base=DO + q_offset, shape=(seqlen_q,
BLOCK_DMODEL), strides=(stride_qm, stride_qk), offsets=(start_m, 0),
block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0))
off_zh = off_z * num_h + off_h * 1
D_ptrs = D + off_zh * seqlen_q
l_ptrs = L + off_zh * seqlen_q
qk_scale = sm_scale * 1.44269504
q = tl.load(Q_block_ptr)
q = (q * qk_scale).to(Q_block_ptr.type.element_ty)
do = tl.load(DO_block_ptr)
Di = tl.load(D_ptrs + offs_m)
l_i = tl.load(l_ptrs + offs_m)
dq = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
lo = 0
hi = min(start_m + BLOCK_M, seqlen_k) if CAUSAL else seqlen_k
batch_philox_offset = philox_offset_base + off_zh * seqlen_q * seqlen_k
"""
K1 K2 (d)V dO
Q1 qk11 qk12 (d)v1 dO1
Q2 qk21 qk22 (d)v2 dO2
QK: (seqlen_q, seqlen_k)
dO: (seqlen_q, hdim)
dV: (seqlen_k, hdim)
"""
for start_n in range(lo, hi, BLOCK_N):
kt = tl.load(K_block_ptr)
vt = tl.load(V_block_ptr)
qk = dot(BLOCK_M, BLOCK_DMODEL, BLOCK_DMODEL, q, kt)
if CAUSAL:
qk = tl.where(offs_m[:, None] >= offs_n[None, :] + start_n, qk,
float('-inf'))
p = tl.math.exp2(qk - l_i[:, None])
dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
dp += dot(BLOCK_M, BLOCK_DMODEL, BLOCK_DMODEL, do, vt)
if ENABLE_DROPOUT:
philox_offset = batch_philox_offset + start_m * seqlen_k + start_n
keep = dropout_mask(philox_seed, philox_offset, dropout_p,
BLOCK_M, BLOCK_N, seqlen_k)
dp = tl.where(keep, dp / (1 - dropout_p), 0)
ds = p * (dp - Di[:, None])
if BLOCK_M == 1:
dq += tl.view(kt, [BLOCK_DMODEL]) * ds.to(Q.type.element_ty)
else:
dq += tl.dot(ds.to(Q.type.element_ty), tl.trans(kt))
K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
V_block_ptr = tl.advance(V_block_ptr, (0, BLOCK_N))
DQ_block_ptr = tl.make_block_ptr(base=DQ + q_offset, shape=(seqlen_q,
BLOCK_DMODEL), strides=(stride_qm, stride_qk), offsets=(start_m, 0),
block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0))
tl.store(DQ_block_ptr, (dq * sm_scale).to(DQ_block_ptr.type.element_ty))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Backpropagation",
"Matrix Multiplication",
"Softmax",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Blocked Access",
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/ROCm/aotriton/blob/016f733e8ff746450e066f78bed68709ccd93e60/test/bwd_split_kernel.py |
01fdb3bd-4c8f-4387-a609-7db94baec8e2 | geglu.py | Kitsunetic/kitsu | kitsu/nn/geglu.py | 826967a493c89753ac2cf1e28b52b79998fc9076 | 0 | @triton.jit
def cosh(x):
return (tl.exp(x) + tl.exp(-x)) * 0.5
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Activation Functions",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Coalesced",
"Non-Tiled"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Low Latency"
]
} | [
"MIT"
] | https://github.com/Kitsunetic/kitsu/blob/826967a493c89753ac2cf1e28b52b79998fc9076/kitsu/nn/geglu.py |
5abc2eaa-cbaa-4445-b0ea-d0fd89b0f5ac | chunk_fuse.py | elephantmipt/rebased_minimal | flash_linear_attention/fla/ops/triton/abc/chunk_fuse.py | e7b945509972fab9f9c1c7be431abf7d6bf62c95 | 0 | @triton.jit
def chunk_abc_bwd_kernel_dq(k, rk, ck, dq, ds, s_qk_h, s_qk_t, s_qk_d,
s_sk_h, s_sk_t, s_sk_m, T, BT: tl.constexpr, BK: tl.constexpr, BM: tl.
constexpr, DK: tl.constexpr, DM: tl.constexpr, NT: tl.constexpr):
i_k, i_m, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
n_bh = tl.num_programs(2)
p_k = tl.make_block_ptr(k + i_bh * s_qk_h, (T, DK), (s_qk_t, s_qk_d), (
0, i_k * BK), (BT, BK), (1, 0))
p_rk = tl.make_block_ptr(rk + i_bh * s_sk_t * NT, (NT * DM,), (s_sk_m,),
(i_m * BM,), (BM,), (0,))
p_ck = tl.make_block_ptr(ck + i_bh * s_sk_h, (DM, T), (s_sk_m, s_sk_t),
(i_m * BM, 0), (BM, BT), (0, 1))
p_dq = tl.make_block_ptr(dq + (i_m * n_bh + i_bh) * s_qk_h, (T, DK), (
s_qk_t, s_qk_d), (0, i_k * BK), (BT, BK), (1, 0))
p_ds = tl.make_block_ptr(ds + i_bh * s_sk_h, (T, DM), (s_sk_t, s_sk_m),
(0, i_m * BM), (BT, BM), (1, 0))
o_i = tl.arange(0, BT)
m_s = o_i[:, None] >= o_i[None, :]
b_hk = tl.zeros([BM, BK], dtype=tl.float32)
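# Chunk-wise recurrence: each iteration combines an inter-chunk term (through the
# running state b_hk, decayed by b_rk) with a causal intra-chunk term, then folds
# the current chunk into b_hk.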
for _ in range(NT):
b_k = tl.load(p_k, boundary_check=(0, 1))
b_rk = tl.load(p_rk, boundary_check=(0,))
b_ck = tl.load(p_ck, boundary_check=(0, 1))
b_ds = tl.load(p_ds, boundary_check=(0, 1))
b_inter = tl.dot((b_ds * b_rk[None, :]).to(b_k.dtype), b_hk.to(b_k.
dtype), allow_tf32=False)
b_intra = tl.dot(tl.where(m_s, tl.dot(b_ds, b_ck, allow_tf32=False),
0).to(b_k.dtype), b_k, allow_tf32=False)
b_dq = b_inter + b_intra
b_hk = b_hk * b_rk[:, None] + tl.dot(b_ck, b_k, allow_tf32=False)
tl.store(p_dq, b_dq.to(p_dq.dtype.element_ty), boundary_check=(0, 1))
p_k = tl.advance(p_k, (BT, 0))
p_rk = tl.advance(p_rk, (DM,))
p_ck = tl.advance(p_ck, (0, BT))
p_dq = tl.advance(p_dq, (BT, 0))
p_ds = tl.advance(p_ds, (BT, 0))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Matrix Multiplication",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Blocked Access",
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/elephantmipt/rebased_minimal/blob/e7b945509972fab9f9c1c7be431abf7d6bf62c95/flash_linear_attention/fla/ops/triton/abc/chunk_fuse.py |
4eae1f4c-27a6-472e-83b1-3c5bb1e82ddf | bwd_preprocess.py | ROCm/aotriton | test/bwd_preprocess.py | 016f733e8ff746450e066f78bed68709ccd93e60 | 0 | @triton.jit
def bwd_preprocess(Out, DO, Delta, stride_oz, stride_oh, stride_om,
stride_on, stride_doz, stride_doh, stride_dom, stride_don, seqlen_q,
BLOCK_M: tl.constexpr, D_HEAD: tl.constexpr):
off_m = tl.program_id(0) * BLOCK_M
off_h = tl.program_id(1)
off_z = tl.program_id(2)
num_h = tl.num_programs(1)
o_offset = off_h * stride_oh + off_z * stride_oz
O_block_ptr = tl.make_block_ptr(base=Out + o_offset, shape=(seqlen_q,
D_HEAD), strides=(stride_om, stride_on), offsets=(off_m, 0),
block_shape=(BLOCK_M, D_HEAD), order=(1, 0))
do_offset = off_h * stride_doh + off_z * stride_doz
DO_block_ptr = tl.make_block_ptr(base=DO + do_offset, shape=(seqlen_q,
D_HEAD), strides=(stride_dom, stride_don), offsets=(off_m, 0),
block_shape=(BLOCK_M, D_HEAD), order=(1, 0))
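# Per-row correction term used by the attention backward kernels:
# delta_i = sum_d O[i, d] * dO[i, d].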
o = tl.load(O_block_ptr).to(tl.float32)
do = tl.load(DO_block_ptr).to(tl.float32)
delta = tl.sum(o * do, axis=1)
off_zh = off_z * num_h + off_h * 1
tl.store(Delta + off_zh * seqlen_q + off_m + tl.arange(0, BLOCK_M), delta)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Blocked Access",
"Coalesced",
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/ROCm/aotriton/blob/016f733e8ff746450e066f78bed68709ccd93e60/test/bwd_preprocess.py |
b3fcb89f-85fb-4bab-969a-d45fd9c5cb9e | ln_linear_triton.py | ethansmith2000/fused-layer-norm | ln_linear_triton.py | 84fe243a829364acdcfd7cd70b699db04838af0f | 0 | @triton.jit
def _layer_norm_fwd_fused(X_ptr, Y_ptr, W_ptr, B_ptr, Mean_ptr, RSTD_ptr,
stride, n_cols, eps, BLOCK_SIZE: tl.constexpr):
row_idx = tl.program_id(0)
col_offsets = tl.arange(0, BLOCK_SIZE)
mask = col_offsets < n_cols
Y_ptr += row_idx * stride
X_ptr += row_idx * stride
Mean_ptr += row_idx
RSTD_ptr += row_idx
X_row = tl.load(X_ptr + col_offsets, mask=mask, other=0)
W_row = tl.load(W_ptr + col_offsets, mask=mask, other=0)
B_row = tl.load(B_ptr + col_offsets, mask=mask, other=0)
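# Row-wise layer norm: compute the row mean and variance, then y = (x - mean) * rstd * w + b.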
mean = tl.sum(X_row, axis=0) / n_cols
demeaned = X_row - mean
var = tl.sum(demeaned * demeaned, axis=0) / n_cols
rstd = tl.rsqrt(var + eps)
tl.store(Mean_ptr, mean)
tl.store(RSTD_ptr, rstd)
Y_row = tl.fma(demeaned * rstd, W_row, B_row)
tl.store(Y_ptr + col_offsets, Y_row, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Blocked Access",
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/ethansmith2000/fused-layer-norm/blob/84fe243a829364acdcfd7cd70b699db04838af0f/ln_linear_triton.py |
be6a1d21-b2db-4870-b6b3-b6ac680f6a72 | mlstm_scan.py | LukasBluebaum/xLSTM-Triton-CUDA-Implementation | mlstm_scan.py | 6fb49b89cc74e7dadd0f3d56db05684bb4e86f4b | 0 | @triton.jit
def mlstm_triton_scan(Q, K, V, F, I, O, F_REDUCED, C, N, M, H, NH: tl.
constexpr, S: tl.constexpr, D: tl.constexpr, SB: tl.constexpr, VB: tl.
constexpr):
bh_id = tl.program_id(0)
sb_id = tl.program_id(1)
vb_id = tl.program_id(2)
num_sequence_blocks = tl.num_programs(1)
batch_id = bh_id // NH
head_id = bh_id % NH
v_range = tl.arange(0, VB) + vb_id * VB
v_range_3d = v_range[None, :, None]
k_range = tl.arange(0, VB)
sb_range = tl.arange(0, SB)
sb_range_2d = tl.arange(0, SB)[:, None]
sb_range_3d = tl.arange(0, SB)[:, None, None]
sb_range_offset = tl.arange(0, SB) + sb_id * SB
sb_range_offset_2d = sb_range_offset[:, None]
batch_offset_fi = batch_id * NH * S + head_id * S
batch_offset_f_reduced = (batch_id * NH * num_sequence_blocks + head_id *
num_sequence_blocks)
batch_offset_qkv = batch_id * NH * S * D + head_id * S * D
batch_offset_n = (batch_id * NH * num_sequence_blocks * D + head_id *
num_sequence_blocks * D)
batch_offset_c = (batch_id * NH * num_sequence_blocks * D * D + head_id *
num_sequence_blocks * D * D)
f = tl.load(F + batch_offset_fi + sb_range_offset, sb_range_offset < S)
i = tl.load(I + batch_offset_fi + sb_range_offset, sb_range_offset < S)
f_reduced_range = batch_offset_f_reduced + sb_range * 0 + sb_id - 1
f_reduced_mask = (sb_range == 0) & (sb_id != 0)
f_reduced = tl.load(F_REDUCED + f_reduced_range, f_reduced_mask, other=1.0)
vo_range = batch_offset_qkv + sb_range_offset_2d * D + v_range[None, :]
vo_mask = (sb_range_offset_2d < S) & (v_range[None, :] < D)
v = tl.load(V + vo_range, vo_mask)
normalizer = tl.zeros((SB,), dtype=tl.float32)
h = tl.zeros((SB, VB), dtype=tl.float32)
k_scale_factor = tl.sqrt(tl.full((1,), D, dtype=tl.float32))
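# For each head-dimension tile of width VB: combine the carried-in state (c_reduced, n)
# with this block's gated outer products via scan_op, run an associative scan over time,
# and read the result out with q (h for the numerator, normalizer for the denominator).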
for j in tl.range(tl.cdiv(D, VB)):
k_range_ = batch_offset_qkv + sb_range_offset_2d * D + k_range[None, :]
k_mask = (sb_range_offset_2d < S) & (k_range[None, :] < D)
k = tl.load(K + k_range_, k_mask) / k_scale_factor
q = tl.load(Q + k_range_, k_mask)
c_range = batch_offset_c + sb_range_3d + (sb_id - 1
) * D * D + v_range_3d * D + k_range[None, None, :]
c_mask = (sb_range_3d == 0) & (v_range_3d < D) & (k_range[None,
None, :] < D) & (sb_id != 0)
c_reduced = tl.load(C + c_range, c_mask, other=0)
vk = v[:, :, None] * k[:, None, :] * i[:, None, None]
f_tmp, vk_tmp = scan_op(f_reduced[:, None, None], c_reduced, f[:,
None, None], vk)
f_tmp = tl.broadcast_to(f_tmp, (SB, VB, VB))
_, c = tl.associative_scan((f_tmp, vk_tmp), 0, scan_op)
h += tl.sum(c * q[:, None, :], -1)
n_range = batch_offset_n + sb_range_2d + (sb_id - 1) * D + k_range[
None, :]
n_mask = (sb_range_2d == 0) & (k_range[None, :] < D) & (sb_id != 0)
n = tl.load(N + n_range, n_mask, other=0.0)
f_tmp, n_tmp = scan_op(f_reduced[:, None], n, f[:, None], k * i[:,
None])
_, n = tl.associative_scan((tl.broadcast_to(f_tmp, (SB, VB)), n_tmp
), 0, scan_op)
normalizer += tl.sum(n * q, -1)
k_range += VB
m = tl.load(M + batch_offset_fi + sb_range_offset, sb_range_offset < S)
o = tl.load(O + vo_range, vo_mask)
normalizer = tl.maximum(tl.abs(normalizer), tl.exp(-m))[:, None] + 1e-06
h = h / normalizer * silu(o)
tl.store(H + vo_range, h, vo_mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Recurrent Neural Networks",
"Attention Mechanisms",
"Matrix Multiplication",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Blocked Access",
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/LukasBluebaum/xLSTM-Triton-CUDA-Implementation/blob/6fb49b89cc74e7dadd0f3d56db05684bb4e86f4b/mlstm_scan.py |
8e56e989-ac16-4909-9f0e-4ab0f6c13dae | math.py | BobMcDear/attorch | attorch/math.py | da06cb6236bb47195e33fe3986ed21c675ed94cc | 0 | @triton.jit
def glu(input1, input2, param, act_func: tl.constexpr):
"""
Applies the gated linear unit with an arbitrary activation function
to the input.
Args:
input1: First half of input to gate.
The first half must be of the same shape as the second half.
input2: Second half of input to gate.
The second half must be of the same shape as the first half.
param: Parameter in the case of parameterized activation functions.
act_func: Name of activation function to apply.
Options are 'sigmoid', 'tanh', 'relu', 'gelu', 'silu',
'relu6', 'hardsigmoid', 'hardswish', 'selu', 'mish', and 'leaky_relu'.
Returns:
Input transformed by the gated linear unit
with an arbitrary activation function.
"""
return input1 * apply_act_func(input2, None, None, None, param,
act_func, False)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Activation Functions",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Coalesced",
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/BobMcDear/attorch/blob/da06cb6236bb47195e33fe3986ed21c675ed94cc/attorch/math.py |
fdd2e0b1-dd78-4f2b-8862-5b13f1fc1f97 | k_layer_norm.py | cpuhrsch/torchfused | torchfused/triton/k_layer_norm.py | 6c40ed160dcecbe7825f268f7c86bccd359e0ebf | 0 | @triton.jit
def _affine(W, B, N, x, META):
cols = tl.arange(0, META['BLOCK_SIZE_N'])
w = tl.load(W + cols, mask=cols < N, other=1.0)
zero = 0.0
zero = zero.to(w.dtype)
w = tl.where(cols < N, w, zero)
b = tl.load(B + cols, mask=cols < N, other=0.0)
b = tl.where(cols < N, b, zero)
y = x * w + b
return y
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Coalesced",
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"BSD"
] | https://github.com/cpuhrsch/torchfused/blob/6c40ed160dcecbe7825f268f7c86bccd359e0ebf/torchfused/triton/k_layer_norm.py |