uuid (string, 36) | file_name (string, 5-50) | repo_name (110 classes) | file_path (string, 7-112) | commit_hash (110 classes) | starcount (int64, 0-0) | input (string, 39-33.8k) | category (dict) | licenses (sequence, 1-2) | github_url (string, 94-193)
---|---|---|---|---|---|---|---|---|---|
6d32b21d-530f-4d1d-a2b2-bb08a0c056cc | triton_matric_matmul.py | elphinkuo/fast_matrix_multiplication | dot_product/on_gpu/Triton/triton_matric_matmul.py | 4e875a17e95b7ccf9af102d2c0f8cc31ed9a29f3 | 0 | @triton.jit
def _matmul_kernel(A, B, C, M, N, K, **meta):
TILE_M = meta['BLOCK_M']
TILE_N = meta['BLOCK_N']
TILE_K = 128
m = tl.program_id(0) * TILE_M + tl.arange(0, TILE_M)
n = tl.program_id(1) * TILE_N + tl.arange(0, TILE_N)
acc = tl.zeros((TILE_M, TILE_N), dtype=tl.float32)
    for k in range(0, K, TILE_K):
        ks = k + tl.arange(0, TILE_K)
        a = tl.load(A + m[:, None] * K + ks[None, :], mask=(m[:, None] < M) &
            (ks[None, :] < K), other=0.0)
        b = tl.load(B + ks[:, None] * N + n[None, :], mask=(ks[:, None] < K) &
            (n[None, :] < N), other=0.0)
        acc += tl.dot(a, b)
    tl.store(C + m[:, None] * N + n[None, :], acc, mask=(m[:, None] < M) &
        (n[None, :] < N))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Tiled",
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/elphinkuo/fast_matrix_multiplication/blob/4e875a17e95b7ccf9af102d2c0f8cc31ed9a29f3/dot_product/on_gpu/Triton/triton_matric_matmul.py |
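A minimal host-side launch sketch for the tiled matmul kernel above, assuming the older Triton meta-parameter API implied by its `**meta` signature (constexpr tile sizes passed as launch keywords). The tensor names, sizes, and block shapes below are illustrative assumptions, not code from the repository.

import torch
import triton

# Hypothetical fp32 inputs with a shared inner dimension.
M, N, K = 512, 512, 1024
a = torch.randn(M, K, device='cuda', dtype=torch.float32)
b = torch.randn(K, N, device='cuda', dtype=torch.float32)
c = torch.empty(M, N, device='cuda', dtype=torch.float32)

BLOCK_M, BLOCK_N = 64, 64
# One program per (BLOCK_M, BLOCK_N) output tile; the keywords land in the kernel's **meta dict.
grid = (triton.cdiv(M, BLOCK_M), triton.cdiv(N, BLOCK_N))
_matmul_kernel[grid](a, b, c, M, N, K, BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N)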
1325bcba-7d28-47b5-b18b-31e0f79878f5 | triton_sll.py | pytorch/FBGEMM | fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py | fe980ab54a6e28818d81c8694b6564e7f804418b | 0 | @triton.jit
def jagged_2_softmax_kernel(input_ptr, output_ptr, offsets_row_ptr,
offsets_col_ptr, offsets_overall_ptr, input_stride, output_stride,
transpose, max_seq_len_row, max_seq_len_col, BLOCK_SIZE: tl.constexpr):
"""
input shape is [sum_B(Ni * Hi)]
output shape is [sum_B(Ni * Hi)]
Padded version = [B, N, H]
    Calculate softmax along the N dim
    Each kernel calculates softmax for 1 sample and 1 head
offsets_row.size == offsets_col.size == offsets_overall.size
"""
pid_batch = tl.program_id(0)
pid_head = tl.program_id(1)
begin = tl.load(offsets_overall_ptr + pid_batch)
if transpose:
N = tl.load(offsets_row_ptr + pid_batch + 1) - tl.load(
offsets_row_ptr + pid_batch)
H = tl.load(offsets_col_ptr + pid_batch + 1) - tl.load(
offsets_col_ptr + pid_batch)
stride_n = H
stride_h = H // H
H = tl.minimum(max_seq_len_col, H)
N = tl.minimum(max_seq_len_row, N)
else:
N = tl.load(offsets_col_ptr + pid_batch + 1) - tl.load(
offsets_col_ptr + pid_batch)
H = tl.load(offsets_row_ptr + pid_batch + 1) - tl.load(
offsets_row_ptr + pid_batch)
stride_h = N
stride_n = N // N
H = tl.minimum(max_seq_len_row, H)
N = tl.minimum(max_seq_len_col, N)
if pid_head >= H:
return
if H == 0 or N == 0:
return
start_ptr = input_ptr + begin * input_stride
offsets = tl.arange(0, BLOCK_SIZE)
input_ptrs = (start_ptr + offsets * input_stride * stride_n + pid_head *
input_stride * stride_h)
row = tl.load(input_ptrs, mask=offsets < N, other=-float('inf'))
row_mins_max = row - tl.max(row, axis=0)
numerator = tl.exp(row_mins_max)
denominator = tl.sum(numerator, axis=0)
softmax_output = numerator / denominator
output_start_ptr = output_ptr + begin * output_stride
output_ptrs = (output_start_ptr + offsets * output_stride * stride_n +
pid_head * output_stride * stride_h)
tl.store(output_ptrs, softmax_output, mask=offsets < N)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Softmax"
],
"Memory Access Pattern": [
"Coalesced",
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"BSD",
"MIT"
] | https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py |
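A hedged sketch of how the jagged softmax kernel above might be driven from the host, based only on its docstring and the grid indices it reads (program_id(0) = sample, program_id(1) = head). The offset tensors, shapes, and BLOCK_SIZE choice are assumptions for illustration, not the FBGEMM wrapper code.

import torch
import triton

# Hypothetical jagged batch: per-sample sequence lengths N_i and head counts H_i.
lengths = torch.tensor([3, 5], device='cuda')
heads = torch.tensor([2, 2], device='cuda')
zero = torch.zeros(1, device='cuda', dtype=torch.long)
offsets_row = torch.cat([zero, lengths.cumsum(0)])
offsets_col = torch.cat([zero, heads.cumsum(0)])
offsets_overall = torch.cat([zero, (lengths * heads).cumsum(0)])

x = torch.randn(int((lengths * heads).sum()), device='cuda')   # flattened [sum_B(Ni*Hi)]
y = torch.empty_like(x)
max_N, max_H = int(lengths.max()), int(heads.max())

BLOCK_SIZE = triton.next_power_of_2(max_N)   # must cover the longest softmax row
grid = (lengths.numel(), max_H)              # one program per (sample, head)
jagged_2_softmax_kernel[grid](x, y, offsets_row, offsets_col, offsets_overall,
    x.stride(0), y.stride(0), True, max_N, max_H, BLOCK_SIZE=BLOCK_SIZE)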
c5681889-cbdf-4c8c-b730-916ee6ccb0d9 | triton_ops.py | huyz2023/2by4-pretrain | sparse/triton_ops.py | 9e330125dea71e5a3dee235f4efb8869f9e4cdd0 | 0 | @triton.jit
def _MVUE24_approx_triton(dense_ptr, sparse_ptr, dense_row_stride,
sparse_row_stride, dense_col_stride, sparse_col_stride, m, k, seed,
BLOCK_SIZE: tl.constexpr, ARRAY_LAYOUT: tl.constexpr):
if ARRAY_LAYOUT == 'row':
row_idx = tl.program_id(0)
col_idx = tl.program_id(1) * 4 * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE
) * 4
mask = col_idx < k
elif ARRAY_LAYOUT == 'col':
row_idx = tl.arange(0, BLOCK_SIZE) + tl.program_id(0) * BLOCK_SIZE
col_idx = tl.program_id(1) * 4
mask = row_idx < m
dense_40 = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx +
0) * dense_col_stride, mask=mask)
dense_41 = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx +
1) * dense_col_stride, mask=mask)
dense_42 = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx +
2) * dense_col_stride, mask=mask)
dense_43 = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx +
3) * dense_col_stride, mask=mask)
if ARRAY_LAYOUT == 'row':
seed0 = seed + (tl.program_id(0) + tl.program_id(1) * m) * 2
seed1 = seed + (tl.program_id(0) + tl.program_id(1) * m) * 2 + 1
else:
seed0 = seed + (tl.program_id(0) * k // 16 + tl.program_id(1)) * 2
seed1 = seed + (tl.program_id(0) * k // 16 + tl.program_id(1)) * 2 + 1
random0 = tl.rand(seed0, tl.arange(0, BLOCK_SIZE), n_rounds=5)
random1 = tl.rand(seed1, tl.arange(0, BLOCK_SIZE), n_rounds=5)
dense_40, dense_41, dense_42, dense_43, m0, m1, m2, m3 = _MVUE24_approx(
dense_40, dense_41, dense_42, dense_43, random0, random1)
tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 0) *
sparse_col_stride, dense_40, mask=mask & m0)
tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 1) *
sparse_col_stride, dense_41, mask=mask & m1)
tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 2) *
sparse_col_stride, dense_42, mask=mask & m2)
tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 3) *
sparse_col_stride, dense_43, mask=mask & m3)
| {
"Data Type": [
"fp32",
"uint8"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Coalesced",
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Memory-Bound",
"Compute Bound"
]
} | [
"BSD"
] | https://github.com/huyz2023/2by4-pretrain/blob/9e330125dea71e5a3dee235f4efb8869f9e4cdd0/sparse/triton_ops.py |
ae2cb3a3-be88-4b7a-bbba-b418ac601259 | parallel_scan.py | chengkai-liu/RecBLR | parallel_scan.py | 66e520c26e28c05a5425ba2e81c9169b7e0176e2 | 0 | @triton.jit
def unpack64(merged):
tl.static_assert(merged.dtype == tl.uint64)
b = (merged & 4294967295).to(tl.uint32).to(tl.float32, bitcast=True)
a = (merged >> 32).to(tl.uint32).to(tl.float32, bitcast=True)
return a, b
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/chengkai-liu/RecBLR/blob/66e520c26e28c05a5425ba2e81c9169b7e0176e2/parallel_scan.py |
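For reference, the bit-level round trip that `unpack64` performs can be checked on the host with NumPy; this is a standalone illustration of the packing convention (high 32 bits hold a, low 32 bits hold b), not code from the repository.

import numpy as np

a, b = np.float32(1.5), np.float32(-2.25)
# Pack: reinterpret each float32 as uint32, put a in the high half and b in the low half.
merged = (np.uint64(a.view(np.uint32)) << np.uint64(32)) | np.uint64(b.view(np.uint32))
# Unpack exactly as the kernel does (mask the low 32 bits, shift for the high 32 bits).
b_out = np.uint32(merged & np.uint64(0xFFFFFFFF)).view(np.float32)
a_out = np.uint32(merged >> np.uint64(32)).view(np.float32)
assert a_out == a and b_out == b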
f031d842-9ab5-40cf-b113-7fe0ef2ae51e | y_5.py | IntelLabs/EquiTriton | src/equitriton/sph_harm/direct/y_5.py | 1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c | 0 | @triton.jit
def fifth_order_bwd(coord_ptr: tl.tensor, coord_grad_ptr: tl.tensor,
sph_grad_ptr: tl.tensor, block_size: tl.constexpr, coord_numel: tl.
constexpr, output_numel: tl.constexpr, col_offset: tl.constexpr,
output_stride: tl.constexpr):
block_id = tl.program_id(0)
coord_stride = 3
coord_striding = tl.arange(0, block_size) * coord_stride
coord_row_offset = coord_striding + block_size * coord_stride * block_id
x = tl.load(coord_ptr + coord_row_offset, mask=coord_row_offset <
coord_numel)
y = tl.load(coord_ptr + coord_row_offset + 1, mask=coord_row_offset + 1 <
coord_numel)
z = tl.load(coord_ptr + coord_row_offset + 2, mask=coord_row_offset + 2 <
coord_numel)
output_striding = tl.arange(0, block_size) * output_stride
output_row_offset = (output_striding + block_size * output_stride *
block_id + col_offset)
g_0 = tl.load(sph_grad_ptr + output_row_offset, mask=output_row_offset <
output_numel)
g_1 = tl.load(sph_grad_ptr + output_row_offset + 1, mask=
output_row_offset + 1 < output_numel)
g_2 = tl.load(sph_grad_ptr + output_row_offset + 2, mask=
output_row_offset + 2 < output_numel)
g_3 = tl.load(sph_grad_ptr + output_row_offset + 3, mask=
output_row_offset + 3 < output_numel)
g_4 = tl.load(sph_grad_ptr + output_row_offset + 4, mask=
output_row_offset + 4 < output_numel)
g_5 = tl.load(sph_grad_ptr + output_row_offset + 5, mask=
output_row_offset + 5 < output_numel)
g_6 = tl.load(sph_grad_ptr + output_row_offset + 6, mask=
output_row_offset + 6 < output_numel)
g_7 = tl.load(sph_grad_ptr + output_row_offset + 7, mask=
output_row_offset + 7 < output_numel)
g_8 = tl.load(sph_grad_ptr + output_row_offset + 8, mask=
output_row_offset + 8 < output_numel)
g_9 = tl.load(sph_grad_ptr + output_row_offset + 9, mask=
output_row_offset + 9 < output_numel)
g_10 = tl.load(sph_grad_ptr + output_row_offset + 10, mask=
output_row_offset + 10 < output_numel)
CONST000 = 1.60565407233314
CONST001 = 3.0
CONST002 = 3.21130814466628
CONST003 = 1.60565407233314
CONST004 = 6.42261628933256
CONST005 = 6.42261628933256
CONST006 = 8.67152307844476
CONST007 = 8.02827036166571
CONST008 = 6.9372184627558
CONST009 = 11.6340690431164
CONST010 = 12.8452325786651
CONST011 = 6.21867148191637
CONST012 = 6.21867148191637
CONST014 = 12.4373429638327
CONST017 = 12.8452325786651
CONST018 = 13.8744369255116
CONST019 = 24.8746859276655
CONST020 = 24.8746859276655
CONST021 = 27.7488738510232
CONST024 = 29.4321253055229
CONST027 = 7.35803132638072
CONST029 = 46.5362761724657
CONST030 = 51.3809303146605
CONST031 = 51.3809303146605
CONST034 = 101.955872807799
CONST036 = -8.67152307844475
CONST037 = 3.4686092313779
CONST038 = -88.2963759165686
CONST039 = -83.2466215530696
CONST040 = -69.8044142586986
CONST041 = -50.9779364038993
CONST042 = -50.9779364038993
CONST043 = -46.5362761724657
CONST044 = -44.1481879582843
CONST045 = -41.6233107765348
CONST046 = -38.5356977359954
CONST047 = -38.5356977359954
CONST048 = -33.166247903554
CONST049 = -33.9852909359329
CONST050 = 6.42261628933257
CONST051 = -33.9852909359329
CONST052 = -29.4321253055229
CONST053 = -27.7488738510232
CONST054 = -20.8116553882674
CONST055 = -19.2678488679977
CONST056 = -19.2678488679977
CONST057 = -16.9926454679664
CONST058 = -16.9926454679664
CONST059 = -13.8744369255116
CONST060 = -16.583123951777
CONST061 = -8.49632273398321
CONST062 = -6.9372184627558
CONST063 = -5.20291384706685
CONST064 = -3.4686092313779
VAR06 = x * x * x * x
VAR07 = x * x * x
VAR08 = x * x
VAR15 = y * y * y * y
VAR16 = y * y * y
VAR17 = y * y
VAR24 = z * z * z * z
VAR25 = z * z * z
VAR26 = z * z
g_x = tl.load(coord_grad_ptr + coord_row_offset, mask=coord_row_offset <
coord_numel)
g_y = tl.load(coord_grad_ptr + coord_row_offset + 1, mask=
coord_row_offset + 1 < coord_numel)
g_z = tl.load(coord_grad_ptr + coord_row_offset + 2, mask=
coord_row_offset + 2 < coord_numel)
g_x += g_0 * (CONST009 * VAR06 + CONST009 * VAR24 + CONST040 * VAR08 *
VAR26) + g_1 * y * (CONST038 * VAR08 * z - CONST052 * VAR25) + g_10 * (
CONST029 * VAR07 * z + CONST043 * VAR25 * x) + g_2 * (CONST001 *
VAR08 * (CONST059 * VAR17 + CONST064 * VAR26) + CONST006 * VAR06 -
CONST045 * VAR17 * VAR26 + CONST063 * VAR24) + g_3 * (CONST041 *
VAR08 * y * z - CONST049 * VAR16 * z + CONST057 * VAR25 * y) + g_4 * (
CONST000 * VAR24 + CONST001 * VAR08 * (CONST002 * VAR26 + CONST055 *
VAR17) + CONST007 * VAR06 + CONST010 * VAR15 + CONST056 * VAR17 * VAR26
) + g_5 * (CONST048 * VAR16 * x + y * (CONST019 * VAR07 + CONST019 *
VAR26 * x)) + g_6 * (CONST005 * VAR25 * x + z * (CONST004 * VAR07 +
CONST046 * VAR17 * x)) + g_7 * (CONST049 * VAR16 * x - CONST051 *
VAR07 * y) + g_8 * (CONST008 * VAR25 * x + z * (CONST039 * VAR17 *
x - CONST054 * VAR07)) + g_9 * y * (CONST024 * VAR07 + CONST038 *
VAR26 * x)
g_y += g_1 * (CONST052 * VAR07 * z - CONST052 * VAR25 * x) + g_2 * (-
CONST039 * VAR26 * x * y + CONST053 * VAR07 * y) + g_3 * (CONST058 *
VAR07 * z + x * (CONST034 * VAR17 * z + CONST057 * VAR25)) + g_4 * (
CONST047 * VAR07 * y + x * (CONST030 * VAR16 + CONST046 * VAR26 * y)
) + g_5 * (CONST001 * VAR17 * (CONST060 * VAR08 + CONST060 * VAR26) +
CONST011 * VAR06 + CONST012 * VAR24 + CONST014 * VAR08 * VAR26 -
CONST060 * VAR15) + g_6 * (CONST046 * VAR25 * y + z * (CONST031 *
VAR16 + CONST046 * VAR08 * y)) + g_7 * (CONST001 * VAR17 * (
CONST057 * VAR08 - CONST057 * VAR26) - CONST061 * VAR06 + CONST061 *
VAR24) + g_8 * (CONST021 * VAR25 * y + CONST039 * VAR08 * y * z
) + g_9 * (CONST027 * VAR06 + CONST027 * VAR24 + CONST044 * VAR08 *
VAR26)
g_z += g_0 * (CONST029 * VAR25 * x + CONST043 * VAR07 * z) + g_1 * y * (
-CONST038 * VAR26 * x + CONST052 * VAR07) + g_10 * (CONST009 *
VAR06 + CONST009 * VAR24 + CONST040 * VAR08 * VAR26) + g_2 * (
CONST062 * VAR07 * z + x * (-CONST039 * VAR17 * z + CONST054 * VAR25)
) + g_3 * (CONST058 * VAR07 * y + x * (CONST042 * VAR26 * y -
CONST049 * VAR16)) + g_4 * (CONST005 * VAR07 * z + x * (CONST046 *
VAR17 * z + CONST050 * VAR25)) + g_5 * (CONST048 * VAR16 * z + y *
(CONST019 * VAR08 * z + CONST020 * VAR25)) + g_6 * (CONST001 *
VAR26 * (CONST002 * VAR08 + CONST056 * VAR17) + CONST003 * VAR06 +
CONST007 * VAR24 + CONST017 * VAR15 + CONST056 * VAR08 * VAR17
) + g_7 * (-CONST049 * VAR16 * z + CONST051 * VAR25 * y) + g_8 * (
CONST001 * VAR26 * (CONST018 * VAR17 + CONST037 * VAR08) + CONST036 *
VAR24 + CONST045 * VAR08 * VAR17 - CONST063 * VAR06) + g_9 * y * (
CONST024 * VAR25 + CONST038 * VAR08 * z)
tl.store(coord_grad_ptr + coord_row_offset, g_x, mask=coord_row_offset <
coord_numel)
tl.store(coord_grad_ptr + coord_row_offset + 1, g_y, mask=
coord_row_offset + 1 < coord_numel)
tl.store(coord_grad_ptr + coord_row_offset + 2, g_z, mask=
coord_row_offset + 2 < coord_numel)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/direct/y_5.py |
e4b5235d-37a5-4b17-8c48-fa82e3aecf4f | paged_attn.py | AlibabaPAI/FLASHNN | flashnn/triton_kernels/paged_attn.py | 528a9301587f5fb135b25d973a87ba0a40a703a7 | 0 | @triton.autotune(configs=[triton.Config({'UNROLL_FACTOR': uf}) for uf in [1,
2, 4, 8]], key=['POWER_OF_2_MAX_SEQ_LEN', 'QUERY_GROUP_SIZE',
'USE_PARTITIONING', 'BLOCK_SIZE', 'HEAD_SIZE', 'PARTITION_SIZE'])
@triton.jit
def _paged_attn_wo_mma_kernel(exp_sums, max_logits, out, q, k_cache,
v_cache, scale, block_tables, seq_lens, max_num_blocks_per_seq,
alibi_slopes, stride_qm, stride_qn, stride_om, stride_on, stride_ok,
stride_km, stride_kn, stride_kk, stride_exp_m, stride_exp_n, BLOCK_SIZE:
tl.constexpr, HEAD_SIZE: tl.constexpr, QUERY_GROUP_SIZE: tl.constexpr,
PARTITION_SIZE: tl.constexpr, POWER_OF_2_MAX_SEQ_LEN: tl.constexpr,
USE_PARTITIONING: tl.constexpr, UNROLL_FACTOR: tl.constexpr):
head_idx = tl.program_id(axis=0)
kv_head_idx = head_idx // QUERY_GROUP_SIZE
seq_idx = tl.program_id(axis=1)
par_idx = tl.program_id(axis=2)
seq_len = tl.load(seq_lens + seq_idx)
if par_idx * PARTITION_SIZE >= seq_len:
return
num_context_blocks = tl.cdiv(seq_len, BLOCK_SIZE)
if USE_PARTITIONING:
num_blocks_per_par = PARTITION_SIZE // BLOCK_SIZE
start_block_idx = par_idx * num_blocks_per_par
end_block_idx = tl.minimum(start_block_idx + num_blocks_per_par,
num_context_blocks)
else:
start_block_idx = 0
end_block_idx = num_context_blocks
if alibi_slopes is None:
alibi_slope = 0.0
else:
alibi_slope = tl.load(alibi_slopes + head_idx)
block_offs = tl.arange(0, BLOCK_SIZE)
head_size_offs = tl.arange(0, HEAD_SIZE)
q = tl.load(q + seq_idx * stride_qm + head_idx * stride_qn + head_size_offs
)
q = (q * scale).to(tl.float16)
qkv = tl.zeros([BLOCK_SIZE, HEAD_SIZE], dtype=tl.float32)
qk_max = float('-inf')
exp_sum = 0.0
fp16_0 = tl.zeros([1, 1], dtype=k_cache.dtype.element_ty)
base_offs_kv = kv_head_idx * stride_kn + block_offs[:, None
] * stride_kk + head_size_offs[None, :]
block_base_ptrs = block_tables + seq_idx * max_num_blocks_per_seq
hi_unroll = (end_block_idx - 1) // UNROLL_FACTOR * UNROLL_FACTOR
if UNROLL_FACTOR == 1:
qkv, qk_max, exp_sum = _inner_paged_attn_unroll_0_kernel(q, k_cache,
v_cache, stride_km, block_base_ptrs, base_offs_kv, alibi_slope,
block_offs, seq_len, qkv, qk_max, exp_sum, BLOCK_SIZE,
start_block_idx, hi_unroll)
elif UNROLL_FACTOR == 2:
qkv, qk_max, exp_sum = _inner_paged_attn_unroll_2_kernel(q, k_cache,
v_cache, stride_km, block_base_ptrs, base_offs_kv, alibi_slope,
block_offs, seq_len, qkv, qk_max, exp_sum, BLOCK_SIZE,
start_block_idx, hi_unroll)
elif UNROLL_FACTOR == 4:
qkv, qk_max, exp_sum = _inner_paged_attn_unroll_4_kernel(q, k_cache,
v_cache, stride_km, block_base_ptrs, base_offs_kv, alibi_slope,
block_offs, seq_len, qkv, qk_max, exp_sum, BLOCK_SIZE,
start_block_idx, hi_unroll)
elif UNROLL_FACTOR == 8:
qkv, qk_max, exp_sum = _inner_paged_attn_unroll_8_kernel(q, k_cache,
v_cache, stride_km, block_base_ptrs, base_offs_kv, alibi_slope,
block_offs, seq_len, qkv, qk_max, exp_sum, BLOCK_SIZE,
start_block_idx, hi_unroll)
tl.debug_barrier()
for block_idx in range(hi_unroll, end_block_idx):
physical_block_idx = tl.load(block_tables + seq_idx *
max_num_blocks_per_seq + block_idx)
mask = block_offs[:, None] < seq_len - block_idx * BLOCK_SIZE
offs_kv = physical_block_idx * stride_km + base_offs_kv
k = tl.load(k_cache + offs_kv, mask=mask, other=fp16_0)
v = tl.load(v_cache + offs_kv, mask=mask, other=fp16_0)
_qk = tl.sum((q[None, :] * k).to(tl.float32), axis=1)
_qk = tl.where(block_offs < seq_len - block_idx * BLOCK_SIZE, _qk,
float('-inf'))
_qk += alibi_slope * (block_idx * BLOCK_SIZE + block_offs - seq_len + 1
)
_qk_max = tl.maximum(tl.max(_qk, axis=0), qk_max)
_exp_sum = exp_sum * tl.exp(qk_max - _qk_max) + tl.sum(tl.exp(_qk -
_qk_max), axis=0)
qkv = qkv * (exp_sum * tl.exp(qk_max - _qk_max)) + tl.exp(_qk[:,
None] - _qk_max) * v
qkv = qkv / _exp_sum
qk_max = _qk_max
exp_sum = _exp_sum
if USE_PARTITIONING:
offs_exp = seq_idx * stride_exp_m + head_idx * stride_exp_n + par_idx
tl.store(exp_sums + offs_exp, exp_sum)
tl.store(max_logits + offs_exp, qk_max)
offs_out = (seq_idx * stride_om + head_idx * stride_on + par_idx *
stride_ok + head_size_offs)
tl.store(out + offs_out, tl.sum(qkv, axis=0))
| {
"Data Type": [
"fp16",
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Softmax",
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Blocked Access",
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops",
"Persistent Kernels"
],
"Performance Objective": [
"High Throughput"
]
} | [
"Apache"
] | https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/paged_attn.py |
cd64708f-5721-4a20-be92-b7c64e1762ca | GELUglu.py | huyz2023/2by4-pretrain | sparse/GELUglu.py | 9e330125dea71e5a3dee235f4efb8869f9e4cdd0 | 0 | @triton.jit
def _gelu_glu_fwd_kernel(output_ptr, input_ptr, output_row_stride,
input_row_stride, output_col_stride, input_col_stride,
output_page_stride, input_page_stride, n_pages, BLOCK_SIZE: tl.constexpr):
row_idx = tl.program_id(0)
col_idx = tl.program_id(1)
x = tl.load(input_ptr + row_idx * input_row_stride + col_idx *
input_col_stride + tl.arange(0, BLOCK_SIZE // 2) *
input_page_stride, mask=tl.arange(0, BLOCK_SIZE // 2) < n_pages //
2, other=-float('inf'))
gate = tl.load(input_ptr + row_idx * input_row_stride + col_idx *
input_col_stride + (tl.arange(0, BLOCK_SIZE // 2) + n_pages // 2) *
input_page_stride, mask=tl.arange(0, BLOCK_SIZE // 2) < n_pages //
2, other=-float('inf'))
gate_cube = gate * gate * gate
beta = 0.7978845608028654
kappa = 0.044715
inner = beta * (gate + kappa * gate_cube)
inner_tanh = tanh(inner)
gate_gelu = 0.5 * gate * (inner_tanh + 1)
gelu_glu = gate_gelu * x
tl.store(output_ptr + row_idx * output_row_stride + col_idx *
output_col_stride + tl.arange(0, BLOCK_SIZE // 2) *
output_page_stride, gelu_glu, mask=tl.arange(0, BLOCK_SIZE // 2) <
n_pages // 2)
| {
"Data Type": [],
"Functionality": [
"Activation Functions"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"BSD"
] | https://github.com/huyz2023/2by4-pretrain/blob/9e330125dea71e5a3dee235f4efb8869f9e4cdd0/sparse/GELUglu.py |
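A small PyTorch reference for what the GELU-GLU forward kernel above computes per slice, useful for checking numerics. The layout assumption (first half of the last dimension is x, second half is the gate) mirrors the kernel's indexing and the tanh-GELU constants are the same; the function name and shapes are illustrative.

import torch

def gelu_glu_reference(inp: torch.Tensor) -> torch.Tensor:
    # inp: (..., 2*d); the kernel reads x from the first d pages and the gate from the last d.
    x, gate = inp.chunk(2, dim=-1)
    inner = 0.7978845608028654 * (gate + 0.044715 * gate.pow(3))   # beta * (g + kappa * g^3)
    gate_gelu = 0.5 * gate * (torch.tanh(inner) + 1.0)
    return gate_gelu * x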
e3441201-2cc6-4bc0-b20c-0cd97d2fe333 | triton_welford.py | pytorch-labs/tritonbench | tritonbench/operators/welford/triton_welford.py | 3a5dccb159834968567a2e45e561dc1aeaa8f8a8 | 0 | @triton.autotune(configs=[triton.Config({'XBLOCK': 1, 'RBLOCK': 1024},
num_stages=1, num_warps=8), triton.Config({'XBLOCK': 1, 'RBLOCK': 2048},
num_stages=1, num_warps=8)], key=['xnumel', 'rnumel'])
@triton.jit
def triton_red_fused_native_layer_norm_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr,
RBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp3_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp3_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp3_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + rnumel * x0), rmask, eviction_policy
='evict_last').to(tl.float32)
tmp1 = tmp0.to(tl.float32)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp3_mean_next, tmp3_m2_next, tmp3_weight_next = (triton_helpers.
welford_reduce(tmp2, tmp3_mean, tmp3_m2, tmp3_weight, roffset == 0)
)
tmp3_mean = tl.where(rmask, tmp3_mean_next, tmp3_mean)
tmp3_m2 = tl.where(rmask, tmp3_m2_next, tmp3_m2)
tmp3_weight = tl.where(rmask, tmp3_weight_next, tmp3_weight)
tmp3_tmp, tmp4_tmp, tmp5_tmp = triton_helpers.welford(tmp3_mean,
tmp3_m2, tmp3_weight, 1)
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tmp5 = tmp5_tmp[:, None]
tl.store(out_ptr0 + x0, tmp3, None)
tmp6 = rnumel
tmp7 = tmp4 / tmp6
tmp8 = 1e-05
tmp9 = tmp7 + tmp8
tmp10 = libdevice.rsqrt(tmp9)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp10, None)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp11 = tl.load(in_ptr0 + (r1 + rnumel * x0), rmask,
eviction_policy='evict_first').to(tl.float32)
tmp15 = tl.load(in_ptr1 + r1, rmask, eviction_policy='evict_last').to(
tl.float32)
tmp18 = tl.load(in_ptr2 + r1, rmask, eviction_policy='evict_last').to(
tl.float32)
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp12 - tmp3
tmp14 = tmp13 * tmp10
tmp16 = tmp15.to(tl.float32)
tmp17 = tmp14 * tmp16
tmp19 = tmp18.to(tl.float32)
tmp20 = tmp17 + tmp19
tmp21 = tmp20.to(tl.float32)
tl.store(out_ptr1 + (r1 + rnumel * x0), tmp21, rmask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound",
"Memory-Bound"
]
} | [
"BSD"
] | https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/operators/welford/triton_welford.py |
b6bbdca6-8c98-4528-a967-b358c90a1d6f | triton_fused_local_attn.py | LouChao98/vqtree | ops/triton_fused_local_attn.py | 27a53274df7a804bce27dffcce5f5be73f64b6f3 | 0 | @triton.jit
def _attn_fwd_inner(acc, l_i, m_i, q, sm_scale, K_block_ptr, V_block_ptr,
start_m, offs_m, offs_n, SEQLEN_K: tl.constexpr, WINDOW_SIZE: tl.
constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, EVEN_MN: tl.
constexpr, STAGE: tl.constexpr):
if STAGE == 1:
hi = start_m * BLOCK_M - WINDOW_SIZE + BLOCK_M
lo = start_m * BLOCK_M - WINDOW_SIZE
if hi < 0:
hi = 0
if lo < 0:
lo = 0
elif STAGE == 2:
hi = start_m * BLOCK_M
lo = start_m * BLOCK_M - WINDOW_SIZE + BLOCK_M
if lo < 0:
lo = 0
else:
lo, hi = start_m * BLOCK_M, (start_m + 1) * BLOCK_M
lo = tl.multiple_of(lo, BLOCK_M)
hi = min(hi, SEQLEN_K)
EVEN_MASK_FREE = EVEN_MN & ((STAGE == 1) | (STAGE == 2))
K_block_ptr = tl.advance(K_block_ptr, (0, lo))
V_block_ptr = tl.advance(V_block_ptr, (lo, 0))
for start_n in range(lo, hi, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
if EVEN_MASK_FREE:
k = tl.load(K_block_ptr)
else:
k = tl.load(K_block_ptr, boundary_check=(1,), padding_option='zero'
)
qk = tl.dot(q, k) * (sm_scale * RCP_LN2)
if STAGE == 1:
mask = offs_m[:, None] <= start_n + WINDOW_SIZE + offs_n[None, :]
qk += tl.where(mask, 0, NEGINF)
elif STAGE == 3:
mask = offs_m[:, None] >= start_n + offs_n[None, :]
qk += tl.where(mask, 0, NEGINF)
if not EVEN_MASK_FREE:
qk += tl.where((start_n + offs_n)[None, :] < SEQLEN_K, 0, NEGINF)
m_i_new = tl.maximum(m_i, tl.max(qk, 1))
alpha = tl.math.exp2(m_i - m_i_new)
p = tl.math.exp2(qk - m_i_new[:, None])
acc *= alpha[:, None]
if EVEN_MASK_FREE:
v = tl.load(V_block_ptr)
else:
v = tl.load(V_block_ptr, boundary_check=(1,), padding_option='zero'
)
acc += tl.dot(p.to(V_block_ptr.dtype.element_ty), v)
l_i = l_i * alpha + tl.sum(p, 1)
m_i = m_i_new
K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
return acc, l_i, m_i
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access",
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/LouChao98/vqtree/blob/27a53274df7a804bce27dffcce5f5be73f64b6f3/ops/triton_fused_local_attn.py |
362d6816-df21-46a1-b625-bc3f25aab424 | 06-fused-attention.py | triton-lang/triton | python/tutorials/06-fused-attention.py | a2b398e0bb1b120f31cf386d6ae3261c3ab84207 | 0 | @triton.jit
def _attn_bwd_dkdv(dk, dv, Q, k, v, sm_scale, DO, M, D, stride_tok,
stride_d, H, N_CTX, BLOCK_M1: tl.constexpr, BLOCK_N1: tl.constexpr,
HEAD_DIM: tl.constexpr, start_n, start_m, num_steps, MASK: tl.constexpr):
offs_m = start_m + tl.arange(0, BLOCK_M1)
offs_n = start_n + tl.arange(0, BLOCK_N1)
offs_k = tl.arange(0, HEAD_DIM)
qT_ptrs = Q + offs_m[None, :] * stride_tok + offs_k[:, None] * stride_d
do_ptrs = DO + offs_m[:, None] * stride_tok + offs_k[None, :] * stride_d
tl.static_assert(BLOCK_N1 % BLOCK_M1 == 0)
curr_m = start_m
step_m = BLOCK_M1
for blk_idx in range(num_steps):
qT = tl.load(qT_ptrs)
offs_m = curr_m + tl.arange(0, BLOCK_M1)
m = tl.load(M + offs_m)
qkT = tl.dot(k, qT)
pT = tl.math.exp2(qkT - m[None, :])
if MASK:
mask = offs_m[None, :] >= offs_n[:, None]
pT = tl.where(mask, pT, 0.0)
do = tl.load(do_ptrs)
ppT = pT
ppT = ppT.to(tl.float16)
dv += tl.dot(ppT, do)
Di = tl.load(D + offs_m)
dpT = tl.dot(v, tl.trans(do)).to(tl.float32)
dsT = pT * (dpT - Di[None, :])
dsT = dsT.to(tl.float16)
dk += tl.dot(dsT, tl.trans(qT))
curr_m += step_m
qT_ptrs += step_m * stride_tok
do_ptrs += step_m * stride_tok
return dk, dv
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/triton-lang/triton/blob/a2b398e0bb1b120f31cf386d6ae3261c3ab84207/python/tutorials/06-fused-attention.py |
465954ee-4cfe-46e9-8668-a230f02bb257 | triton_sll.py | pytorch/FBGEMM | fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py | fe980ab54a6e28818d81c8694b6564e7f804418b | 0 | @triton.jit
def jagged_dense_bmm_kernel(a_ptr, a_offset_ptr, b_ptr, c_ptr, N, K,
stride_am, stride_ak, stride_bl, stride_bk, stride_bn, stride_cm,
stride_cn, max_seq_len, allow_tf32: tl.constexpr, BLOCK_SIZE_M: tl.
constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr):
"""Kernel for computing the matmul C = A x B.
A has shape (sum_B(M_i), K), B has shape (B, K, N) and C has shape (sum_B(M_i), N)
"""
pid_batch = tl.program_id(0)
pid = tl.program_id(1)
begin = tl.load(a_offset_ptr + pid_batch)
end = tl.load(a_offset_ptr + pid_batch + 1)
M = tl.minimum(end - begin, max_seq_len)
if M == 0:
return
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
pid_m = pid // num_pid_n
pid_n = pid % num_pid_n
offs_am = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
if pid_m * BLOCK_SIZE_M >= M:
return
offs_bn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
if pid_n * BLOCK_SIZE_N >= N:
return
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] *
stride_ak + begin * stride_am)
b_ptrs = b_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] *
stride_bn + pid_batch * stride_bl)
c = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for k in range(0, K, BLOCK_SIZE_K):
updated_offset = k + offs_k
a = tl.load(a_ptrs, mask=(updated_offset[None, :] < K) & (offs_am[:,
None] < M), other=0.0)
b = tl.load(b_ptrs, mask=(updated_offset[:, None] < K) & (offs_bn[
None, :] < N), other=0.0)
c += tl.dot(a, b, allow_tf32=allow_tf32)
a_ptrs += BLOCK_SIZE_K * stride_ak
b_ptrs += BLOCK_SIZE_K * stride_bk
offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
mask = (offs_m[:, None] < M) & (offs_n[None, :] < N)
c_ptrs = c_ptr + stride_cm * offs_m[:, None] + stride_cn * offs_n[None, :
] + begin * stride_cm
tl.store(c_ptrs, c, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access",
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"Memory-Bound"
]
} | [
"BSD",
"MIT"
] | https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py |
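A hedged host-side launch sketch for the jagged-dense batched matmul above, derived from its docstring and the grid arithmetic inside the kernel (axis 0 = batch, axis 1 = flattened M x N tile index). Shapes, block sizes, and max_seq_len are placeholders, not the FBGEMM wrapper.

import torch
import triton

B, K, N, max_seq_len = 4, 64, 128, 256
lengths = torch.randint(1, max_seq_len + 1, (B,), device='cuda')
a_offsets = torch.cat([torch.zeros(1, device='cuda', dtype=torch.long), lengths.cumsum(0)])
a = torch.randn(int(lengths.sum()), K, device='cuda')   # (sum_B(M_i), K)
b = torch.randn(B, K, N, device='cuda')                 # (B, K, N)
c = torch.empty(int(lengths.sum()), N, device='cuda')   # (sum_B(M_i), N)

BM, BN, BK = 32, 32, 32
# Axis 1 must cover the worst-case tile count; short samples return early inside the kernel.
grid = (B, triton.cdiv(max_seq_len, BM) * triton.cdiv(N, BN))
jagged_dense_bmm_kernel[grid](a, a_offsets, b, c, N, K,
    a.stride(0), a.stride(1), b.stride(0), b.stride(1), b.stride(2),
    c.stride(0), c.stride(1), max_seq_len, allow_tf32=False,
    BLOCK_SIZE_M=BM, BLOCK_SIZE_N=BN, BLOCK_SIZE_K=BK)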
ff383b02-6ac6-4d19-8a8c-ff80198f182f | z_order.py | Kitsunetic/space-filling-pytorch | space_filling_pytorch/functional/z_order.py | 0de955ad1036973ee7506c5a0124c208acec722d | 0 | @triton.jit
def _encode_z_kernel(xyz_ptr, distance_ptr, B, N, space_size, x_offset,
y_offset, z_offset, str_xyz_B, str_xyz_N, str_xyz_C, BLK: tl.constexpr,
ASSIGN_BATCH_INDEX: tl.constexpr):
pid_b = tl.program_id(0)
pid_n = tl.program_id(1)
offs_n = pid_n * BLK + tl.arange(0, BLK)
mask_n = offs_n < N
xyz_ptrs = xyz_ptr + pid_b * str_xyz_B + offs_n * str_xyz_N
fx = tl.load(xyz_ptrs + x_offset * str_xyz_C, mask=mask_n)
fy = tl.load(xyz_ptrs + y_offset * str_xyz_C, mask=mask_n)
fz = tl.load(xyz_ptrs + z_offset * str_xyz_C, mask=mask_n)
ret = _calculate_zorder(fx, fy, fz, space_size)
if ASSIGN_BATCH_INDEX:
ret |= pid_b.to(tl.int64) << 48
tl.store(distance_ptr + pid_b * N + offs_n, ret, mask=mask_n)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/Kitsunetic/space-filling-pytorch/blob/0de955ad1036973ee7506c5a0124c208acec722d/space_filling_pytorch/functional/z_order.py |
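A sketch of how the Z-order encoding kernel above might be launched, assuming the `_calculate_zorder` device helper from the same module is in scope. The point-cloud shape, space_size, and block size are illustrative assumptions.

import torch
import triton

B, N, space_size = 2, 10_000, 1024
xyz = torch.rand(B, N, 3, device='cuda')                    # (batch, points, xyz)
code = torch.empty(B, N, device='cuda', dtype=torch.int64)  # packed batch index + Z-order code

BLK = 512
grid = (B, triton.cdiv(N, BLK))   # one program per (batch, block of points)
_encode_z_kernel[grid](xyz, code, B, N, space_size, 0, 1, 2,
    xyz.stride(0), xyz.stride(1), xyz.stride(2),
    BLK=BLK, ASSIGN_BATCH_INDEX=True)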
1f260b65-2aa3-4dd8-ad87-6f5bba941dd2 | block_sparse_attention_lut.py | sparklesea/sparse-quant | sparse-attention/muxi/playground/kernels/block_sparse_attention_lut.py | e3d8b6ecab208c31b744913ed8c3caaa43605f86 | 0 | @triton.jit
def _sparse_attention_prefill_fwd_kernel(Q, K, V, sm_scale, Out, lut,
stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh,
stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn,
stride_oz, stride_oh, stride_om, stride_on, stride_lz, stride_lh,
stride_lx, Z, H, N_CTX, LT, NNZ: tl.constexpr, BLOCK_M: tl.constexpr,
BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr):
start_m = tl.program_id(0)
off_hz = tl.program_id(1)
lut_indicator = tl.program_id(1) % H
qvk_offset = off_hz * stride_qh
lut_offset = lut_indicator * stride_lz
Q_block_ptr = tl.make_block_ptr(base=Q + qvk_offset, shape=(N_CTX,
BLOCK_DMODEL), strides=(stride_qm, stride_qk), offsets=(start_m *
BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0))
K_block_ptr = tl.make_block_ptr(base=K + qvk_offset, shape=(
BLOCK_DMODEL, N_CTX), strides=(stride_kk, stride_kn), offsets=(0, 0
), block_shape=(BLOCK_DMODEL, BLOCK_N), order=(0, 1))
V_block_ptr = tl.make_block_ptr(base=V + qvk_offset, shape=(N_CTX,
BLOCK_DMODEL), strides=(stride_vk, stride_vn), offsets=(0, 0),
block_shape=(BLOCK_N, BLOCK_DMODEL), order=(1, 0))
O_block_ptr = tl.make_block_ptr(base=Out + qvk_offset, shape=(N_CTX,
BLOCK_DMODEL), strides=(stride_om, stride_on), offsets=(start_m *
BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0))
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float('inf')
l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
qk_scale = sm_scale * 1.44269504
q = tl.load(Q_block_ptr, boundary_check=(0, 1), padding_option='zero')
q = (q * qk_scale).to(tl.float16)
last_nnz_id = -1
for nnz_id in range(NNZ):
present_nnz_id = tl.load(lut + lut_offset + start_m * stride_lh +
nnz_id * stride_lx)
start_n = present_nnz_id * BLOCK_N
start_n = tl.multiple_of(start_n, BLOCK_N)
present_nnz_id = present_nnz_id.to(tl.int32)
k = tl.load(tl.advance(K_block_ptr, (0, start_n)), boundary_check=(
0, 1), padding_option='zero')
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
qk += tl.dot(q, k)
if LT:
qk = tl.where(offs_m[:, None] >= start_n + offs_n[None, :], qk,
float('-inf'))
qk = tl.where((offs_m[:, None] < N_CTX) & ((start_n + offs_n)[None,
:] < N_CTX), qk, float('-inf'))
m_ij = tl.max(qk, 1)
p = tl.math.exp2(qk - m_ij[:, None])
p = tl.where(m_ij[:, None] == tl.full((BLOCK_M, BLOCK_N), float(
'-inf'), tl.float32), 0.0, tl.math.exp2(qk - m_ij[:, None]))
p = p * (last_nnz_id != present_nnz_id)
l_ij = tl.sum(p, 1)
m_i_new = tl.maximum(m_i, m_ij)
alpha = tl.math.exp2(m_i - m_i_new)
beta = tl.math.exp2(m_ij - m_i_new)
l_i *= alpha
l_i_new = l_i + beta * l_ij
p_scale = beta / l_i_new
p = p * p_scale[:, None]
acc_scale = l_i / l_i_new
acc = acc * acc_scale[:, None]
v = tl.load(tl.advance(V_block_ptr, (start_n, 0)), boundary_check=(
0, 1), padding_option='zero')
p = p.to(tl.float16)
acc += tl.dot(p, v)
l_i = l_i_new
m_i = m_i_new
last_nnz_id = present_nnz_id
tl.store(O_block_ptr, acc.to(tl.float16), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access",
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache",
"BSD"
] | https://github.com/sparklesea/sparse-quant/blob/e3d8b6ecab208c31b744913ed8c3caaa43605f86/sparse-attention/muxi/playground/kernels/block_sparse_attention_lut.py |
33299f98-59f0-48e0-ae23-2da139cb499d | triton_sll.py | pytorch/FBGEMM | fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py | fe980ab54a6e28818d81c8694b6564e7f804418b | 0 | @triton.jit
def jagged_softmax_backward_kernel(grad_output_ptr, softmax_output_ptr,
grad_input_ptr, input_offsets_ptr, grad_output_row_stride,
grad_output_head_stride, softmax_output_row_stride,
softmax_output_head_stride, grad_input_row_stride,
grad_input_head_stride, max_seq_len: tl.constexpr, BLOCK_SIZE: tl.constexpr
):
"""
    grad_output_ptr shape is [SUM_B, H]
softmax_output shape is [SUM_B, H]
grad_input shape is [SUM_B, H]
"""
pid_batch = tl.program_id(0)
pid_head = tl.program_id(1)
row_begin = tl.load(input_offsets_ptr + pid_batch)
row_end = tl.load(input_offsets_ptr + pid_batch + 1)
N = tl.minimum(max_seq_len, row_end - row_begin)
col_offsets = tl.arange(0, BLOCK_SIZE)
grad_output_ptrs = (grad_output_ptr + row_begin *
grad_output_row_stride + col_offsets * grad_output_row_stride +
pid_head * grad_output_head_stride)
softmax_output_ptrs = (softmax_output_ptr + row_begin *
softmax_output_row_stride + col_offsets * softmax_output_row_stride +
pid_head * softmax_output_head_stride)
grad_output_row = tl.load(grad_output_ptrs, mask=col_offsets < N, other=0.0
)
softmax_output_row = tl.load(softmax_output_ptrs, mask=col_offsets < N,
other=0.0)
sum_value = tl.sum(grad_output_row * softmax_output_row, axis=0)
grad_input_row = (grad_output_row - sum_value) * softmax_output_row
grad_input_ptrs = (grad_input_ptr + row_begin * grad_input_row_stride +
col_offsets * grad_input_row_stride + pid_head * grad_input_head_stride
)
tl.store(grad_input_ptrs, grad_input_row, mask=col_offsets < N)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Softmax"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"BSD",
"MIT"
] | https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py |
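A hedged launch sketch for the jagged softmax backward kernel above, following its docstring layout of [SUM_B, H] tensors and its (batch, head) grid; the shapes and BLOCK_SIZE choice are assumptions for illustration.

import torch
import triton

H, max_seq_len = 4, 512
lengths = torch.tensor([100, 200, 50], device='cuda')
offsets = torch.cat([torch.zeros(1, device='cuda', dtype=torch.long), lengths.cumsum(0)])
sum_B = int(lengths.sum())

grad_out = torch.randn(sum_B, H, device='cuda')
softmax_out = torch.rand(sum_B, H, device='cuda')
grad_in = torch.empty(sum_B, H, device='cuda')

BLOCK_SIZE = triton.next_power_of_2(min(max_seq_len, int(lengths.max())))
grid = (lengths.numel(), H)   # one program per (sample, head)
jagged_softmax_backward_kernel[grid](grad_out, softmax_out, grad_in, offsets,
    grad_out.stride(0), grad_out.stride(1),
    softmax_out.stride(0), softmax_out.stride(1),
    grad_in.stride(0), grad_in.stride(1),
    max_seq_len=max_seq_len, BLOCK_SIZE=BLOCK_SIZE)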
a5dd6188-758a-4f75-ad16-7e404fe62595 | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/linear_attn/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.jit
def chunk_linear_attn_fwd_kernel_h(k, v, h, h0, ht, s_k_h, s_k_t, s_k_d,
s_v_h, s_v_t, s_v_d, s_h_h, s_h_t, T: tl.constexpr, K: tl.constexpr, V:
tl.constexpr, BT: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr, NT:
tl.constexpr, USE_INITIAL_STATE: tl.constexpr, STORE_FINAL_STATE: tl.
constexpr):
i_k, i_v, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
b_h = tl.zeros([BK, BV], dtype=tl.float32)
if USE_INITIAL_STATE:
p_h0 = tl.make_block_ptr(h0 + i_bh * K * V, (K, V), (V, 1), (i_k *
BK, i_v * BV), (BK, BV), (1, 0))
b_h = tl.load(p_h0, boundary_check=(0, 1)).to(tl.float32)
for i_t in range(NT):
p_k = tl.make_block_ptr(k + i_bh * s_k_h, (K, T), (s_k_d, s_k_t), (
i_k * BK, i_t * BT), (BK, BT), (0, 1))
p_v = tl.make_block_ptr(v + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), (
i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_h = tl.make_block_ptr(h + i_bh * s_h_h + i_t * K * V, (K, V), (
s_h_t, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
tl.store(p_h, b_h.to(p_h.dtype.element_ty), boundary_check=(0, 1))
b_k = tl.load(p_k, boundary_check=(0, 1))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_h += tl.dot(b_k, b_v, allow_tf32=False)
if STORE_FINAL_STATE:
p_ht = tl.make_block_ptr(ht + i_bh * K * V, (K, V), (V, 1), (i_k *
BK, i_v * BV), (BK, BV), (1, 0))
tl.store(p_ht, b_h.to(p_ht.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/linear_attn/chunk.py |
e7e27939-c077-46e1-9632-7858a429dae5 | k_layer_norm.py | cpuhrsch/torchfused | torchfused/triton/k_layer_norm.py | 6c40ed160dcecbe7825f268f7c86bccd359e0ebf | 0 | @triton.jit
def _layer_norm_non_affine_fw(X, Y, M, V, stride, N, eps, **META):
_store(_layer_norm_non_affine(X, M, V, stride, N, eps, META), Y, stride,
N, META)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"BSD"
] | https://github.com/cpuhrsch/torchfused/blob/6c40ed160dcecbe7825f268f7c86bccd359e0ebf/torchfused/triton/k_layer_norm.py |
a8c30808-d914-41b7-8bd2-094cbdbfcbd0 | k_fused_matmul_bw.py | cpuhrsch/torchfused | torchfused/triton/k_fused_matmul_bw.py | 6c40ed160dcecbe7825f268f7c86bccd359e0ebf | 0 | @triton.heuristics({'EVEN_N': lambda *args, **meta: args[3] % meta[
'BLOCK_COL'] == 0})
@triton.autotune(configs=[triton.Config({'BLOCK_COL': 32}, num_stages=5,
num_warps=2), triton.Config({'BLOCK_COL': 64}, num_stages=5, num_warps=
2), triton.Config({'BLOCK_COL': 128}, num_stages=3, num_warps=4),
triton.Config({'BLOCK_COL': 256}, num_stages=3, num_warps=8), triton.
Config({'BLOCK_COL': 512}, num_stages=3, num_warps=8), triton.Config({
'BLOCK_COL': 1024}, num_stages=3, num_warps=16)], key=['N'])
@triton.jit
def kernel_bw(GRAD_ACT, GRAD_OUT, ACT_INPUTS, N, stride_gom, stride_aim, **META
):
"""
Go over all the activation inputs, compute the corresponding gradient
"""
BLOCK_N = META['BLOCK_COL']
pid_m, pid_n = tl.program_id(axis=0), tl.program_id(axis=1)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
act_input_ptrs = ACT_INPUTS + pid_m * stride_aim + rn
if META['EVEN_N']:
act_in = tl.load(act_input_ptrs)
else:
act_in = tl.load(act_input_ptrs, mask=rn < N, other=0.0)
grad_act = META['ACTIVATION_GRAD'](act_in)
grad_out_ptrs = GRAD_OUT + pid_m * stride_gom + rn
if META['EVEN_N']:
grad_out = tl.load(grad_out_ptrs)
else:
grad_out = tl.load(grad_out_ptrs, mask=rn < N)
grad_act *= grad_out
grad_act_ptrs = GRAD_ACT + pid_m * stride_gom + rn
tl.store(grad_act_ptrs, grad_act, mask=rn < N)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Activation Functions"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"BSD"
] | https://github.com/cpuhrsch/torchfused/blob/6c40ed160dcecbe7825f268f7c86bccd359e0ebf/torchfused/triton/k_fused_matmul_bw.py |
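The kernel above expects an activation-gradient function to arrive through META['ACTIVATION_GRAD']. A rough sketch of a launch under that older meta-parameter convention might look like the following; the `relu_grad` helper, shapes, and grid are assumptions, not the torchfused wrapper code.

import torch
import triton
import triton.language as tl

@triton.jit
def relu_grad(x):
    # Hypothetical activation gradient passed in through META['ACTIVATION_GRAD'].
    return tl.where(x >= 0, 1.0, 0.0)

M, N = 256, 1024
act_inputs = torch.randn(M, N, device='cuda')
grad_out = torch.randn(M, N, device='cuda')
grad_act = torch.empty(M, N, device='cuda')

# BLOCK_COL is picked by the autotuner; the grid callable sizes axis 1 from whatever it chose.
grid = lambda meta: (M, triton.cdiv(N, meta['BLOCK_COL']))
kernel_bw[grid](grad_act, grad_out, act_inputs, N,
    grad_act.stride(0), act_inputs.stride(0), ACTIVATION_GRAD=relu_grad)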
e3fb6f4a-8ca3-4994-899f-d62d808652d3 | shape.py | 2niuhe/triton_utils | src/triton_utils/shape.py | 6184906ac3b86dac3ccbfac128ec393ccecde5df | 0 | @triton.jit
def store_1d(vals, ptr, sz: tl.constexpr, n, max, stride=1):
"""Store 1d block into nth chunk of vector (defined by ptr), where each chunk has size sz"""
offs = get_1d_offest(sz, n)
mask = get_1d_mask(offs, max)
tl.store(ptr + offs, vals, mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Memory-Bound"
]
} | [
"Apache"
] | https://github.com/2niuhe/triton_utils/blob/6184906ac3b86dac3ccbfac128ec393ccecde5df/src/triton_utils/shape.py |
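As a plain-Python picture of what `store_1d` does, assuming the `get_1d_offest`/`get_1d_mask` helpers from the same module compute `offs = n*sz + arange(sz)` and `mask = offs < max`: the nth size-sz chunk of the destination is overwritten, clipped at `max`. The NumPy equivalent below is illustrative only.

import numpy as np

def store_1d_reference(vals, buf, sz, n, max_len):
    # Mirrors the assumed get_1d_offest / get_1d_mask semantics.
    offs = n * sz + np.arange(sz)
    mask = offs < max_len
    buf[offs[mask]] = vals[mask]

buf = np.zeros(10, dtype=np.float32)
store_1d_reference(np.ones(4, dtype=np.float32), buf, sz=4, n=2, max_len=10)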
ad3e39e4-beb3-4789-856e-e24e65695e79 | wy_fast.py | sustcsonglin/flash-linear-attention | fla/ops/delta_rule/wy_fast.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({}, num_warps=num_warps) for
num_warps in [1, 2, 4, 8]], key=['BT', 'BK'])
@triton.jit
def fwd_recompute_w_kernel(k, beta, w, A, offsets, indices, T: tl.constexpr,
H: tl.constexpr, K: tl.constexpr, BT: tl.constexpr, BK: tl.constexpr,
USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl.constexpr):
i_t, i_bh = tl.program_id(0), tl.program_id(1)
i_b, i_h = i_bh // H, i_bh % H
if USE_OFFSETS:
i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices +
i_t * 2 + 1).to(tl.int32)
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
T = eos - bos
else:
bos, eos = i_b * T, i_b * T + T
if HEAD_FIRST:
p_beta = tl.make_block_ptr(beta + i_bh * T, (T,), (1,), (i_t * BT,),
(BT,), (0,))
p_A = tl.make_block_ptr(A + i_bh * T * BT, (T, BT), (BT, 1), (i_t *
BT, 0), (BT, BT), (1, 0))
else:
p_beta = tl.make_block_ptr(beta + bos * H + i_h, (T,), (H,), (i_t *
BT,), (BT,), (0,))
p_A = tl.make_block_ptr(A + (bos * H + i_h) * BT, (T, BT), (H * BT,
1), (i_t * BT, 0), (BT, BT), (1, 0))
b_beta = tl.load(p_beta, boundary_check=(0,))
b_A = tl.load(p_A, boundary_check=(0, 1)).to(k.dtype.element_ty)
for i_k in range(tl.cdiv(K, BK)):
if HEAD_FIRST:
p_k = tl.make_block_ptr(k + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
p_w = tl.make_block_ptr(w + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
else:
p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_w = tl.make_block_ptr(w + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
b_k = tl.load(p_k, boundary_check=(0, 1))
b_kb = (b_k * b_beta[:, None]).to(b_k.dtype)
b_w = tl.dot(b_A, b_kb, allow_tf32=False)
tl.store(p_w, b_w.to(p_w.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access",
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/delta_rule/wy_fast.py |
02c185b4-ba6d-4e60-84de-9ccd865f78e9 | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/abc/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.jit
def chunk_abc_fwd_kernel_intra_K(v, z, o, A, s_v_h, s_v_t, s_v_d, T: tl.
constexpr, V: tl.constexpr, BT: tl.constexpr, BC: tl.constexpr, BV: tl.
constexpr, NC: tl.constexpr):
i_v, i_c, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_t, i_i = i_c // NC, i_c % NC
p_z = tl.make_block_ptr(z + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), (i_t *
BT + i_i * BC, i_v * BV), (BC, BV), (1, 0))
p_zn = tl.make_block_ptr(z + i_bh * s_v_h, (T * V,), (s_v_d,), ((i_t *
BT + i_i * BC) * V + i_v * BV,), (BV,), (0,))
b_zn = tl.load(p_zn, boundary_check=(0,))
b_o = tl.zeros([BC, BV], dtype=tl.float32)
for i_j in range(0, i_i):
p_A = tl.make_block_ptr(A + i_bh * T * BT, (T, BT), (BT, 1), (i_t *
BT + i_i * BC, i_j * BC), (BC, BC), (1, 0))
p_v = tl.make_block_ptr(v + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), (
i_t * BT + i_j * BC, i_v * BV), (BC, BV), (1, 0))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_A = tl.load(p_A, boundary_check=(0, 1))
b_o += tl.dot(b_A, tl.exp(b_v - b_zn[None, :]).to(b_v.dtype),
allow_tf32=False)
b_z = tl.load(p_z, boundary_check=(0, 1))
b_o *= tl.exp(b_zn[None, :] - b_z)
o_i = tl.arange(0, BC)
o_A = i_bh * T * BT + (i_t * BT + i_i * BC + tl.arange(0, BC)
) * BT + i_i * BC
m_A = i_t * BT + i_i * BC + tl.arange(0, BC) < T
for j in range(0, BC):
p_v = tl.make_block_ptr(v + i_bh * s_v_h, (T * V,), (1,), ((i_t *
BT + i_i * BC + j) * V + i_v * BV,), (BV,), (0,))
b_A = tl.load(A + o_A + j, mask=m_A, other=0)
b_v = tl.load(p_v, boundary_check=(0,)).to(tl.float32)
m_i = o_i[:, None] >= j
b_o += tl.where(m_i, b_A[:, None] * tl.exp(b_v[None, :] - b_z), 0)
p_o = tl.make_block_ptr(o + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), (i_t *
BT + i_i * BC, i_v * BV), (BC, BV), (1, 0))
tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Tiled",
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"Memory-Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/abc/chunk.py |
159f0cc5-72b0-4231-97dc-2a5e3f2c0d0b | hilbert.py | Kitsunetic/space-filling-pytorch | space_filling_pytorch/functional/hilbert.py | 0de955ad1036973ee7506c5a0124c208acec722d | 0 | @triton.jit
def _encode_hilbert_unpadded_kernel(xyz_ptr, batch_idx_ptr, code_ptr,
space_size, x_offset, y_offset, z_offset, str_xyz_n, str_xyz_c, N, BLK:
tl.constexpr, ASSIGN_BATCH_INDEX: tl.constexpr):
pid = tl.program_id(0)
offs_n = pid * BLK + tl.arange(0, BLK)
mask = offs_n < N
xyz_ptrs = xyz_ptr + offs_n * str_xyz_n
fx = tl.load(xyz_ptrs + x_offset * str_xyz_c, mask=mask)
fy = tl.load(xyz_ptrs + y_offset * str_xyz_c, mask=mask)
fz = tl.load(xyz_ptrs + z_offset * str_xyz_c, mask=mask)
ret = _calculate_hilbert_distance(fx, fy, fz, space_size)
if ASSIGN_BATCH_INDEX:
batch_idx_ptrs = batch_idx_ptr + offs_n
batch_idx = tl.load(batch_idx_ptrs, mask=mask).to(tl.int64)
ret |= batch_idx << 48
code_ptrs = code_ptr + offs_n
tl.store(code_ptrs, ret, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/Kitsunetic/space-filling-pytorch/blob/0de955ad1036973ee7506c5a0124c208acec722d/space_filling_pytorch/functional/hilbert.py |
2c6c706f-18a5-446c-bc50-dd5319c23177 | triton_fused_local_attn_rerope.py | LouChao98/vqtree | ops/triton_fused_local_attn_rerope.py | 27a53274df7a804bce27dffcce5f5be73f64b6f3 | 0 | @triton.heuristics({'EVEN_M': lambda args: args['seqlen_q'] % args[
'BLOCK_M'] == 0, 'EVEN_N': lambda args: args['seqlen_k'] % args[
'BLOCK_N'] == 0})
@triton.jit
def _fwd_kernel(Q1, Q2, K1, K2, V, Out, L, softmax_scale, stride_qb,
stride_qh, stride_qm, stride_kb, stride_kh, stride_kn, stride_vb,
stride_vh, stride_vn, stride_ob, stride_oh, stride_om, nheads, seqlen_q,
seqlen_k, CACHE_KEY_SEQLEN_Q, CACHE_KEY_SEQLEN_K, WINDOW_SIZE: tl.
constexpr, BLOCK_HEADDIM: tl.constexpr, EVEN_M: tl.constexpr, EVEN_N:
tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, WRITE_LSE:
tl.constexpr):
start_m = tl.program_id(0)
off_hb = tl.program_id(1)
off_b = off_hb // nheads
off_h = off_hb % nheads
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
offs_d = tl.arange(0, BLOCK_HEADDIM)
Q1_block_ptr = tl.make_block_ptr(base=Q1 + (off_b * stride_qb + off_h *
stride_qh), shape=(seqlen_q, BLOCK_HEADDIM), strides=(stride_qm, 1),
offsets=(start_m * BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_HEADDIM
), order=(1, 0))
Q2_block_ptr = tl.make_block_ptr(base=Q2 + (off_b * stride_qb + off_h *
stride_qh), shape=(seqlen_q, BLOCK_HEADDIM), strides=(stride_qm, 1),
offsets=(start_m * BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_HEADDIM
), order=(1, 0))
K1_block_ptr = tl.make_block_ptr(base=K1 + (off_b * stride_kb + off_h *
stride_kh), shape=(BLOCK_HEADDIM, seqlen_k), strides=(1, stride_kn),
offsets=(0, 0), block_shape=(BLOCK_HEADDIM, BLOCK_N), order=(0, 1))
K2_block_ptr = tl.make_block_ptr(base=K2 + (off_b * stride_kb + off_h *
stride_kh), shape=(BLOCK_HEADDIM, seqlen_k), strides=(1, stride_kn),
offsets=(0, 0), block_shape=(BLOCK_HEADDIM, BLOCK_N), order=(0, 1))
V_block_ptr = tl.make_block_ptr(base=V + (off_b * stride_vb + off_h *
stride_vh), shape=(seqlen_k, BLOCK_HEADDIM), strides=(stride_vn, 1),
offsets=(0, 0), block_shape=(BLOCK_N, BLOCK_HEADDIM), order=(1, 0))
l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + 1.0
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) + NEGINF
acc = tl.zeros([BLOCK_M, BLOCK_HEADDIM], dtype=tl.float32)
if EVEN_M:
q1 = tl.load(Q1_block_ptr)
q2 = tl.load(Q2_block_ptr)
else:
q1 = tl.load(Q1_block_ptr, boundary_check=(0,), padding_option='zero')
q2 = tl.load(Q2_block_ptr, boundary_check=(0,), padding_option='zero')
acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q1, q2, softmax_scale,
K1_block_ptr, K2_block_ptr, V_block_ptr, start_m, offs_m, offs_n,
seqlen_k, WINDOW_SIZE, BLOCK_M, BLOCK_N, EVEN_M & EVEN_N, 1)
acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q1, q2, softmax_scale,
K1_block_ptr, K2_block_ptr, V_block_ptr, start_m, offs_m, offs_n,
seqlen_k, WINDOW_SIZE, BLOCK_M, BLOCK_N, EVEN_M & EVEN_N, 2)
acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q1, q2, softmax_scale,
K1_block_ptr, K2_block_ptr, V_block_ptr, start_m, offs_m, offs_n,
seqlen_k, WINDOW_SIZE, BLOCK_M, BLOCK_N, EVEN_M & EVEN_N, 3)
if WRITE_LSE:
l_ptrs = L + off_hb * seqlen_q + offs_m
tl.store(l_ptrs, m_i + tl.math.log2(l_i))
acc = acc / l_i[:, None]
start_m = tl.program_id(0)
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_d = tl.arange(0, BLOCK_HEADDIM)
out_ptrs = Out + off_b * stride_ob + off_h * stride_oh + (offs_m[:,
None] * stride_om + offs_d[None, :])
if EVEN_M:
tl.store(out_ptrs, acc)
else:
tl.store(out_ptrs, acc, mask=offs_m[:, None] < seqlen_q)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Softmax"
],
"Memory Access Pattern": [
"Tiled",
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"Apache"
] | https://github.com/LouChao98/vqtree/blob/27a53274df7a804bce27dffcce5f5be73f64b6f3/ops/triton_fused_local_attn_rerope.py |
295647f9-b805-4b3f-8f9a-72124ff188fd | relu.py | daemyung/practice-triton | relu.py | 27f727726f1507c8380a1c11751d851c7c4a07ce | 0 | @staticmethod
@triton.jit
def backward(grad_input_ptr, grad_output_ptr, input_ptr, size, block_size:
tl.constexpr):
pid = tl.program_id(0)
offset = pid * block_size
grad_input_block_ptr = tl.make_block_ptr(grad_input_ptr, shape=(size,),
strides=(1,), offsets=(offset,), block_shape=(block_size,), order=(0,))
grad_output_block_ptr = tl.make_block_ptr(grad_output_ptr, shape=(size,
), strides=(1,), offsets=(offset,), block_shape=(block_size,),
order=(0,))
input_block_ptr = tl.make_block_ptr(input_ptr, shape=(size,), strides=(
1,), offsets=(offset,), block_shape=(block_size,), order=(0,))
grad_output = tl.load(grad_output_block_ptr, boundary_check=(0,))
input = tl.load(input_block_ptr, boundary_check=(0,))
condition = input >= 0
grad_input = tl.where(condition, grad_output, 0)
tl.store(grad_input_block_ptr, grad_input, boundary_check=(0,))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Coalesced",
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/daemyung/practice-triton/blob/27f727726f1507c8380a1c11751d851c7c4a07ce/relu.py |
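A minimal launch sketch for the ReLU backward kernel above, together with the dense PyTorch expression it reproduces. In the source this kernel is a static method, so the bare name used here stands in for however it is actually reached; sizes and block size are illustrative.

import torch
import triton

x = torch.randn(1 << 20, device='cuda')
grad_out = torch.randn_like(x)
grad_in = torch.empty_like(x)

block_size = 1024
grid = (triton.cdiv(x.numel(), block_size),)
backward[grid](grad_in, grad_out, x, x.numel(), block_size=block_size)

# Dense reference: the gradient passes through only where the input was non-negative.
assert torch.equal(grad_in, torch.where(x >= 0, grad_out, torch.zeros_like(x)))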
98f78d67-d8c7-4106-a9bc-6716d5cd0889 | sb_varlen_fwd.py | shawntan/stickbreaking-attention | stickbreaking_attention/sb_varlen/sb_varlen_fwd.py | 8dd32ad5e58f0ee0232fd4782dc53d354ff8d283 | 0 | @triton.jit
def compute_block(q, k, qk_scale, neg_log_acc, M_blk_idxs, N_blk_idxs, cm,
on_band: tl.constexpr, ALLOW_TF32: tl.constexpr, backward: tl.constexpr,
attend_current: tl.constexpr=False, use_cumsum: tl.constexpr=False,
is_compiling: tl.constexpr=False):
qk = tl.dot(q, tl.trans(k), allow_tf32=ALLOW_TF32) * qk_scale
log_om_beta = -softplus(qk, is_compiling=is_compiling)
if on_band:
if attend_current:
block_mask = M_blk_idxs[:, None] >= N_blk_idxs[None, :]
else:
block_mask = M_blk_idxs[:, None] > N_blk_idxs[None, :]
log_om_beta = tl.where(block_mask, log_om_beta, 0.0)
if backward:
neg_log_acc -= tl.sum(log_om_beta, axis=1)
log_p = qk + neg_log_acc[:, None]
if use_cumsum:
log_p += tl.cumsum(log_om_beta.to(q.dtype), axis=1, reverse=True)
else:
log_p = tl.dot(log_om_beta.to(q.dtype), cm, acc=log_p,
allow_tf32=ALLOW_TF32)
p = tl.math.exp2(log_p)
p = tl.where(block_mask, p, 0.0)
else:
if backward:
neg_log_acc -= tl.sum(log_om_beta, axis=1)
log_p = qk + neg_log_acc[:, None]
if use_cumsum:
log_p += tl.cumsum(log_om_beta.to(q.dtype), axis=1, reverse=True)
else:
log_p = tl.dot(log_om_beta.to(q.dtype), cm, acc=log_p,
allow_tf32=ALLOW_TF32)
p = tl.math.exp2(log_p)
if not backward:
neg_log_acc += tl.sum(log_om_beta, axis=1)
return p, log_om_beta, neg_log_acc
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Activation Functions"
],
"Memory Access Pattern": [
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/shawntan/stickbreaking-attention/blob/8dd32ad5e58f0ee0232fd4782dc53d354ff8d283/stickbreaking_attention/sb_varlen/sb_varlen_fwd.py |
c3784dfb-4b90-4a4f-9175-4a69cc1f915d | fused_attn.py | thunlp/Delta-CoMe | quant/fused_attn.py | 646a1fbf3443295c4b04aba27334c6bc5aa3df4f | 0 | @triton.jit
def rotate_half_kernel(qk_seq_ptr, position_ids_ptr, qk_seq_stride,
position_ids_batch_stride, seq_len, HEAD_DIM: tl.constexpr,
BLOCK_HEIGHT: tl.constexpr, BLOCK_WIDTH: tl.constexpr, INV_BASE: tl.
constexpr):
HALF_HEAD: tl.constexpr = HEAD_DIM // 2
STEPS_PER_ROW: tl.constexpr = HALF_HEAD // BLOCK_WIDTH
batch_seq = tl.program_id(axis=0)
row_blk_x_col_blk = tl.program_id(axis=1)
row_blk = row_blk_x_col_blk // STEPS_PER_ROW
row = row_blk * BLOCK_HEIGHT
if BLOCK_WIDTH < HALF_HEAD:
col_blk = row_blk_x_col_blk % STEPS_PER_ROW
col = col_blk * BLOCK_WIDTH
else:
col: tl.constexpr = 0
batch = batch_seq // seq_len
seq = batch_seq % seq_len
position_id = tl.load(position_ids_ptr + batch *
position_ids_batch_stride + seq)
freq = tl.libdevice.exp((col + tl.arange(0, BLOCK_WIDTH)).to(tl.float32
) * INV_BASE) * position_id
cos = tl.cos(freq).to(tl.float32)
sin = tl.sin(freq).to(tl.float32)
col_offsets: tl.constexpr = tl.arange(0, BLOCK_WIDTH)
embed_offsets = row * HEAD_DIM + col + col_offsets
x_ptrs = qk_seq_ptr + batch_seq * qk_seq_stride + embed_offsets
for k in range(0, BLOCK_HEIGHT):
x = tl.load(x_ptrs).to(tl.float32)
y = tl.load(x_ptrs + HALF_HEAD).to(tl.float32)
out_x = x * cos - y * sin
tl.store(x_ptrs, out_x)
out_y = x * sin + y * cos
tl.store(x_ptrs + HALF_HEAD, out_y)
x_ptrs += HEAD_DIM
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Tiled",
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/thunlp/Delta-CoMe/blob/646a1fbf3443295c4b04aba27334c6bc5aa3df4f/quant/fused_attn.py |
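For checking the rotation math in the kernel above, a dense PyTorch reference of the same half-rotation (element i paired with element i + head_dim/2, angle = exp(i * INV_BASE) * position_id) is sketched below; the names and shapes are illustrative assumptions.

import torch

def rotate_half_reference(qk, position_ids, inv_base):
    # qk: (batch*seq, n_heads, head_dim); position_ids: (batch, seq) flattened to batch*seq.
    half = qk.shape[-1] // 2
    freqs = torch.exp(torch.arange(half, device=qk.device, dtype=torch.float32) * inv_base)
    angles = freqs * position_ids.reshape(-1, 1, 1).to(torch.float32)   # broadcast over heads
    cos, sin = torch.cos(angles), torch.sin(angles)
    x, y = qk[..., :half].float(), qk[..., half:].float()
    return torch.cat([x * cos - y * sin, x * sin + y * cos], dim=-1)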
297bd9f8-dbf4-4cd4-b87b-6208c25245d1 | pointwise.py | ServiceNow/Fast-LLM | fast_llm/functional/triton/pointwise.py | 8b46289079da67cba99628448a6b6083dac083cf | 0 | @triton.jit
def triton_copy_kernel(input_ptr, out_ptr, numel: tl.constexpr, block_size:
tl.constexpr):
block_start = tl.program_id(axis=0).to(tl.int64) * block_size
offsets = block_start + tl.arange(0, block_size)
mask = offsets < numel
input_ = tl.load(input_ptr + offsets, mask=mask)
tl.store(out_ptr + offsets, input_, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Coalesced",
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Memory-Bound",
"High Throughput"
]
} | [
"Apache"
] | https://github.com/ServiceNow/Fast-LLM/blob/8b46289079da67cba99628448a6b6083dac083cf/fast_llm/functional/triton/pointwise.py |
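
A minimal host-side launch sketch for the copy kernel above (illustrative only; it assumes triton_copy_kernel has been imported from the file listed in this record, and the block size is an arbitrary choice):

import torch
import triton

x = torch.randn(10_000, device="cuda")
y = torch.empty_like(x)
BLOCK = 1024
grid = (triton.cdiv(x.numel(), BLOCK),)
triton_copy_kernel[grid](x, y, numel=x.numel(), block_size=BLOCK)
assert torch.equal(x, y)
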
c7b00225-9961-4819-bb21-d098a0681a35 | RzLinearBackward.py | apd10/RzLinear | python/rz_linear/impl/RzLinearBackward.py | eb56657b2de0a97f398f88af421b0fbcbc5469c9 | 0 | @triton.autotune(configs=[triton.Config({'BLOCK_SIZE_N': 128,
'BLOCK_SIZE_K': 256, 'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=8),
triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_M':
32}, num_stages=3, num_warps=8), triton.Config({'BLOCK_SIZE_N': 128,
'BLOCK_SIZE_K': 256, 'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4),
triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_M':
32}, num_stages=3, num_warps=4), triton.Config({'BLOCK_SIZE_N': 256,
'BLOCK_SIZE_K': 64, 'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 256, 'BLOCK_SIZE_M':
32}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_N': 128,
'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_M':
32}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_N': 128,
'BLOCK_SIZE_K': 64, 'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 64, 'BLOCK_SIZE_M':
32}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_N': 256,
'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=8),
triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64, 'BLOCK_SIZE_M':
16}, num_stages=2, num_warps=4), triton.Config({'BLOCK_SIZE_N': 64,
'BLOCK_SIZE_K': 256, 'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4),
triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_M':
16}, num_stages=2, num_warps=4), triton.Config({'BLOCK_SIZE_N': 64,
'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4),
triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 64, 'BLOCK_SIZE_M':
16}, num_stages=2, num_warps=4), triton.Config({'BLOCK_SIZE_N': 64,
'BLOCK_SIZE_K': 64, 'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4),
triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'BLOCK_SIZE_M':
16}, num_stages=2, num_warps=4), triton.Config({'BLOCK_SIZE_N': 32,
'BLOCK_SIZE_K': 64, 'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4),
triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'BLOCK_SIZE_M':
16}, num_stages=2, num_warps=4), triton.Config({'BLOCK_SIZE_N': 32,
'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4)],
key=['M', 'N', 'K'])
@triton.jit
def rz_linear_backward_weight_grad_kernel_fp32(a_ptr, b_ptr, c_ptr,
init_factor, M, N, K, H, stride_am, stride_ak, stride_bm, stride_bn, R7:
int, R6: int, R5: int, R4: int, R3: int, R2: int, R1: int, R0: int,
BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K:
tl.constexpr, GROUP_SIZE: tl.constexpr):
rz_linear_backward_weight_grad_core(a_ptr=a_ptr, b_ptr=b_ptr, c_ptr=
c_ptr, init_factor=init_factor, M=M, N=N, K=K, H=H, stride_am=
stride_am, stride_ak=stride_ak, stride_bm=stride_bm, stride_bn=
stride_bn, R7=R7, R6=R6, R5=R5, R4=R4, R3=R3, R2=R2, R1=R1, R0=R0,
allow_tf32=False, BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=
BLOCK_SIZE_N, BLOCK_SIZE_K=BLOCK_SIZE_K, GROUP_SIZE=GROUP_SIZE)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Persistent Kernels"
],
"Performance Objective": [
"Memory-Bound",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/apd10/RzLinear/blob/eb56657b2de0a97f398f88af421b0fbcbc5469c9/python/rz_linear/impl/RzLinearBackward.py |
b43d035a-53a0-4ffb-8bee-abc8a227f8b9 | ops.py | shawntan/scattermoe | scattermoe/kernels/ops.py | 63b76a2f5f28c052fb4cd7c34479a54158354052 | 0 | @triton.autotune(configs=_scatter2scatter_configs(), key=['M', 'N', 'K'])
@triton.heuristics({'NO_K_MASK': lambda args: args['K'] % args['BLOCK_K'] ==
0, 'NO_N_MASK': lambda args: args['N'] % args['BLOCK_N'] == 0})
@triton.jit
def _scatter2scatter(X_ptr, stride_xm, stride_xk, W_ptr, stride_we,
stride_wk, stride_wn, Y_ptr, stride_ym, stride_yn, grouped_idx_ptr,
expert_idxs_ptr, block_start_idx_ptr, FAN_OUT: tl.constexpr, M, K: tl.
constexpr, N: tl.constexpr, E: tl.constexpr, BLOCK_M: tl.constexpr,
BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr, ACC_TYPE: tl.constexpr,
OUT_M, allow_tf32: tl.constexpr, x_grouped: tl.constexpr, y_grouped: tl
.constexpr, NO_K_MASK: tl.constexpr, NO_N_MASK: tl.constexpr):
pid = tl.program_id(axis=0)
N_BLOCK_COUNT = tl.cdiv(N, BLOCK_N)
M_block_id = pid // N_BLOCK_COUNT
N_block_id = pid % N_BLOCK_COUNT
M_range = tl.arange(0, BLOCK_M)
block_start_idx = tl.load(block_start_idx_ptr + M_block_id)
M_block = tl.max_contiguous(block_start_idx + M_range, BLOCK_M)
E_idxs = tl.load(expert_idxs_ptr + M_block, mask=M_block < FAN_OUT * M,
other=E)
E_idx = tl.min(E_idxs)
E_mask = E_idxs == E_idx
M_idx = tl.load(grouped_idx_ptr + M_block, mask=E_mask, other=0)
if x_grouped:
M_in_idx = M_block
else:
M_in_idx = M_idx // FAN_OUT
if y_grouped:
M_out_idx = M_block
else:
M_out_idx = M_idx
K_block = tl.arange(0, BLOCK_K)
N_block = N_block_id * BLOCK_N + tl.arange(0, BLOCK_N)
N_mask = N_block < N
X_blk_ptrs = X_ptr + M_in_idx[:, None] * stride_xm + K_block[None, :
] * stride_xk
W_blk_ptrs = W_ptr + K_block[:, None] * stride_wk + N_block[None, :
] * stride_wn + E_idx * stride_we
acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE)
iters = tl.cdiv(K, BLOCK_K)
for K_block_id in range(0, iters):
if NO_K_MASK:
x = tl.load(X_blk_ptrs, mask=E_mask[:, None])
if NO_N_MASK or K_block_id < iters - 1:
w = tl.load(W_blk_ptrs)
else:
w = tl.load(W_blk_ptrs, mask=N_mask[None, :])
else:
K_mask = K_block_id * BLOCK_K + K_block < K
x = tl.load(X_blk_ptrs, mask=E_mask[:, None] & K_mask[None, :])
w = tl.load(W_blk_ptrs, mask=K_mask[:, None] & N_mask[None, :])
X_blk_ptrs += BLOCK_K * stride_xk
W_blk_ptrs += BLOCK_K * stride_wk
acc += tl.dot(x, w, allow_tf32=allow_tf32, out_dtype=ACC_TYPE)
Y_blk_ptrs = Y_ptr + (M_out_idx[:, None] * stride_ym + N_block[None, :] *
stride_yn)
tl.store(Y_blk_ptrs, acc, mask=E_mask[:, None] & N_mask[None, :])
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Memory-Bound",
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/shawntan/scattermoe/blob/63b76a2f5f28c052fb4cd7c34479a54158354052/scattermoe/kernels/ops.py |
98ed13e6-3d06-4b48-bf3b-55ccbee71cfb | dw_conv.py | neuro-ml/kerops | kerops/kernels/dw_conv.py | 735336775e825d5cb06b8850d25423661b12d1ac | 0 | @triton.jit
def _DWConv_wgrad_cl3d_impl(grad_ptr, input_ptr, weight_grad_ptr, H, W, D,
H_stride, W_stride, ACCTYPE: tl.constexpr, channels: tl.constexpr,
D_block: tl.constexpr, WD_grid):
H_cell = tl.program_id(0)
W_D_cell = tl.program_id(1)
D_gridsize = tl.cdiv(D, D_block)
W_cell = W_D_cell // D_gridsize
D_cell = W_D_cell % D_gridsize
input_ptr += D_cell * D_block * channels
grad_ptr += D_cell * D_block * channels
weight_grad_ptr += (H_cell * WD_grid + W_D_cell) * 27 * channels
channels_offset = tl.arange(0, channels)
channels_offset = tl.max_contiguous(tl.multiple_of(channels_offset,
channels), channels)
d_offset = tl.arange(0, D_block)
near_offset = tl.arange(0, 4) - 1
offset = d_offset[None, None, :] * channels + channels_offset[None, :, None
] + near_offset[:, None, None] * channels
mask = d_offset[None, None, :] + near_offset[:, None, None
] < D - D_block * D_cell
mask = mask and d_offset[None, None, :] + near_offset[:, None, None
] >= 0 - D_block * D_cell
mask = mask and near_offset[:, None, None] != 2
in_offset = d_offset[None, None, :] * channels + channels_offset[None,
:, None]
in_mask = d_offset[None, None, :] < D - D_block * D_cell
H1_load = 2 * H_cell + 1 < H
W1_load = 2 * W_cell + 1 < W
h0_w0 = tl.zeros([4, channels], dtype=ACCTYPE)
h0_w1 = tl.zeros([4, channels], dtype=ACCTYPE)
h0_w2 = tl.zeros([4, channels], dtype=ACCTYPE)
h1_w0 = tl.zeros([4, channels], dtype=ACCTYPE)
h1_w1 = tl.zeros([4, channels], dtype=ACCTYPE)
h1_w2 = tl.zeros([4, channels], dtype=ACCTYPE)
h2_w0 = tl.zeros([4, channels], dtype=ACCTYPE)
h2_w1 = tl.zeros([4, channels], dtype=ACCTYPE)
h2_w2 = tl.zeros([4, channels], dtype=ACCTYPE)
tmp_input_ptr = input_ptr + 2 * H_cell * H_stride + 2 * W_cell * W_stride
x_h0_w0 = tl.load(tmp_input_ptr + in_offset, mask=in_mask, other=0.0)
tmp_input_ptr = input_ptr + (2 * H_cell + 1
) * H_stride + 2 * W_cell * W_stride
x_h1_w0 = tl.load(tmp_input_ptr + in_offset, mask=in_mask and H1_load,
other=0.0)
tmp_input_ptr = input_ptr + 2 * H_cell * H_stride + (2 * W_cell + 1
) * W_stride
x_h0_w1 = tl.load(tmp_input_ptr + in_offset, mask=in_mask and W1_load,
other=0.0)
tmp_input_ptr = input_ptr + (2 * H_cell + 1) * H_stride + (2 * W_cell + 1
) * W_stride
x_h1_w1 = tl.load(tmp_input_ptr + in_offset, mask=in_mask and (W1_load and
H1_load), other=0.0)
gradw_offset = tl.arange(0, 4)[:, None] * channels + channels_offset[
None, :]
gradw_mask = near_offset[:, None] != 2
load_next = (2 * H_cell - 1 < H and 2 * H_cell - 1 >= 0) and (2 *
W_cell - 1 < W and 2 * W_cell - 1 >= 0)
tmp_grad_ptr = grad_ptr + (2 * H_cell - 1) * H_stride + (2 * W_cell - 1
) * W_stride
i = -1
j = -1
grad = tl.zeros([4, channels, D_block], dtype=tl.float16)
if load_next:
grad = tl.load(tmp_grad_ptr + offset, mask=mask)
for k in tl.static_range(0, 16):
if load_next:
if i == -1 and j == -1:
h2_w2 += tl.sum(grad * x_h0_w0, axis=2)
elif i == -1 and j == 0:
h2_w1 += tl.sum(grad * x_h0_w0, axis=2)
h2_w2 += tl.sum(grad * x_h0_w1, axis=2)
elif i == -1 and j == 1:
h2_w0 += tl.sum(grad * x_h0_w0, axis=2)
h2_w1 += tl.sum(grad * x_h0_w1, axis=2)
elif i == -1 and j == 2:
h2_w0 += tl.sum(grad * x_h0_w1, axis=2)
elif i == 0 and j == -1:
h1_w2 += tl.sum(grad * x_h0_w0, axis=2)
h2_w2 += tl.sum(grad * x_h1_w0, axis=2)
elif i == 0 and j == 0:
h1_w1 += tl.sum(grad * x_h0_w0, axis=2)
h2_w1 += tl.sum(grad * x_h1_w0, axis=2)
h1_w2 += tl.sum(grad * x_h0_w1, axis=2)
h2_w2 += tl.sum(grad * x_h1_w1, axis=2)
elif i == 0 and j == 1:
h1_w0 += tl.sum(grad * x_h0_w0, axis=2)
h2_w0 += tl.sum(grad * x_h1_w0, axis=2)
h1_w1 += tl.sum(grad * x_h0_w1, axis=2)
h2_w1 += tl.sum(grad * x_h1_w1, axis=2)
elif i == 0 and j == 2:
h1_w0 += tl.sum(grad * x_h0_w1, axis=2)
h2_w0 += tl.sum(grad * x_h1_w1, axis=2)
elif i == 1 and j == -1:
h0_w2 += tl.sum(grad * x_h0_w0, axis=2)
h1_w2 += tl.sum(grad * x_h1_w0, axis=2)
elif i == 1 and j == 0:
h0_w1 += tl.sum(grad * x_h0_w0, axis=2)
h1_w1 += tl.sum(grad * x_h1_w0, axis=2)
h0_w2 += tl.sum(grad * x_h0_w1, axis=2)
h1_w2 += tl.sum(grad * x_h1_w1, axis=2)
elif i == 1 and j == 1:
h0_w0 += tl.sum(grad * x_h0_w0, axis=2)
h1_w0 += tl.sum(grad * x_h1_w0, axis=2)
h0_w1 += tl.sum(grad * x_h0_w1, axis=2)
h1_w1 += tl.sum(grad * x_h1_w1, axis=2)
elif i == 1 and j == 2:
h0_w0 += tl.sum(grad * x_h0_w1, axis=2)
h1_w0 += tl.sum(grad * x_h1_w1, axis=2)
elif i == 2 and j == -1:
h0_w2 += tl.sum(grad * x_h1_w0, axis=2)
elif i == 2 and j == 0:
h0_w1 += tl.sum(grad * x_h1_w0, axis=2)
h0_w2 += tl.sum(grad * x_h1_w1, axis=2)
elif i == 2 and j == 1:
h0_w0 += tl.sum(grad * x_h1_w0, axis=2)
h0_w1 += tl.sum(grad * x_h1_w1, axis=2)
else:
h0_w0 += tl.sum(grad * x_h1_w1, axis=2)
k_ = k + 1
i = k_ % 4 - 1
j = k_ // 4 - 1
load_next = (2 * H_cell + i < H and 2 * H_cell + i >= 0) and (2 *
W_cell + j < W and 2 * W_cell + j >= 0)
tmp_grad_ptr = grad_ptr + (2 * H_cell + i) * H_stride + (2 * W_cell + j
) * W_stride
if load_next and k_ < 16:
grad = tl.load(tmp_grad_ptr + offset, mask=mask)
tl.store(weight_grad_ptr + gradw_offset, h0_w0, mask=gradw_mask)
tl.store(weight_grad_ptr + 3 * channels + gradw_offset, h0_w1, mask=
gradw_mask)
tl.store(weight_grad_ptr + 6 * channels + gradw_offset, h0_w2, mask=
gradw_mask)
tl.store(weight_grad_ptr + 9 * channels + gradw_offset, h1_w0, mask=
gradw_mask)
tl.store(weight_grad_ptr + 12 * channels + gradw_offset, h1_w1, mask=
gradw_mask)
tl.store(weight_grad_ptr + 15 * channels + gradw_offset, h1_w2, mask=
gradw_mask)
tl.store(weight_grad_ptr + 18 * channels + gradw_offset, h2_w0, mask=
gradw_mask)
tl.store(weight_grad_ptr + 21 * channels + gradw_offset, h2_w1, mask=
gradw_mask)
tl.store(weight_grad_ptr + 24 * channels + gradw_offset, h2_w2, mask=
gradw_mask)
| {
"Data Type": [
"fp16"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/neuro-ml/kerops/blob/735336775e825d5cb06b8850d25423661b12d1ac/kerops/kernels/dw_conv.py |
2f63fe4e-4b75-45aa-b01f-c5b1eed17423 | main_triton.py | dwgan/GraphMST | main_triton.py | 4d65ed0f108d339e3e4cfff25085a39adc6a48a2 | 0 | @triton.jit
def find_kernel(parent, u, ret_ptr, BLOCK_SIZE: tl.constexpr):
pu = tl.load(parent + u)
while pu != u:
u = pu
pu = tl.load(parent + u)
    tl.store(ret_ptr + u % BLOCK_SIZE, pu)
    tl.store(ret_ptr + u % BLOCK_SIZE, pu)
| {
"Data Type": [],
"Functionality": [],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/dwgan/GraphMST/blob/4d65ed0f108d339e3e4cfff25085a39adc6a48a2/main_triton.py |
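
The kernel above is a direct translation of the classic union-find `find` without path compression; a plain-Python equivalent for reference:

def find(parent, u):
    # follow parent pointers until the root (parent[u] == u) is reached
    while parent[u] != u:
        u = parent[u]
    return u

parent = [0, 0, 1, 3]
assert find(parent, 2) == 0 and find(parent, 3) == 3
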
fee89749-a0a9-4316-87b5-75545363f010 | wy_fast.py | sustcsonglin/flash-linear-attention | fla/ops/delta_rule/wy_fast.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({}, num_warps=num_warps) for
num_warps in [1, 2, 4, 8, 16]], key=['BK'])
@triton.jit
def fwd_prepare_wy_repr_kernel_chunk32(k, beta, A, offsets, indices, T: tl.
constexpr, H: tl.constexpr, K: tl.constexpr, BT: tl.constexpr, BK: tl.
constexpr, BC: tl.constexpr, USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl.
constexpr):
i_t, i_bh = tl.program_id(0), tl.program_id(1)
i_b, i_h = i_bh // H, i_bh % H
if USE_OFFSETS:
i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices +
i_t * 2 + 1).to(tl.int32)
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
T = eos - bos
else:
bos, eos = i_b * T, i_b * T + T
if HEAD_FIRST:
p_beta = tl.make_block_ptr(beta + i_bh * T, (T,), (1,), (i_t * BT,),
(BT,), (0,))
else:
p_beta = tl.make_block_ptr(beta + bos * H + i_h, (T,), (H,), (i_t *
BT,), (BT,), (0,))
b_beta = tl.load(p_beta, boundary_check=(0,))
b_A = tl.zeros([BT, BT], dtype=tl.float32)
for i_k in range(tl.cdiv(K, BK)):
if HEAD_FIRST:
p_k = tl.make_block_ptr(k + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
else:
p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
b_k = tl.load(p_k, boundary_check=(0, 1))
b_kb = (b_k * b_beta[:, None]).to(b_k.dtype)
b_A += tl.dot(b_kb, tl.trans(b_k), allow_tf32=False)
b_A = -tl.where(tl.arange(0, BT)[:, None] > tl.arange(0, BT)[None, :],
b_A, 0)
for i in range(1, BT):
mask = tl.arange(0, BT) == i
b_a = tl.sum(tl.where(mask[:, None], b_A, 0), 0)
b_a = b_a + tl.sum(b_a[:, None] * b_A, 0) * (tl.arange(0, BT) < i)
b_A = tl.where(mask[:, None], b_a, b_A)
b_A += tl.arange(0, BT)[:, None] == tl.arange(0, BT)[None, :]
if HEAD_FIRST:
p_A = tl.make_block_ptr(A + i_bh * T * BT, (T, BT), (BT, 1), (i_t *
BT, 0), (BT, BT), (1, 0))
else:
p_A = tl.make_block_ptr(A + (bos * H + i_h) * BT, (T, BT), (H * BT,
1), (i_t * BT, 0), (BT, BT), (1, 0))
tl.store(p_A, b_A.to(p_A.dtype.element_ty), boundary_check=(0, 1))
b_A = b_A.to(k.dtype.element_ty)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/delta_rule/wy_fast.py |
3ccfe00c-b0c4-4828-873a-fd5b2174ea1b | kernels.py | pytorch-labs/tritonbench | tritonbench/operators/jagged_mean/kernels.py | 3a5dccb159834968567a2e45e561dc1aeaa8f8a8 | 0 | @triton.autotune(configs=[triton.Config({'BLOCK_SIZE_RAGGED': b_r,
'BLOCK_SIZE_M': b_m}, num_warps=w, num_stages=s) for b_r, b_m, w, s in
itertools.product(BLOCK_SIZES_RAGGED, BLOCK_SIZES_M, NUM_WARPS,
NUM_STAGES)], key=['M'])
@triton.jit
def triton_jagged_mean_kernel_variable_length_loop_sum_then_buffer(
input_ptr_values, input_ptr_offsets, output_ptr, M, BLOCK_SIZE_RAGGED:
tl.constexpr, BLOCK_SIZE_M: tl.constexpr):
pid = tl.program_id(axis=0)
pid_b = pid // tl.cdiv(M, BLOCK_SIZE_M)
pid_m = pid % tl.cdiv(M, BLOCK_SIZE_M)
buffer = tl.zeros((1, BLOCK_SIZE_M), dtype=tl.float32)
block_start_m = pid_m * BLOCK_SIZE_M
offsets_m = block_start_m + tl.arange(0, BLOCK_SIZE_M)
mask_m = offsets_m < M
ragged_start, ragged_end = tl.load(input_ptr_offsets + pid_b), tl.load(
input_ptr_offsets + (pid_b + 1))
ragged_len = ragged_end - ragged_start
for block_start_ragged in range(ragged_start, ragged_end, BLOCK_SIZE_RAGGED
):
offsets_ragged = block_start_ragged + tl.arange(0, BLOCK_SIZE_RAGGED)
mask_ragged = offsets_ragged < ragged_end
idxs = offsets_ragged[:, None] * M + offsets_m
mask = mask_ragged[:, None] & mask_m
input = tl.load(input_ptr_values + idxs, mask=mask, other=0)
buffer += tl.sum(input, axis=0)
buffer_view = buffer.reshape((BLOCK_SIZE_M,))
buffer_view_mean = buffer_view * (1 / ragged_len)
output_offsets = offsets_m + pid_b * M
output_mask = output_offsets < M * (pid_b + 1)
tl.store(output_ptr + output_offsets, buffer_view_mean, mask=output_mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Persistent Kernels"
],
"Performance Objective": [
"High Throughput"
]
} | [
"BSD"
] | https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/operators/jagged_mean/kernels.py |
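
What the jagged-mean kernel above computes, as a short NumPy reference (not from the repo): a per-feature mean over each ragged segment of rows, with segment boundaries given by the offsets array.

import numpy as np

def jagged_mean(values, offsets):
    # values: (total_rows, M) flattened ragged batch; offsets: (B + 1,) row boundaries
    return np.stack([values[s:e].mean(axis=0) for s, e in zip(offsets[:-1], offsets[1:])])

vals = np.arange(12, dtype=np.float32).reshape(6, 2)
print(jagged_mean(vals, np.array([0, 2, 6])))   # means of rows 0-1 and of rows 2-5
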
58fb14a4-ee5b-47db-b1e7-de8b3fd737be | fused_moe.py | Charlie-XIAO/sparse-vllm | vllm/model_executor/layers/fused_moe/fused_moe.py | d228909a30b0c245c35417fb7d2acdf9a3690042 | 0 | @triton.jit
def fused_moe_kernel(a_ptr, b_ptr, c_ptr, a_scale_ptr, b_scale_ptr,
topk_weights_ptr, sorted_token_ids_ptr, expert_ids_ptr,
num_tokens_post_padded_ptr, N, K, EM, num_valid_tokens, stride_am,
stride_ak, stride_be, stride_bk, stride_bn, stride_cm, stride_cn,
stride_bse, stride_bsn, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.
constexpr, BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M: tl.constexpr,
MUL_ROUTED_WEIGHT: tl.constexpr, top_k: tl.constexpr, compute_type: tl.
constexpr, use_fp8_w8a8: tl.constexpr, use_int8_w8a16: tl.constexpr):
"""
Implements the fused computation for a Mixture of Experts (MOE) using
token and expert matrices.
Key Parameters:
- A: The input tensor representing tokens with shape (*, K), where '*' can
be any shape representing batches and K is the feature dimension of
each token.
- B: The stacked MOE weight tensor with shape (E, N, K), where E is
the number of experts, K is the input feature dimension, and N is
the output feature dimension.
- C: The output cache tensor with shape (M, topk, N), where M is the
total number of tokens post padding, topk is the number of times
each token is repeated, and N is the output feature dimension.
- sorted_token_ids: A tensor containing the sorted indices of tokens,
repeated topk times and arranged by the expert index they are
assigned to.
- expert_ids: A tensor containing the indices of the expert for each
block. It determines which expert matrix from B should be used for
each block in A.
This kernel performs the multiplication of a token by its corresponding
expert matrix as determined by `expert_ids`. The sorting of
`sorted_token_ids` by expert index and padding ensures divisibility by
BLOCK_SIZE_M, which is necessary to maintain consistency in block matrix
multiplication across different blocks processed by the same expert.
"""
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(EM, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + pid % num_pid_in_group % group_size_m
pid_n = pid % num_pid_in_group // group_size_m
num_tokens_post_padded = tl.load(num_tokens_post_padded_ptr)
if pid_m * BLOCK_SIZE_M >= num_tokens_post_padded:
return
offs_token_id = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_token = tl.load(sorted_token_ids_ptr + offs_token_id)
token_mask = offs_token < num_valid_tokens
offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptrs = a_ptr + (offs_token[:, None] // top_k * stride_am + offs_k[
None, :] * stride_ak)
off_experts = tl.load(expert_ids_ptr + pid_m)
b_ptrs = b_ptr + off_experts * stride_be + (offs_k[:, None] * stride_bk +
offs_bn[None, :] * stride_bn)
if use_int8_w8a16:
b_scale_ptrs = b_scale_ptr + off_experts * stride_bse + offs_bn[None, :
] * stride_bsn
b_scale = tl.load(b_scale_ptrs)
if use_fp8_w8a8:
a_scale = tl.load(a_scale_ptr)
b_scale = tl.load(b_scale_ptr + off_experts)
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)):
a = tl.load(a_ptrs, mask=token_mask[:, None] & (offs_k[None, :] < K -
k * BLOCK_SIZE_K), other=0.0)
b = tl.load(b_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K,
other=0.0)
if use_int8_w8a16:
accumulator = tl.dot(a, b.to(compute_type), acc=accumulator)
elif use_fp8_w8a8:
accumulator = tl.dot(a, b, acc=accumulator)
else:
accumulator += tl.dot(a, b)
a_ptrs += BLOCK_SIZE_K * stride_ak
b_ptrs += BLOCK_SIZE_K * stride_bk
if MUL_ROUTED_WEIGHT:
moe_weight = tl.load(topk_weights_ptr + offs_token, mask=token_mask,
other=0)
accumulator = accumulator * moe_weight[:, None]
if use_int8_w8a16:
accumulator = (accumulator * b_scale).to(compute_type)
elif use_fp8_w8a8:
accumulator = (accumulator * a_scale * b_scale).to(compute_type)
else:
accumulator = accumulator.to(compute_type)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptrs = c_ptr + stride_cm * offs_token[:, None] + stride_cn * offs_cn[
None, :]
c_mask = token_mask[:, None] & (offs_cn[None, :] < N)
tl.store(c_ptrs, accumulator, mask=c_mask)
| {
"Data Type": [
"int8",
"fp16"
],
"Functionality": [
"Matrix Multiplication",
"Top-K Selection"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"Apache"
] | https://github.com/Charlie-XIAO/sparse-vllm/blob/d228909a30b0c245c35417fb7d2acdf9a3690042/vllm/model_executor/layers/fused_moe/fused_moe.py |
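
A dense (unsorted, unpadded) NumPy reference for the quantity the fused MoE kernel above produces, under the shape conventions stated in its docstring; the routing inputs (sorted_token_ids, expert_ids, padding) only exist to make the same computation blockable, and the w8a8/w8a16 scaling paths are omitted here. Names and the toy shapes are illustrative.

import numpy as np

def moe_reference(A, B, topk_ids, topk_weights):
    # A: (T, K) tokens, B: (E, N, K) expert weights, topk_ids / topk_weights: (T, top_k)
    T, top_k = topk_ids.shape
    C = np.empty((T, top_k, B.shape[1]), dtype=np.float32)
    for t in range(T):
        for k in range(top_k):
            C[t, k] = (A[t] @ B[topk_ids[t, k]].T) * topk_weights[t, k]
    return C

A = np.random.randn(5, 16).astype(np.float32)
B = np.random.randn(4, 8, 16).astype(np.float32)
ids = np.random.randint(0, 4, size=(5, 2))
w = np.random.rand(5, 2).astype(np.float32)
out = moe_reference(A, B, ids, w)   # shape (5, 2, 8), matching C = (M, topk, N)
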
b915901f-25e7-4e07-86fb-3ce11a600e0e | test_triton_varargs.py | facebookresearch/xformers | tests/test_triton_varargs.py | a2f37f8c5f4e3ae0d3459a92e42cd1aeb45b03bc | 0 | @triton.jit
def kernel(x_ptrs: 'VAR_ARGS_ARRAY', y_ptrs: 'VAR_ARGS_ARRAY', numel,
BLOCK_SIZE: tl.constexpr):
pid = tl.program_id(axis=0)
offsets = BLOCK_SIZE * pid + tl.arange(0, BLOCK_SIZE)
mask = offsets < numel
for i in range(len(x_ptrs)):
x_ptr = x_ptrs[i]
y_ptr = y_ptrs[i]
data = tl.load(x_ptr + offsets, mask)
result = data * data
tl.store(y_ptr + offsets, result, mask)
| {
"Data Type": [],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"BSD"
] | https://github.com/facebookresearch/xformers/blob/a2f37f8c5f4e3ae0d3459a92e42cd1aeb45b03bc/tests/test_triton_varargs.py |
b5a86ee1-c573-4ef0-b121-3f8c69923b2c | triton_fused_attention.py | pytorch-labs/tritonbench | tritonbench/kernels/triton_fused_attention.py | 3a5dccb159834968567a2e45e561dc1aeaa8f8a8 | 0 | @triton.autotune(list(filter(keep, configsWS)), key=['N_CTX'])
@triton.jit
def _attn_fwd_ws(Q, K, V, sm_scale, M, Out, desc_q, desc_k, desc_v, desc_o,
stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh,
stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn,
stride_oz, stride_oh, stride_om, stride_on, Z, H, N_CTX, BLOCK_M: tl.
constexpr, BLOCK_N: tl.constexpr, HEAD_DIM: tl.constexpr, STAGE: tl.
constexpr, ENABLE_TMA: tl.constexpr, LOOP_SCHEDULE: tl.constexpr,
ENABLE_WS: tl.constexpr):
tl.static_assert(BLOCK_N <= HEAD_DIM)
pid = tl.program_id(0)
off_hz = tl.program_id(1)
_attn_fwd_compute_ws(Q, K, V, sm_scale, M, Out, desc_q, desc_k, desc_v,
desc_o, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz,
stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk,
stride_vn, stride_oz, stride_oh, stride_om, stride_on, off_hz, pid,
Z, H, N_CTX, BLOCK_M, BLOCK_N, HEAD_DIM, STAGE, ENABLE_TMA,
LOOP_SCHEDULE)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Tiled",
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"BSD"
] | https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/kernels/triton_fused_attention.py |
ac3a79c4-8892-4afd-bc3a-b1376778c60f | parallel_scan.py | chengkai-liu/RecBLR | parallel_scan.py | 66e520c26e28c05a5425ba2e81c9169b7e0176e2 | 0 | @triton.jit
def pack64(a, b):
tl.static_assert(a.dtype == tl.float32)
tl.static_assert(b.dtype == tl.float32)
a = a.to(dtype=tl.uint32, bitcast=True).to(tl.uint64)
a = a << 32
b = b.to(dtype=tl.uint32, bitcast=True).to(tl.uint64)
return a | b
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Register Intensive"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/chengkai-liu/RecBLR/blob/66e520c26e28c05a5425ba2e81c9169b7e0176e2/parallel_scan.py |
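
A NumPy equivalent (reference only) of the bitcast-and-pack above: the float32 bit pattern of `a` lands in the high 32 bits of a uint64 and that of `b` in the low 32 bits.

import numpy as np

def pack64(a, b):
    a_bits = np.asarray(a, np.float32).view(np.uint32).astype(np.uint64)
    b_bits = np.asarray(b, np.float32).view(np.uint32).astype(np.uint64)
    return (a_bits << np.uint64(32)) | b_bits

packed = pack64([1.5, -2.0], [3.25, 0.0])
lo = (packed & np.uint64(0xFFFFFFFF)).astype(np.uint32).view(np.float32)
print(lo)   # [3.25 0.  ] -- the low half round-trips back to b
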
aeb1d88e-f7a3-4baf-ad05-a447c96fd287 | nll_loss_kernels.py | BobMcDear/attorch | attorch/nll_loss_kernels.py | da06cb6236bb47195e33fe3986ed21c675ed94cc | 0 | @triton.autotune(configs=warps_kernel_configs(), key=['batch_dim',
'spatial_dim'])
@triton.heuristics({'BLOCK_SIZE_BATCH': BLOCK_SIZE_BATCH_heuristic,
'BLOCK_SIZE_SPATIAL': lambda args: next_power_of_2(args['spatial_dim'])})
@triton.jit
def nll_loss_forward_kernel(input_pointer, target_pointer, weight_pointer,
sum_weights_pointer, output_pointer, batch_dim, spatial_dim,
input_batch_stride, input_feat_stride, input_spatial_stride,
target_batch_stride, target_spatial_stride, output_batch_stride,
output_spatial_stride, reduction: tl.constexpr, weighted: tl.constexpr,
BLOCK_SIZE_BATCH: tl.constexpr, BLOCK_SIZE_SPATIAL: tl.constexpr):
"""
Measures the negative log likelihood loss between the input and target,
with optional reweighing of each class.
Args:
input_pointer: Pointer to the input.
The input must be of shape [batch_dim, feat_dim, spatial_dim].
target_pointer: Pointer to the target.
The target must be of shape [batch_dim, spatial_dim].
weight_pointer: Pointer to an optional class weight vector.
The class weight vector, if provided, must be of shape [feat_dim].
sum_weights_pointer: Pointer to a container the sum of the class weights is written to.
The container must be of shape [batch_dim/BLOCK_SIZE_BATCH].
output_pointer: Pointer to a container the loss is written to.
The container must be of shape [batch_dim, spatial_dim] if reduction is 'none',
and otherwise of shape [batch_dim/BLOCK_SIZE].
batch_dim: Batch dimension.
spatial_dim: Spatial dimension.
input_batch_stride: Stride necessary to jump one element along the
input's batch dimension.
input_feat_stride: Stride necessary to jump one element along the
input's feature dimension.
input_spatial_stride: Stride necessary to jump one element along the
input's spatial dimension.
target_batch_stride: Stride necessary to jump one element along the
target's batch dimension.
target_spatial_stride: Stride necessary to jump one element along the
target's spatial dimension.
output_batch_stride: Stride necessary to jump one element along the
output container's batch dimension.
output_spatial_stride: Stride necessary to jump one element along the
output container's spatial dimension.
reduction: Reduction strategy for the output.
Options are 'none' for no reduction, 'mean' for averaging the loss
across all entries, and 'sum' for summing the loss across all entries.
If a reduction method is specified, the reduced result of each
program is written to a separate index in the summed weights and
output container, which should later be summed.
weighted: Flag for weighing each class.
BLOCK_SIZE_BATCH: Block size across the batch dimension.
BLOCK_SIZE_SPATIAL: Block size across the spatial dimension.
"""
batch_pid = tl.program_id(axis=0)
batch_offset = batch_pid * BLOCK_SIZE_BATCH + tl.arange(0, BLOCK_SIZE_BATCH
)
spatial_offset = tl.arange(0, BLOCK_SIZE_SPATIAL)
batch_mask = batch_offset < batch_dim
spatial_mask = spatial_offset < spatial_dim
target_pointer += target_batch_stride * batch_offset[:, None
] + target_spatial_stride * spatial_offset[None, :]
target = tl.load(target_pointer, mask=batch_mask[:, None] &
spatial_mask[None, :])
input_pointer += (input_feat_stride * target + input_batch_stride *
batch_offset[:, None] + input_spatial_stride * spatial_offset[None, :])
input = tl.load(input_pointer, mask=batch_mask[:, None] & spatial_mask[
None, :]).to(tl.float32)
output = -input
if weighted:
weight = tl.load(weight_pointer + target, mask=batch_mask[:, None] &
spatial_mask[None, :]).to(tl.float32)
output *= weight
if reduction == 'none':
output_pointer += output_batch_stride * batch_offset[:, None
] + output_spatial_stride * spatial_offset[None, :]
tl.store(output_pointer, output, mask=batch_mask[:, None] &
spatial_mask[None, :])
elif reduction == 'mean':
if weighted:
tl.store(sum_weights_pointer + batch_pid, tl.sum(weight))
tl.store(output_pointer + batch_pid, tl.sum(output))
else:
tl.store(output_pointer + batch_pid, tl.sum(output) / (
batch_dim * spatial_dim))
elif reduction == 'sum':
tl.store(output_pointer + batch_pid, tl.sum(output))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Tiled",
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/BobMcDear/attorch/blob/da06cb6236bb47195e33fe3986ed21c675ed94cc/attorch/nll_loss_kernels.py |
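
A compact NumPy reference (not from the repo) for the forward semantics described in the docstring above, including the convention that a weighted 'mean' divides by the summed class weights while the unweighted 'mean' divides by batch_dim * spatial_dim; ignore_index handling is left out.

import numpy as np

def nll_loss_reference(inp, target, weight=None, reduction="none"):
    # inp: (B, C, S) log-probabilities, target: (B, S) class indices
    picked = -np.take_along_axis(inp, target[:, None, :], axis=1)[:, 0, :]
    w = weight[target] if weight is not None else None
    out = picked * w if weight is not None else picked
    if reduction == "none":
        return out
    if reduction == "sum":
        return out.sum()
    return out.sum() / (w.sum() if weight is not None else out.size)   # "mean"
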
f9da9e73-4afd-45c7-a28d-2725468622a1 | paged_attn.py | AlibabaPAI/FLASHNN | flashnn/triton_kernels/paged_attn.py | 528a9301587f5fb135b25d973a87ba0a40a703a7 | 0 | @triton.autotune(configs=[triton.Config({}, num_warps=warps) for warps in [
4, 8, 16]], key=['QUERY_GROUP_SIZE', 'HEAD_SIZE', 'NUM_PARTITIONS',
'PARTITION_SIZE'])
@triton.jit
def _paged_attn_w_mma_v2_reduce_kernel(out_ptr, m_i_ptr, l_i_ptr,
tmp_out_ptr, context_lens_ptr, max_num_partitions, stride_o0, stride_o1,
stride_o2, HEAD_SIZE: tl.constexpr, QUERY_GROUP_SIZE: tl.constexpr,
PADDED_QUERY_GROUP_SIZE: tl.constexpr, NUM_KV_HEADS: tl.constexpr,
PARTITION_SIZE: tl.constexpr, NUM_PARTITIONS: tl.constexpr):
seq_idx = tl.program_id(0)
kv_head_idx = tl.program_id(1)
context_len = tl.load(context_lens_ptr + seq_idx)
num_partitions = tl.cdiv(context_len, PARTITION_SIZE)
group_head_offset = tl.arange(0, PADDED_QUERY_GROUP_SIZE)[:, None
] * HEAD_SIZE + tl.arange(0, HEAD_SIZE)[None, :]
group_mask = tl.arange(0, PADDED_QUERY_GROUP_SIZE)[:, None
] < QUERY_GROUP_SIZE
if num_partitions == 1:
tmp_out_offset = ((seq_idx * NUM_KV_HEADS + kv_head_idx) *
max_num_partitions * QUERY_GROUP_SIZE * HEAD_SIZE +
group_head_offset)
tmp_out = tl.load(tmp_out_ptr + tmp_out_offset, mask=group_mask,
other=0.0)
out_offset = (seq_idx * stride_o0 + kv_head_idx * QUERY_GROUP_SIZE *
stride_o1 + group_head_offset * stride_o2)
tl.store(out_ptr + out_offset, tmp_out, mask=group_mask)
return
ml_offset = (seq_idx * NUM_KV_HEADS + kv_head_idx
) * max_num_partitions * QUERY_GROUP_SIZE + tl.arange(0, NUM_PARTITIONS
)[:, None] * QUERY_GROUP_SIZE + tl.arange(0, PADDED_QUERY_GROUP_SIZE)[
None, :]
mask = (tl.arange(0, NUM_PARTITIONS)[:, None] < num_partitions) & (tl.
arange(0, PADDED_QUERY_GROUP_SIZE)[None, :] < QUERY_GROUP_SIZE)
m_i = tl.load(m_i_ptr + ml_offset, mask=mask, other=float('-inf'))
m = tl.max(m_i, axis=0)
l_i = tl.load(l_i_ptr + ml_offset, mask=mask, other=0.0)
l_i *= tl.exp(m_i - m[None, :])
l = tl.sum(l_i, axis=0)
r = l_i / l[None, :]
r = tl.reshape(r, (NUM_PARTITIONS, PADDED_QUERY_GROUP_SIZE, 1))
tmp_out_offset = (seq_idx * NUM_KV_HEADS + kv_head_idx
) * max_num_partitions * QUERY_GROUP_SIZE * HEAD_SIZE + tl.arange(0,
NUM_PARTITIONS)[:, None, None
] * QUERY_GROUP_SIZE * HEAD_SIZE + tl.arange(0, PADDED_QUERY_GROUP_SIZE
)[None, :, None] * HEAD_SIZE + tl.arange(0, HEAD_SIZE)[None, None, :]
tmp_out = tl.load(tmp_out_ptr + tmp_out_offset, mask=mask[:, :, None],
other=0.0)
out = tl.sum((tmp_out * r).to(tl.float32), axis=0)
out_offset = (seq_idx * stride_o0 + kv_head_idx * QUERY_GROUP_SIZE *
stride_o1 + group_head_offset * stride_o2)
tl.store(out_ptr + out_offset, out, mask=group_mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Softmax"
],
"Memory Access Pattern": [
"Tiled",
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"Apache"
] | https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/paged_attn.py |
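
The reduction step of the kernel above in isolation, as a NumPy sketch (reference only): per-partition running maxima m_i and normalizers l_i from the first pass are merged with the usual flash-attention rescaling before the partial outputs are averaged. Shapes here are assumptions for illustration.

import numpy as np

def combine_partials(m_i, l_i, tmp_out):
    # m_i, l_i: (P, G); tmp_out: (P, G, D) partial outputs over P partitions
    m = m_i.max(axis=0)
    l_scaled = l_i * np.exp(m_i - m)          # rescale each partition's normalizer
    r = l_scaled / l_scaled.sum(axis=0)       # per-partition mixing weights
    return (tmp_out * r[:, :, None]).sum(axis=0)

out = combine_partials(np.random.randn(3, 4), np.random.rand(3, 4) + 1, np.random.randn(3, 4, 16))
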
fbbb8e30-59c7-4345-a0af-c5932ca05a42 | hello_triton.py | gmgu/study-triton | 1_hello_triton/hello_triton.py | 3a9a24fd3f1de3e7465535ffe72f6deac8a419bd | 0 | @triton.jit
def hello_kernel():
print('Hello Triton Kernel!')
| {
"Data Type": [],
"Functionality": [],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"Apache"
] | https://github.com/gmgu/study-triton/blob/3a9a24fd3f1de3e7465535ffe72f6deac8a419bd/1_hello_triton/hello_triton.py |
89417c21-0b2b-4b0f-bb94-3113c88d8895 | adam.py | ServiceNow/Fast-LLM | fast_llm/functional/triton/adam.py | 8b46289079da67cba99628448a6b6083dac083cf | 0 | @triton.jit
def triton_adam_kernel(params_ptr, grads_ptr, exp_avgs_ptr, exp_avg_sqs_ptr,
noop_flag_ptr, scale_ptr, step_size, beta1, beta2, bias_correction,
decay_factor, epsilon, numel: tl.constexpr, block_size: tl.constexpr):
noop_flag = tl.load(noop_flag_ptr)
if noop_flag != 0:
return
scale = tl.load(scale_ptr)
block_start = tl.program_id(axis=0).to(tl.int64) * block_size
offsets = block_start + tl.arange(0, block_size)
mask = offsets < numel
params = tl.load(params_ptr + offsets, mask=mask)
grads = tl.load(grads_ptr + offsets, mask=mask)
grads = scale * grads
exp_avgs = tl.load(exp_avgs_ptr + offsets, mask=mask)
exp_avgs = beta1 * exp_avgs + (1 - beta1) * grads
tl.store(exp_avgs_ptr + offsets, exp_avgs, mask=mask)
exp_avg_sqs = tl.load(exp_avg_sqs_ptr + offsets, mask=mask)
exp_avg_sqs = beta2 * exp_avg_sqs + (1 - beta2) * grads * grads
tl.store(exp_avg_sqs_ptr + offsets, exp_avg_sqs, mask=mask)
params = decay_factor * params - step_size * exp_avgs / (tl.sqrt(
exp_avg_sqs) / bias_correction + epsilon)
tl.store(params_ptr + offsets, params, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access",
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"Apache"
] | https://github.com/ServiceNow/Fast-LLM/blob/8b46289079da67cba99628448a6b6083dac083cf/fast_llm/functional/triton/adam.py |
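
The same per-element Adam update as the kernel above, written with PyTorch tensor ops (a reference sketch; the noop flag, grid/blocking, and masking are host-side details left out):

import torch

def adam_step_reference(params, grads, exp_avgs, exp_avg_sqs, *, scale, step_size,
                        beta1, beta2, bias_correction, decay_factor, epsilon):
    grads = scale * grads
    exp_avgs.mul_(beta1).add_(grads, alpha=1 - beta1)
    exp_avg_sqs.mul_(beta2).addcmul_(grads, grads, value=1 - beta2)
    params.mul_(decay_factor).sub_(step_size * exp_avgs /
                                   (exp_avg_sqs.sqrt() / bias_correction + epsilon))
    return params
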
b21323e3-f171-4003-9eda-bd4fcfee5aff | flash_attention.py | falkaer/multi-scale-music | seq/flash_attention.py | a7794ddfb3bbd95b70acf3fe72a08d8a1d47564d | 0 | @triton.jit
def make_bounds(offs_m, offs_n, M, N, EVEN_M: tl.constexpr, EVEN_N: tl.
constexpr):
if EVEN_M:
mask = offs_n[None, :] < N
elif EVEN_N:
mask = offs_m[:, None] < M
else:
mask = (offs_m[:, None] < M) & (offs_n[None, :] < N)
return mask
| {
"Data Type": [],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/falkaer/multi-scale-music/blob/a7794ddfb3bbd95b70acf3fe72a08d8a1d47564d/seq/flash_attention.py |
5f6f1215-91df-4856-935d-ad21674c7526 | rwkv_log.py | berlino/seq_icl | src/models/sequence/rnn/scan_triton/rwkv_log.py | 9b9223d15348b5a415fb453ed988ed5f7ab9fbdc | 0 | @triton.jit
def logaddexp(a, b):
max_ab = tl.maximum(a, b)
return max_ab + tl.log(tl.exp(a - max_ab) + tl.exp(b - max_ab))
| {
"Data Type": [],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"Apache"
] | https://github.com/berlino/seq_icl/blob/9b9223d15348b5a415fb453ed988ed5f7ab9fbdc/src/models/sequence/rnn/scan_triton/rwkv_log.py |
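
The kernel above is the familiar max trick for a numerically stable log(exp(a) + exp(b)); a NumPy check (reference only):

import numpy as np

def logaddexp(a, b):
    m = np.maximum(a, b)
    return m + np.log(np.exp(a - m) + np.exp(b - m))

print(logaddexp(1000.0, 1000.0), np.logaddexp(1000.0, 1000.0))   # both ~1000.6931, no overflow
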
aeaaa009-521f-43fd-884c-f286d78d2d44 | fused_linear_cross_entropy.py | sustcsonglin/flash-linear-attention | fla/modules/fused_linear_cross_entropy.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.jit
def cross_entropy_kernel(logits, lse, target, loss, total, ignore_index,
label_smoothing: tl.constexpr, logit_scale: tl.constexpr, reduction: tl
.constexpr, V: tl.constexpr, BV: tl.constexpr):
"""
This kernel computes both cross entropy loss and the gradient of the input.
We only consider hard label + mean reduction for now.
Please refer to https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html for the math.
Args:
logits:
Pointer to logits tensor.
lse:
Pointer to logsumexp tensor.
target: Pointer to target tensor.
loss:
Pointer to tensor to store the loss.
V (int):
The number of columns in the input tensor.
total (int):
The number of non-ignored classes.
ignore_index (int):
The index to ignore in the target.
label_smoothing (float):
The amount of smoothing when computing the loss, where 0.0 means no smoothing.
reduction (str):
The string for the reduction to apply
BV (int):
The block size for vocab.
"""
i_n = tl.program_id(0).to(tl.int64)
NV = tl.cdiv(V, BV)
b_y = tl.load(target + i_n)
logits += i_n * V
if b_y == ignore_index:
for i in range(0, V, BV):
o_v = i + tl.arange(0, BV)
tl.store(logits + o_v, 0.0, mask=o_v < V)
return
b_l = tl.load(logits + b_y) * logit_scale
b_lse = tl.load(lse + i_n)
b_loss = b_lse - b_l
b_z = 0.0
eps = label_smoothing / V
tl.debug_barrier()
for iv in range(0, NV):
o_v = iv * BV + tl.arange(0, BV)
b_logits = tl.load(logits + o_v, mask=o_v < V, other=float('-inf')
) * logit_scale
if label_smoothing > 0:
b_z += tl.sum(tl.where(o_v < V, -eps * b_logits, 0.0))
b_p = (tl.exp(b_logits - b_lse) - eps) * logit_scale
if reduction == 'mean':
b_p = b_p / total
tl.store(logits + o_v, b_p, mask=o_v < V)
tl.debug_barrier()
if label_smoothing > 0:
b_loss = b_loss * (1 - label_smoothing) + (b_z + label_smoothing *
b_lse)
b_l = tl.load(logits + b_y)
if reduction == 'mean':
b_loss = b_loss / total
b_l += (label_smoothing - 1) / total * logit_scale
else:
b_l += (label_smoothing - 1) * logit_scale
tl.store(loss + i_n, b_loss)
tl.store(logits + b_y, b_l)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Softmax",
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Memory-Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/modules/fused_linear_cross_entropy.py |
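
A per-row NumPy reference (not from the repo) for the kernel above in the no-smoothing, reduction='sum' case: the loss is lse minus the scaled target logit, and the gradient written back over the logits is logit_scale times the softmax minus logit_scale at the target index.

import numpy as np

def ce_loss_and_grad(logits, target, logit_scale=1.0):
    z = logits * logit_scale
    m = z.max()
    lse = m + np.log(np.exp(z - m).sum())
    loss = lse - z[target]
    grad = np.exp(z - lse) * logit_scale     # softmax of scaled logits, times the scale
    grad[target] -= logit_scale              # subtract the one-hot term
    return loss, grad

loss, grad = ce_loss_and_grad(np.random.randn(10).astype(np.float32), target=3)
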
0259cfec-6015-444e-944d-75eaa64eb07f | y_4.py | IntelLabs/EquiTriton | src/equitriton/sph_harm/direct/y_4.py | 1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c | 0 | @triton.jit
def fourth_order_fwd(coord_ptr: tl.tensor, output_ptr: tl.tensor,
block_size: tl.constexpr, coord_numel: tl.constexpr, output_numel: tl.
constexpr, col_offset: tl.constexpr, output_stride: tl.constexpr):
coord_stride = 3
block_id = tl.program_id(0)
coord_striding = tl.arange(0, block_size) * coord_stride
coord_row_offset = coord_striding + block_size * coord_stride * block_id
x = tl.load(coord_ptr + coord_row_offset, mask=coord_row_offset <
coord_numel)
y = tl.load(coord_ptr + coord_row_offset + 1, mask=coord_row_offset + 1 <
coord_numel)
z = tl.load(coord_ptr + coord_row_offset + 2, mask=coord_row_offset + 2 <
coord_numel)
CONST000 = 1.125
CONST001 = 2.25
CONST002 = 3.0
CONST005 = 2.21852991866236
CONST007 = 9.48683298050514
CONST010 = 20.1246117974981
CONST011 = -18.8248505970167
CONST012 = -13.3111795119741
CONST013 = -10.0623058987491
CONST014 = -9.0
CONST015 = -8.87411967464942
CONST016 = -7.11512473537885
CONST017 = -6.27495019900557
CONST018 = -3.35410196624968
CONST019 = -1.67705098312484
VAR06 = x * x * x * x
VAR07 = x * x * x
VAR08 = x * x
VAR15 = y * y * y * y
VAR16 = y * y * y
VAR17 = y * y
VAR24 = z * z * z * z
VAR25 = z * z * z
VAR26 = z * z
Y00 = CONST015 * VAR07 * z - CONST015 * VAR25 * x
Y01 = y * (-CONST011 * VAR26 * x + CONST017 * VAR07)
Y02 = CONST018 * VAR07 * z + x * (CONST010 * VAR17 * z + CONST018 * VAR25)
Y03 = CONST016 * VAR07 * y + x * (CONST007 * VAR16 + CONST016 * VAR26 * y)
Y04 = (CONST000 * VAR06 + CONST000 * VAR24 + CONST002 * VAR15 +
CONST014 * VAR17 * VAR26 + VAR08 * (CONST001 * VAR26 + CONST014 *
VAR17))
Y05 = CONST016 * VAR25 * y + z * (CONST007 * VAR16 + CONST016 * VAR08 * y)
Y06 = -CONST019 * VAR06 + CONST019 * VAR24 + VAR17 * (CONST013 * VAR08 -
CONST013 * VAR26)
Y07 = y * (CONST011 * VAR08 * z - CONST017 * VAR25)
Y08 = CONST005 * VAR06 + CONST005 * VAR24 + CONST012 * VAR08 * VAR26
output_striding = tl.arange(0, block_size) * output_stride
output_row_offset = (output_striding + block_size * output_stride *
block_id + col_offset)
tl.store(output_ptr + output_row_offset, Y00, mask=output_row_offset <
output_numel)
tl.store(output_ptr + output_row_offset + 1, Y01, mask=
output_row_offset + 1 < output_numel)
tl.store(output_ptr + output_row_offset + 2, Y02, mask=
output_row_offset + 2 < output_numel)
tl.store(output_ptr + output_row_offset + 3, Y03, mask=
output_row_offset + 3 < output_numel)
tl.store(output_ptr + output_row_offset + 4, Y04, mask=
output_row_offset + 4 < output_numel)
tl.store(output_ptr + output_row_offset + 5, Y05, mask=
output_row_offset + 5 < output_numel)
tl.store(output_ptr + output_row_offset + 6, Y06, mask=
output_row_offset + 6 < output_numel)
tl.store(output_ptr + output_row_offset + 7, Y07, mask=
output_row_offset + 7 < output_numel)
tl.store(output_ptr + output_row_offset + 8, Y08, mask=
output_row_offset + 8 < output_numel)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"Apache"
] | https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/direct/y_4.py |
99554c85-0a2d-42e8-ab1a-65744f560890 | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/gla/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({}, num_warps=1), triton.Config({},
num_warps=2), triton.Config({}, num_warps=4), triton.Config({},
num_warps=8)], key=['BC'])
@triton.jit
def chunk_gla_fwd_A_kernel_intra_sub_intra_merge(A, A2, offsets, indices, B:
tl.constexpr, T: tl.constexpr, H: tl.constexpr, BT: tl.constexpr, BC:
tl.constexpr, NK: tl.constexpr, USE_OFFSETS: tl.constexpr, HEAD_FIRST:
tl.constexpr):
i_t, i_c, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_b, i_h = i_bh // H, i_bh % H
if USE_OFFSETS:
i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices +
i_t * 2 + 1).to(tl.int32)
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
all = T
T = eos - bos
else:
bos, eos = i_b * T, i_b * T + T
all = B * T
if i_t * BT + i_c * BC >= T:
return
b_A = tl.zeros([BC, BC], dtype=tl.float32)
for i_k in range(0, NK):
if HEAD_FIRST:
p_A = tl.make_block_ptr(A + (i_k * B * H + i_bh) * T * BC, (T,
BC), (BC, 1), (i_t * BT + i_c * BC, 0), (BC, BC), (1, 0))
else:
p_A = tl.make_block_ptr(A + (i_k * all + bos) * H * BC + i_h *
BC, (T, BC), (H * BC, 1), (i_t * BT + i_c * BC, 0), (BC, BC
), (1, 0))
b_A += tl.load(p_A, boundary_check=(0, 1))
if HEAD_FIRST:
p_A2 = tl.make_block_ptr(A2 + i_bh * T * BT, (T, BT), (BT, 1), (i_t *
BT + i_c * BC, i_c * BC), (BC, BC), (1, 0))
else:
p_A2 = tl.make_block_ptr(A2 + (bos * H + i_h) * BT, (T, BT), (H *
BT, 1), (i_t * BT + i_c * BC, i_c * BC), (BC, BC), (1, 0))
tl.store(p_A2, b_A.to(A2.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gla/chunk.py |
58b3463b-151f-4a0e-bc35-134133839e16 | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/abc/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.jit
def chunk_abc_fwd_kernel_V(q, v, z, h, o, A, s_k_h, s_k_t, s_k_d, s_v_h,
s_v_t, s_v_d, s_h_h, s_h_t, s_h_d, scale, T: tl.constexpr, K: tl.
constexpr, V: tl.constexpr, BT: tl.constexpr, BK: tl.constexpr, BV: tl.
constexpr):
i_v, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_p = tl.maximum(i_t * BT - 1, 0)
b_o = tl.zeros([BT, BV], dtype=tl.float32)
for i_k in range(tl.cdiv(K, BK)):
p_q = tl.make_block_ptr(q + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), (
i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_z = tl.make_block_ptr(z + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), (
i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_h = tl.make_block_ptr(h + i_bh * s_h_h + i_t * K * V, (K, V), (
s_h_t, s_h_d), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
p_zp = tl.make_block_ptr(z + i_bh * s_k_h, (T * K,), (s_k_d,), (i_p *
K + i_k * BK,), (BK,), (0,))
b_q = tl.load(p_q, boundary_check=(0, 1))
b_q = (b_q * scale).to(b_q.dtype)
b_z = tl.load(p_z, boundary_check=(0, 1))
b_zp = tl.load(p_zp, boundary_check=(0,))
b_q = (b_q * tl.exp(b_zp[None, :] - b_z)).to(b_q.dtype)
b_h = tl.load(p_h, boundary_check=(0, 1))
if i_k >= 0:
b_o += tl.dot(b_q, b_h, allow_tf32=False)
p_v = tl.make_block_ptr(v + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), (i_t *
BT, i_v * BV), (BT, BV), (1, 0))
p_o = tl.make_block_ptr(o + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), (i_t *
BT, i_v * BV), (BT, BV), (1, 0))
p_A = tl.make_block_ptr(A + i_bh * T * BT, (T, BT), (BT, 1), (i_t * BT,
0), (BT, BT), (1, 0))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_A = tl.load(p_A, boundary_check=(0, 1))
b_o += tl.dot(b_A, b_v, allow_tf32=False)
tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/abc/chunk.py |
466944e0-8df6-43af-8e5a-f9a4513cce97 | wy_fast.py | sustcsonglin/flash-linear-attention | fla/ops/gated_delta_rule/wy_fast.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({}, num_warps=num_warps) for
num_warps in [2, 4, 8]], key=['BT', 'BK', 'BV'])
@triton.jit
def fwd_recompute_w_u_kernel(k, v, beta, w, u, Aw, Au, offsets, indices, T:
tl.constexpr, H: tl.constexpr, K: tl.constexpr, V: tl.constexpr, BT: tl
.constexpr, BK: tl.constexpr, BV: tl.constexpr, HEAD_FIRST: tl.
constexpr, USE_OFFSETS: tl.constexpr):
i_t, i_bh = tl.program_id(0), tl.program_id(1)
i_b, i_h = i_bh // H, i_bh % H
if USE_OFFSETS:
i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices +
i_t * 2 + 1).to(tl.int32)
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
T = eos - bos
else:
bos, eos = i_b * T, i_b * T + T
if HEAD_FIRST:
p_beta = tl.make_block_ptr(beta + i_bh * T, (T,), (1,), (i_t * BT,),
(BT,), (0,))
p_Au = tl.make_block_ptr(Au + i_bh * T * BT, (T, BT), (BT, 1), (i_t *
BT, 0), (BT, BT), (1, 0))
else:
p_beta = tl.make_block_ptr(beta + bos * H + i_h, (T,), (H,), (i_t *
BT,), (BT,), (0,))
p_Au = tl.make_block_ptr(Au + (bos * H + i_h) * BT, (T, BT), (H *
BT, 1), (i_t * BT, 0), (BT, BT), (1, 0))
b_beta = tl.load(p_beta, boundary_check=(0,))
b_Au = tl.load(p_Au, boundary_check=(0, 1))
for i_v in range(tl.cdiv(V, BV)):
if HEAD_FIRST:
p_v = tl.make_block_ptr(v + i_bh * T * V, (T, V), (V, 1), (i_t *
BT, i_v * BV), (BT, BV), (1, 0))
p_u = tl.make_block_ptr(u + i_bh * T * V, (T, V), (V, 1), (i_t *
BT, i_v * BV), (BT, BV), (1, 0))
else:
p_v = tl.make_block_ptr(v + (bos * H + i_h) * V, (T, V), (H * V,
1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_u = tl.make_block_ptr(u + (bos * H + i_h) * V, (T, V), (H * V,
1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_vb = (b_v * b_beta[:, None]).to(b_v.dtype)
b_u = tl.dot(b_Au, b_vb, allow_tf32=False)
tl.store(p_u, b_u.to(p_u.dtype.element_ty), boundary_check=(0, 1))
tl.debug_barrier()
b_Au = None
if HEAD_FIRST:
p_Aw = tl.make_block_ptr(Aw + i_bh * T * BT, (T, BT), (BT, 1), (i_t *
BT, 0), (BT, BT), (1, 0))
else:
p_Aw = tl.make_block_ptr(Aw + (bos * H + i_h) * BT, (T, BT), (H *
BT, 1), (i_t * BT, 0), (BT, BT), (1, 0))
b_Aw = tl.load(p_Aw, boundary_check=(0, 1))
for i_k in range(tl.cdiv(K, BK)):
if HEAD_FIRST:
p_k = tl.make_block_ptr(k + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
p_w = tl.make_block_ptr(w + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
else:
p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_w = tl.make_block_ptr(w + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
b_k = tl.load(p_k, boundary_check=(0, 1))
b_kb = (b_k * b_beta[:, None]).to(b_k.dtype)
b_w = tl.dot(b_Aw, b_kb)
tl.store(p_w, b_w.to(p_w.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gated_delta_rule/wy_fast.py |
b7a78b7c-edba-48ab-88b8-0d8b4fa84948 | normalization.py | ServiceNow/Fast-LLM | fast_llm/functional/triton/normalization.py | 8b46289079da67cba99628448a6b6083dac083cf | 0 | @triton.jit
def triton_normalization_backward_kernel_2(grad_weight_partial_ptr,
grad_bias_partial_ptr, grad_weight_ptr, grad_bias_ptr, m, n_cols,
has_bias: tl.constexpr, accumulate_grad: tl.constexpr, block_size_m: tl
.constexpr, block_size_n: tl.constexpr):
pid = tl.program_id(0)
cols = pid * block_size_n + tl.arange(0, block_size_n)
grad_weight_partial_sum = tl.zeros((block_size_m, block_size_n), dtype=
tl.float32)
if has_bias:
grad_bias_partial_sum = tl.zeros((block_size_m, block_size_n),
dtype=tl.float32)
col_mask = cols < n_cols
for i in range(0, m, block_size_m):
rows = i + tl.arange(0, block_size_m)
mask = (rows[:, None] < m) & (cols[None, :] < n_cols)
offsets = rows[:, None] * n_cols + cols[None, :]
grad_weight_partial_sum += tl.load(grad_weight_partial_ptr +
offsets, mask=mask, other=0.0)
if has_bias:
grad_bias_partial_sum += tl.load(grad_bias_partial_ptr +
offsets, mask=mask, other=0.0)
grad_weight = tl.sum(grad_weight_partial_sum, axis=0)
if accumulate_grad:
grad_weight = tl.load(grad_weight_ptr + cols, mask=col_mask
) + grad_weight
tl.store(grad_weight_ptr + cols, grad_weight, mask=col_mask)
if has_bias:
grad_bias = tl.sum(grad_bias_partial_sum, axis=0)
if accumulate_grad:
grad_bias = tl.load(grad_bias_ptr + cols, mask=col_mask
) + grad_bias
tl.store(grad_bias_ptr + cols, grad_bias, mask=col_mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization",
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"Apache"
] | https://github.com/ServiceNow/Fast-LLM/blob/8b46289079da67cba99628448a6b6083dac083cf/fast_llm/functional/triton/normalization.py |
26e4d111-f4aa-445c-91fa-fcdaff284554 | k_layer_norm.py | cpuhrsch/torchfused | torchfused/triton/k_layer_norm.py | 6c40ed160dcecbe7825f268f7c86bccd359e0ebf | 0 | @triton.jit
def _layer_norm_no_affine_bwd(DX, DY, Y, V, stride, N, **META):
row = tl.program_id(0)
cols = tl.arange(0, META['BLOCK_SIZE_N'])
y_ptrs = Y + row * stride + cols
dy_ptrs = DY + row * stride + cols
y = tl.load(y_ptrs, mask=cols < N, other=0).to(tl.float32)
dy = tl.load(dy_ptrs, mask=cols < N, other=0).to(tl.float32)
rstd = tl.load(V + row)
xhat = tl.where(cols < N, y, 0.0)
wdy = tl.where(cols < N, dy, 0.0)
mean1 = tl.sum(xhat * wdy, axis=0) / N
mean2 = tl.sum(wdy, axis=0) / N
dx = (wdy - (xhat * mean1 + mean2)) * rstd
_store(dx, DX, stride, N, META)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization",
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"BSD"
] | https://github.com/cpuhrsch/torchfused/blob/6c40ed160dcecbe7825f268f7c86bccd359e0ebf/torchfused/triton/k_layer_norm.py |
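
The per-row formula used by the kernel above, in NumPy (reference only); it reuses the stored output y of the affine-free forward pass directly as x-hat.

import numpy as np

def layer_norm_no_affine_bwd(y, dy, rstd):
    # y: (R, N) normalized outputs (x-hat), dy: (R, N) upstream grads, rstd: (R,)
    mean1 = (y * dy).mean(axis=-1, keepdims=True)
    mean2 = dy.mean(axis=-1, keepdims=True)
    return (dy - (y * mean1 + mean2)) * rstd[:, None]
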
8cfb5a84-099f-41cc-9d64-f30e70e6e39b | quantization.py | neuro-ml/kerops | kerops/kernels/quantization.py | 735336775e825d5cb06b8850d25423661b12d1ac | 0 | @triton.jit
def _QuantUint8Window_impl(input_ptr, output_ptr, numel, window, BLOCK_SIZE:
tl.constexpr):
tid = tl.program_id(0)
input_ptr += tid * BLOCK_SIZE
output_ptr += tid * BLOCK_SIZE
offset = tl.arange(0, BLOCK_SIZE)
mask = offset < numel - tid * BLOCK_SIZE
input = tl.load(input_ptr + offset, mask=mask).to(tl.float32)
input = tl.minimum(tl.maximum(input, -window), window)
input = (input + window) / (2 * window)
input *= 255
input = input.to(tl.uint8)
tl.store(output_ptr + offset, input, mask=mask)
| {
"Data Type": [
"uint8"
],
"Functionality": [
"Quantization"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/neuro-ml/kerops/blob/735336775e825d5cb06b8850d25423661b12d1ac/kerops/kernels/quantization.py |
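
A NumPy equivalent (reference only) of the clamp-and-rescale mapping onto uint8 performed by the kernel above:

import numpy as np

def quant_uint8_window(x, window):
    x = np.clip(np.asarray(x, np.float32), -window, window)
    x = (x + window) / (2 * window) * 255
    return x.astype(np.uint8)

print(quant_uint8_window([-2.0, 0.0, 0.5, 2.0], window=1.0))   # [  0 127 191 255]
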
ecb649cc-99a1-47b3-abec-69aff2b74328 | kernel_benchmark.py | ruikangliu/FlatQuant | benchmarks/kernel_benchmark.py | 9d3032065f1688cb3f71ebc8166df6d91440e871 | 0 | @triton.autotune(configs=[triton.Config({}, num_stages=2, num_warps=4),
triton.Config({}, num_stages=2, num_warps=2), triton.Config({},
num_stages=3, num_warps=4), triton.Config({}, num_stages=3, num_warps=2
), triton.Config({}, num_stages=4, num_warps=4), triton.Config({},
num_stages=4, num_warps=2)], key=['B', 'M', 'N'])
@triton.jit
def matmul_kernel(a_ptr, b_ptr, c_ptr, res_ptr, output_scale, B, M: tl.
constexpr, N: tl.constexpr, np2_M: tl.constexpr, np2_N: tl.constexpr,
stride_am, stride_ak, stride_bb, stride_bk, stride_bn, stride_ck,
stride_cn, stride_resb, stride_resm, stride_resn, BLOCK_SIZE_M: tl.
constexpr, is_split: tl.constexpr):
"""
a @ b @ c
a [M, M]
b [B, M, N]
c [N, N]
now only supports BLOCK_SIZE_M == triton.next_power_of_2(BLOCK_SIZE_M)
"""
pid = tl.program_id(axis=0)
batch_id = tl.program_id(axis=1) + tl.program_id(axis=2) * tl.num_programs(
axis=1)
pid_m = pid
offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
offs_bn = tl.arange(0, np2_N) % N
offs_k = tl.arange(0, np2_M)
a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] *
stride_ak)
b_ptrs = b_ptr + batch_id * stride_bb.to(tl.int64) + (offs_k[:, None] *
stride_bk + offs_bn[None, :] * stride_bn)
accumulator = tl.zeros((BLOCK_SIZE_M, np2_N), dtype=tl.float32)
a = tl.load(a_ptrs, mask=offs_k[None, :] < M, other=0.0)
b = tl.load(b_ptrs, mask=offs_k[:, None] < M, other=0.0)
accumulator += tl.dot(a, b)
tmp_ab = accumulator.to(tl.float16)
offs_cn = tl.arange(0, np2_N) % N
offs_k = tl.arange(0, np2_N)
c_ptrs = c_ptr + (offs_k[:, None] * stride_ck + offs_cn[None, :] *
stride_cn)
c = tl.load(c_ptrs, mask=offs_k[:, None] < N, other=0.0)
accumulator = 0
accumulator += tl.dot(tmp_ab, c)
if is_split:
res = accumulator.to(tl.float16)
offs_resm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_resn = tl.arange(0, np2_N)
res_ptrs = res_ptr + stride_resb.to(tl.int64
) * batch_id + stride_resm * offs_resm[:, None
] + stride_resn * offs_resn[None, :]
res_mask = (offs_resm[:, None] < M) & (offs_resn[None, :] < N)
tl.store(res_ptrs, res, mask=res_mask)
else:
abs_src_val = tl.abs(accumulator)
max_src_val = tl.max(abs_src_val)
scale = max_src_val / 7.0
quant_val = libdevice.llrint(accumulator / scale)
quant_val = max(-8, min(quant_val, 7))
quant_val = quant_val.reshape(BLOCK_SIZE_M, np2_N // 2, 2,
can_reorder=False)
quant_val_even, quant_val_odd = quant_val.split()
quant_val_odd = quant_val_odd << 4
res = tl.zeros((BLOCK_SIZE_M, np2_N // 2), dtype=tl.int8)
res = res | quant_val_odd & 240
res = res | quant_val_even & 15
offs_resm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_resn = tl.arange(0, np2_N // 2)
res_ptrs = res_ptr + stride_resb.to(tl.int64
) * batch_id + stride_resm * offs_resm[:, None
] + stride_resn * offs_resn[None, :]
res_mask = (offs_resm[:, None] < M) & (offs_resn[None, :] < N // 2)
tl.store(res_ptrs, res, mask=res_mask)
tl.store(output_scale + batch_id, scale.to(tl.float16))
| {
"Data Type": [
"fp32",
"fp16",
"int8"
],
"Functionality": [
"Matrix Multiplication",
"Quantization"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/ruikangliu/FlatQuant/blob/9d3032065f1688cb3f71ebc8166df6d91440e871/benchmarks/kernel_benchmark.py |
06a01956-3562-48af-87d8-0ba21f8d29e7 | fused_moe_a8w8.py | AlibabaPAI/FLASHNN | flashnn/triton_kernels/fused_moe_a8w8.py | 528a9301587f5fb135b25d973a87ba0a40a703a7 | 0 | @triton.jit
def _fused_moe_a8w8_kernel(A, B, C, alpha_row_ptr, alpha_col_ptr,
topk_weights_ptr, sorted_token_ids_ptr, expert_ids_ptr,
num_tokens_post_padded_ptr, N, K, EM, num_valid_tokens, stride_am,
stride_ak, stride_be, stride_bn, stride_bk, stride_cm, stride_cn,
stride_scale_be, stride_scale_bn, BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M:
tl.constexpr, MUL_ROUTED_WEIGHT: tl.constexpr, top_k: tl.constexpr):
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(EM, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + pid % num_pid_in_group % group_size_m
pid_n = pid % num_pid_in_group // group_size_m
num_tokens_post_padded = tl.load(num_tokens_post_padded_ptr)
if pid_m * BLOCK_SIZE_M >= num_tokens_post_padded:
return
offs_token_id = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_token = tl.load(sorted_token_ids_ptr + offs_token_id)
token_mask = offs_token < num_valid_tokens
offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptrs = A + (offs_token[:, None] // top_k * stride_am + offs_k[None, :
] * stride_ak)
off_experts = tl.load(expert_ids_ptr + pid_m)
b_ptrs = B + off_experts * stride_be + (offs_bn[None, :] * stride_bn +
offs_k[:, None] * stride_bk)
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.int32)
_A0 = tl.zeros([1, 1], dtype=a_ptrs.dtype.element_ty)
_B0 = tl.zeros([1, 1], dtype=b_ptrs.dtype.element_ty)
lo = 0
hi = tl.cdiv(K, BLOCK_SIZE_K)
for k in range(lo, hi - 1):
a = tl.load(a_ptrs, mask=token_mask[:, None], other=_A0)
b = tl.load(b_ptrs)
accumulator += tl.dot(a, b)
a_ptrs += BLOCK_SIZE_K * stride_ak
b_ptrs += BLOCK_SIZE_K * stride_bk
for k in range(hi - 1, hi):
a = tl.load(a_ptrs, mask=token_mask[:, None] & (offs_k[None, :] < K -
k * BLOCK_SIZE_K), other=_A0)
b = tl.load(b_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K,
other=_B0)
accumulator += tl.dot(a, b)
a_ptrs += BLOCK_SIZE_K * stride_ak
b_ptrs += BLOCK_SIZE_K * stride_bk
offs_token_id = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_token = tl.load(sorted_token_ids_ptr + offs_token_id)
token_mask = offs_token < num_valid_tokens
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
alpha_row_ptrs = alpha_row_ptr + offs_token // top_k
alpha_col_ptrs = alpha_col_ptr + off_experts * stride_scale_be + offs_cn
_ALPHA0 = tl.zeros([1], dtype=alpha_row_ptr.dtype.element_ty)
alpha_row = tl.load(alpha_row_ptrs, mask=token_mask, other=_ALPHA0).to(tl
.float32)
alpha_col = tl.load(alpha_col_ptrs, mask=offs_cn < N, other=_ALPHA0).to(tl
.float32)
accumulator = accumulator * alpha_row[:, None]
accumulator = accumulator * alpha_col[None, :]
if MUL_ROUTED_WEIGHT:
moe_weight = tl.load(topk_weights_ptr + offs_token, mask=token_mask,
other=0)
accumulator = accumulator * moe_weight[:, None]
accumulator = accumulator.to(tl.float16)
c_ptrs = C + stride_cm * offs_token[:, None] + stride_cn * offs_cn[None, :]
c_mask = token_mask[:, None] & (offs_cn[None, :] < N)
tl.store(c_ptrs, accumulator, mask=c_mask)
| {
"Data Type": [
"fp16"
],
"Functionality": [
"Matrix Multiplication",
"Top-K Selection"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/fused_moe_a8w8.py |
18bdbd68-3013-4acb-9efe-c2827d61c4ee | y_7.py | IntelLabs/EquiTriton | src/equitriton/sph_harm/direct/y_7.py | 1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c | 0 | @triton.jit
def seventh_order_bwd(coord_ptr: tl.tensor, coord_grad_ptr: tl.tensor,
sph_grad_ptr: tl.tensor, block_size: tl.constexpr, coord_numel: tl.
constexpr, output_numel: tl.constexpr, col_offset: tl.constexpr,
output_stride: tl.constexpr):
block_id = tl.program_id(0)
coord_stride = 3
coord_striding = tl.arange(0, block_size) * coord_stride
coord_row_offset = coord_striding + block_size * coord_stride * block_id
x = tl.load(coord_ptr + coord_row_offset, mask=coord_row_offset <
coord_numel)
y = tl.load(coord_ptr + coord_row_offset + 1, mask=coord_row_offset + 1 <
coord_numel)
z = tl.load(coord_ptr + coord_row_offset + 2, mask=coord_row_offset + 2 <
coord_numel)
output_striding = tl.arange(0, block_size) * output_stride
output_row_offset = (output_striding + block_size * output_stride *
block_id + col_offset)
g_0 = tl.load(sph_grad_ptr + output_row_offset, mask=output_row_offset <
output_numel)
g_1 = tl.load(sph_grad_ptr + output_row_offset + 1, mask=
output_row_offset + 1 < output_numel)
g_2 = tl.load(sph_grad_ptr + output_row_offset + 2, mask=
output_row_offset + 2 < output_numel)
g_3 = tl.load(sph_grad_ptr + output_row_offset + 3, mask=
output_row_offset + 3 < output_numel)
g_4 = tl.load(sph_grad_ptr + output_row_offset + 4, mask=
output_row_offset + 4 < output_numel)
g_5 = tl.load(sph_grad_ptr + output_row_offset + 5, mask=
output_row_offset + 5 < output_numel)
g_6 = tl.load(sph_grad_ptr + output_row_offset + 6, mask=
output_row_offset + 6 < output_numel)
g_7 = tl.load(sph_grad_ptr + output_row_offset + 7, mask=
output_row_offset + 7 < output_numel)
g_8 = tl.load(sph_grad_ptr + output_row_offset + 8, mask=
output_row_offset + 8 < output_numel)
g_9 = tl.load(sph_grad_ptr + output_row_offset + 9, mask=
output_row_offset + 9 < output_numel)
g_10 = tl.load(sph_grad_ptr + output_row_offset + 10, mask=
output_row_offset + 10 < output_numel)
g_11 = tl.load(sph_grad_ptr + output_row_offset + 11, mask=
output_row_offset + 11 < output_numel)
g_12 = tl.load(sph_grad_ptr + output_row_offset + 12, mask=
output_row_offset + 12 < output_numel)
g_13 = tl.load(sph_grad_ptr + output_row_offset + 13, mask=
output_row_offset + 13 < output_numel)
g_14 = tl.load(sph_grad_ptr + output_row_offset + 14, mask=
output_row_offset + 14 < output_numel)
CONST000 = 1.66389743899677
CONST001 = 3.0
CONST003 = 5.0
CONST004 = 3.32779487799353
CONST009 = 11.7655316231354
CONST012 = 16.5555704843566
CONST014 = 20.4939015319192
CONST016 = 22.0740939791422
CONST018 = 23.5310632462709
CONST019 = 20.4939015319192
CONST020 = 27.1108834234519
CONST022 = 33.1111409687132
CONST024 = 36.7901566319036
CONST025 = 36.7901566319036
CONST026 = 38.4260653723485
CONST027 = 38.4260653723485
CONST029 = 38.4260653723485
CONST030 = 44.1481879582843
CONST032 = -4.9916923169903
CONST037 = 47.0621264925417
CONST039 = 56.2781179722634
CONST044 = -441.481879582843
CONST045 = -441.481879582843
CONST048 = 76.852130744697
CONST049 = 76.852130744697
CONST050 = -8.47215106982872
CONST054 = 110.370469895711
CONST055 = 110.370469895711
CONST056 = -399.335385359224
CONST057 = 117.655316231354
CONST058 = 122.963409191515
CONST059 = 122.963409191515
CONST061 = -376.497011940334
CONST062 = -376.497011940334
CONST064 = 141.186379477625
CONST066 = 147.160626527614
CONST067 = 153.704261489394
CONST069 = -350.955726374425
CONST072 = 203.331625675889
CONST073 = 203.331625675889
CONST074 = -307.408522978788
CONST075 = -9.60651634308713
CONST076 = -9.37968632871057
CONST079 = -281.390589861317
CONST080 = -1.66389743899677
CONST081 = -266.223590239483
CONST082 = -263.216794780819
CONST084 = -263.216794780818
CONST085 = -250.998007960223
CONST089 = 281.390589861317
CONST091 = -220.740939791422
CONST092 = -220.740939791422
CONST093 = -199.667692679612
CONST094 = -1.60108605718119
CONST095 = -187.593726574211
CONST096 = -177.482393492989
CONST097 = -9.60651634308712
CONST098 = -9.1975391579759
CONST100 = -153.704261489394
CONST101 = -147.160626527614
CONST102 = -140.695294930659
CONST104 = -133.111795119741
CONST105 = -133.111795119741
CONST106 = -125.499003980111
CONST107 = -125.499003980111
CONST109 = -105.286717912327
CONST110 = -101.665812837945
CONST111 = -99.833846339806
CONST112 = -101.665812837945
CONST113 = -4.80325817154356
CONST114 = -81.3326502703558
CONST115 = -81.3326502703557
CONST116 = -76.852130744697
CONST117 = -75.2994023880668
CONST119 = -70.5931897388126
CONST121 = -66.2222819374265
CONST122 = -66.5558975598707
CONST123 = -66.5558975598707
CONST124 = -62.7495019900557
CONST125 = -56.2781179722634
CONST126 = -55.1852349478554
CONST127 = -55.1852349478554
CONST128 = -50.8329064189723
CONST129 = -50.8329064189723
CONST130 = -562.781179722634
CONST131 = -47.0621264925418
CONST132 = -50.8329064189724
CONST133 = -44.1481879582843
CONST134 = -44.3705983732471
CONST135 = -40.6663251351779
CONST136 = -40.6663251351779
CONST137 = -8.31948719498384
CONST138 = -37.6497011940334
CONST139 = -33.2779487799353
CONST140 = -29.9501539019418
CONST141 = -25.4164532094862
CONST142 = -25.4164532094862
CONST143 = -23.5310632462709
CONST144 = -532.447180478965
CONST145 = -19.2130326861743
CONST146 = -17.5477863187212
CONST147 = -12.8765548211663
CONST148 = -11.6472820729774
CONST149 = -11.2076024002683
CONST150 = -9.1975391579759
CONST151 = -11.0370469895711
CONST152 = -11.7655316231354
CONST153 = -12.8765548211663
CONST154 = -4.80325817154356
CONST155 = -3.32779487799353
CONST156 = -1.60108605718119
VAR06 = x * x * x * x
VAR07 = x * x * x
VAR08 = x * x
VAR04 = VAR07 * VAR07
VAR05 = VAR07 * VAR08
VAR16 = y * y * y
VAR17 = y * y
VAR13 = VAR16 * VAR16
VAR14 = VAR16 * VAR17
VAR15 = VAR17 * VAR17
VAR25 = z * z * z
VAR26 = z * z
VAR22 = VAR25 * VAR25
VAR23 = VAR25 * VAR26
VAR24 = VAR26 * VAR26
g_x = tl.load(coord_grad_ptr + coord_row_offset, mask=coord_row_offset <
coord_numel)
g_y = tl.load(coord_grad_ptr + coord_row_offset + 1, mask=
coord_row_offset + 1 < coord_numel)
g_z = tl.load(coord_grad_ptr + coord_row_offset + 2, mask=
coord_row_offset + 2 < coord_numel)
g_x += g_0 * (CONST082 * VAR08 * VAR24 - CONST084 * VAR06 * VAR26 +
CONST146 * VAR04 - CONST146 * VAR22) + g_1 * y * (CONST039 * VAR23 +
CONST089 * VAR06 * z + CONST130 * VAR08 * VAR25) + g_10 * (CONST155 *
VAR23 * x + VAR25 * (-CONST105 * VAR17 * x + CONST139 * VAR07) + z *
(-CONST056 * VAR07 * VAR17 + CONST081 * VAR15 * x + CONST140 * VAR05)
) + g_11 * (VAR16 * (CONST044 * VAR26 * x - CONST101 * VAR07) + y *
(CONST054 * VAR24 * x - CONST091 * VAR07 * VAR26 + CONST121 * VAR05)
) + g_12 * (CONST022 * VAR23 * x + VAR25 * (CONST024 * VAR07 +
CONST045 * VAR17 * x) + z * (-CONST044 * VAR07 * VAR17 + CONST126 *
VAR05)) + g_13 * y * (CONST079 * VAR24 * x + CONST125 * VAR05 -
CONST130 * VAR07 * VAR26) + g_14 * (-CONST069 * VAR07 * VAR25 +
CONST109 * VAR05 * z + CONST109 * VAR23 * x) + g_2 * (CONST001 *
VAR08 * (CONST091 * VAR17 * VAR26 - CONST150 * VAR24) + CONST003 *
VAR06 * (CONST012 * VAR26 + CONST016 * VAR17) + CONST055 * VAR17 *
VAR24 + CONST147 * VAR04 + CONST150 * VAR22) + g_3 * (VAR16 * (
CONST044 * VAR08 * z + CONST066 * VAR25) + y * (-CONST091 * VAR06 *
z + CONST133 * VAR23)) + g_4 * (CONST001 * VAR08 * (CONST122 *
VAR17 * VAR26 + CONST134 * VAR15 - CONST137 * VAR24) + CONST003 *
VAR06 * (CONST000 * VAR26 - CONST139 * VAR17) - CONST032 * VAR22 -
CONST105 * VAR15 * VAR26 + CONST111 * VAR17 * VAR24 + CONST148 * VAR04
) + g_5 * (CONST001 * VAR08 * (CONST106 * VAR16 * z - CONST131 *
VAR25 * y) + CONST057 * VAR06 * y * z + CONST107 * VAR16 * VAR25 -
CONST117 * VAR14 * z - CONST143 * VAR23 * y) + g_6 * (CONST001 *
VAR08 * (CONST116 * VAR15 - CONST116 * VAR17 * VAR26 + CONST154 *
VAR24) + CONST003 * VAR06 * (CONST026 * VAR17 + CONST113 * VAR26) +
CONST014 * VAR13 + CONST027 * VAR17 * VAR24 + CONST116 * VAR15 *
VAR26 + CONST149 * VAR04 + CONST156 * VAR22) + g_7 * (CONST114 *
VAR14 * x + VAR16 * (CONST072 * VAR07 + CONST073 * VAR26 * x) + y *
(CONST110 * VAR07 * VAR26 + CONST128 * VAR05 + CONST129 * VAR24 * x)
) + g_8 * (CONST075 * VAR23 * x + VAR25 * (-CONST100 * VAR17 * x +
CONST145 * VAR07) + z * (CONST067 * VAR07 * VAR17 + CONST097 *
VAR05 + CONST100 * VAR15 * x)) + g_9 * (-CONST085 * VAR07 * VAR16 +
CONST117 * VAR14 * x + y * (CONST018 * VAR24 * x + CONST119 * VAR05 +
CONST131 * VAR07 * VAR26))
g_y += g_1 * (CONST039 * VAR23 * x + CONST095 * VAR07 * VAR25 -
CONST125 * VAR05 * z) + g_10 * (CONST123 * VAR23 * y + VAR25 * (-
CONST096 * VAR16 - CONST105 * VAR08 * y) + z * (-CONST093 * VAR06 *
y + CONST144 * VAR08 * VAR16)) + g_11 * (CONST001 * VAR17 * (
CONST025 * VAR06 + CONST025 * VAR24 + CONST092 * VAR08 * VAR26) -
CONST126 * VAR06 * VAR26 - CONST126 * VAR08 * VAR24 + CONST151 *
VAR04 + CONST151 * VAR22) + g_12 * (CONST030 * VAR23 * y + CONST045 *
VAR08 * VAR25 * y - CONST092 * VAR06 * y * z) + g_13 * (CONST076 *
VAR04 - CONST076 * VAR22 - CONST102 * VAR06 * VAR26 + CONST102 *
VAR08 * VAR24) + g_2 * (CONST030 * VAR05 * y + CONST045 * VAR07 *
VAR26 * y - CONST092 * VAR24 * x * y) + g_3 * (CONST001 * VAR17 * (
CONST066 * VAR25 * x + CONST101 * VAR07 * z) - CONST133 * VAR05 * z +
CONST133 * VAR23 * x) + g_4 * (-CONST123 * VAR05 * y + VAR07 * (
CONST096 * VAR16 + CONST104 * VAR26 * y) + x * (CONST093 * VAR24 *
y - CONST144 * VAR16 * VAR26)) + g_5 * (-CONST143 * VAR05 * z +
VAR07 * (CONST062 * VAR17 * z - CONST131 * VAR25) + x * (CONST061 *
VAR17 * VAR25 - CONST062 * VAR15 * z - CONST143 * VAR23)) + g_6 * (
CONST048 * VAR05 * y + VAR07 * (CONST074 * VAR16 - CONST100 * VAR26 *
y) + x * (CONST058 * VAR14 + CONST074 * VAR16 * VAR26 - CONST116 *
VAR24 * y)) + g_7 * (CONST001 * VAR17 * (-CONST112 * VAR08 * VAR26 -
CONST128 * VAR06 - CONST128 * VAR24) + CONST003 * VAR15 * (CONST135 *
VAR08 + CONST136 * VAR26) + CONST020 * VAR13 + CONST050 * VAR04 +
CONST050 * VAR22 + CONST141 * VAR06 * VAR26 + CONST142 * VAR08 * VAR24
) + g_8 * (CONST048 * VAR23 * y + VAR25 * (CONST074 * VAR16 -
CONST100 * VAR08 * y) + z * (CONST049 * VAR06 * y + CONST059 *
VAR14 + CONST074 * VAR08 * VAR16)) + g_9 * (CONST001 * VAR17 * (-
CONST124 * VAR06 + CONST124 * VAR24) + CONST003 * VAR15 * (CONST138 *
VAR08 - CONST138 * VAR26) + CONST009 * VAR08 * VAR24 + CONST152 *
VAR04 + CONST152 * VAR06 * VAR26 - CONST152 * VAR22)
g_z += g_0 * (CONST069 * VAR07 * VAR25 - CONST109 * VAR05 * z -
CONST109 * VAR23 * x) + g_1 * y * (-CONST079 * VAR24 * x - CONST125 *
VAR05 + CONST130 * VAR07 * VAR26) + g_10 * (CONST001 * VAR26 * (-
CONST123 * VAR08 * VAR17 - CONST134 * VAR15 + CONST137 * VAR06) +
CONST003 * VAR24 * (CONST080 * VAR08 + CONST139 * VAR17) + CONST032 *
VAR04 + CONST105 * VAR08 * VAR15 - CONST111 * VAR06 * VAR17 -
CONST148 * VAR22) + g_11 * (VAR16 * (CONST044 * VAR08 * z -
CONST101 * VAR25) + y * (CONST054 * VAR06 * z - CONST091 * VAR08 *
VAR25 + CONST121 * VAR23)) + g_12 * (CONST001 * VAR26 * (CONST091 *
VAR08 * VAR17 - CONST098 * VAR06) + CONST003 * VAR24 * (CONST012 *
VAR08 + CONST016 * VAR17) + CONST055 * VAR06 * VAR17 + CONST098 *
VAR04 + CONST153 * VAR22) + g_13 * y * (-CONST079 * VAR06 * z -
CONST125 * VAR23 + CONST130 * VAR08 * VAR25) + g_14 * (-CONST082 *
VAR06 * VAR26 + CONST084 * VAR08 * VAR24 + CONST146 * VAR04 -
CONST146 * VAR22) + g_2 * (CONST022 * VAR05 * z + VAR07 * (CONST025 *
VAR25 + CONST045 * VAR17 * z) + x * (-CONST044 * VAR17 * VAR25 +
CONST127 * VAR23)) + g_3 * (VAR16 * (-CONST045 * VAR26 * x +
CONST101 * VAR07) + y * (CONST091 * VAR24 * x - CONST133 * VAR05)
) + g_4 * (CONST004 * VAR05 * z + VAR07 * (CONST104 * VAR17 * z -
CONST139 * VAR25) + x * (CONST056 * VAR17 * VAR25 - CONST081 *
VAR15 * z - CONST140 * VAR23)) + g_5 * (-CONST143 * VAR05 * y +
VAR07 * (CONST064 * VAR26 * y + CONST106 * VAR16) + x * (CONST057 *
VAR24 * y + CONST061 * VAR16 * VAR26 - CONST117 * VAR14)) + g_6 * (
CONST097 * VAR05 * z + VAR07 * (-CONST100 * VAR17 * z + CONST145 *
VAR25) + x * (CONST075 * VAR23 + CONST100 * VAR15 * z - CONST100 *
VAR17 * VAR25)) + g_7 * (CONST115 * VAR14 * z + VAR16 * (CONST072 *
VAR25 + CONST073 * VAR08 * z) + y * (CONST112 * VAR08 * VAR25 +
CONST128 * VAR23 + CONST132 * VAR06 * z)) + g_8 * (CONST001 * VAR26 *
(-CONST116 * VAR08 * VAR17 + CONST116 * VAR15 + CONST154 * VAR06) +
CONST003 * VAR24 * (CONST026 * VAR17 + CONST154 * VAR08) + CONST019 *
VAR13 + CONST029 * VAR06 * VAR17 + CONST094 * VAR04 + CONST116 *
VAR08 * VAR15 + CONST149 * VAR22) + g_9 * (CONST085 * VAR16 * VAR25 -
CONST117 * VAR14 * z + y * (CONST037 * VAR08 * VAR25 - CONST119 *
VAR23 + CONST143 * VAR06 * z))
tl.store(coord_grad_ptr + coord_row_offset, g_x, mask=coord_row_offset <
coord_numel)
tl.store(coord_grad_ptr + coord_row_offset + 1, g_y, mask=
coord_row_offset + 1 < coord_numel)
tl.store(coord_grad_ptr + coord_row_offset + 2, g_z, mask=
coord_row_offset + 2 < coord_numel)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/direct/y_7.py |
cf434cf1-eaa7-43a8-af57-ed65644e78a1 | tritonFun.py | microsoft/Givens-Orthogonal-Backprop | rotMat/triton/tritonFun.py | 3040fa287aacbf07be56eb12ddd7c513f7800191 | 0 | @triton.jit
def _forward_kernel(c_ptr, s_ptr, u_ptr, col_stride, row_stride, **meta):
n, n_tilde, dead_index, d_max, tournament_step, BLOCK_SIZE = meta['N'
], meta['N_TILDE'], meta['DEAD_INDEX'], meta['D_MAX'], meta['STEP'
], meta['BLOCK_SIZE']
pid_x = tl.program_id(axis=0)
temp = n_tilde - 1
i = pid_x + tournament_step
if pid_x == 0:
i = 0
if i >= n_tilde:
i -= temp
j = temp - pid_x + tournament_step
if j >= n_tilde:
j -= temp
if i > j:
i, j = j, i
if (j == dead_index) | (j > d_max) & (i > d_max):
return
theta_offset = i * n - (i + 2) * (i + 1) // 2 + j
c = tl.load(c_ptr + theta_offset)
s = tl.load(s_ptr + theta_offset)
offsets = tl.program_id(axis=1) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
output_offsets_i = i * row_stride + offsets * col_stride
output_offsets_j = j * row_stride + offsets * col_stride
maximum = n * row_stride + n * col_stride
maski = output_offsets_i < maximum
maskj = output_offsets_j < maximum
ui = tl.load(u_ptr + output_offsets_i, mask=maski)
uj = tl.load(u_ptr + output_offsets_j, mask=maskj)
ioutput = ui * c - uj * s
joutput = uj * c + ui * s
ui = tl.store(u_ptr + output_offsets_i, ioutput, mask=maski)
uj = tl.store(u_ptr + output_offsets_j, joutput, mask=maskj)
| {
"Data Type": [],
"Functionality": [],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/microsoft/Givens-Orthogonal-Backprop/blob/3040fa287aacbf07be56eb12ddd7c513f7800191/rotMat/triton/tritonFun.py |
ba747cf4-08d3-4408-bf74-1154ad010718 | triton_chunk.py | NX-AI/xlstm-jax | xlstm_jax/models/xlstm_pytorch/blocks/mlstm/backend/triton_chunk.py | 6615e620ba4ecdbe4fd9cc4e9a5a313b133e84a7 | 0 | @triton.jit
def chunk_mlstm_fwd_kernel_h(q, k, v, C, n, m, m_total, i, f, h, norm,
s_qk_h, s_qk_t, s_qk_d, s_vh_h, s_vh_t, s_vh_d, s_C_h, s_C_t, s_n_h,
scale, H: tl.constexpr, T: tl.constexpr, K: tl.constexpr, V: tl.
constexpr, BT: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr, NT: tl
.constexpr):
i_v, i_t, i_bC = tl.program_id(0), tl.program_id(1), tl.program_id(2)
h_i = tl.arange(0, BT)
m_s = h_i[:, None] >= h_i[None, :]
b_h = tl.zeros([BT, BV], dtype=tl.float32)
b_s = tl.zeros([BT, BT], dtype=tl.float32)
b_norm = tl.zeros([BT, BV], dtype=tl.float32)
for i_k in range(tl.cdiv(K, BK)):
p_q = tl.make_block_ptr(q + i_bC * s_qk_h, (T, K), (s_qk_t, s_qk_d),
(i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_k = tl.make_block_ptr(k + i_bC * s_qk_h, (K, T), (s_qk_d, s_qk_t),
(i_k * BK, i_t * BT), (BK, BT), (0, 1))
p_C = tl.make_block_ptr(C + i_bC * s_C_h + i_t * K * V, (K, V), (
s_C_t, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
p_n = tl.make_block_ptr(n + i_bC * s_n_h + i_t * K, (K, BV), (1, 0),
(i_k * BK, 0), (BK, BV), (0, 1))
b_q = tl.load(p_q, boundary_check=(0, 1))
b_k = tl.load(p_k, boundary_check=(0, 1))
b_C = tl.load(p_C, boundary_check=(0, 1))
b_n = tl.load(p_n, boundary_check=(0,))
b_h += tl.dot(b_q, b_C, allow_tf32=False)
b_s += tl.dot(b_q, b_k, allow_tf32=False)
b_n2 = tl.dot(b_q, b_n, allow_tf32=False)
b_norm += b_n2
p_f = f + i_bC * T + i_t * BT + tl.arange(0, BT)
b_f = tl.load(p_f)
p_i = i + i_bC * T + i_t * BT + tl.arange(0, BT)
b_i = tl.load(p_i)
b_m = tl.load(m + i_bC * (NT + 1) + i_t)
b_logD = b_i[None, :] + b_f[:, None] - b_f[None, :]
b_logD = tl.where(m_s, b_logD, -float('inf'))
b_mlogD = tl.max(b_logD, axis=1)
b_m_total = tl.maximum(b_f + b_m, b_mlogD)
p_m_total = tl.make_block_ptr(m_total + T * i_bC, (T,), (1,), (i_t * BT
,), (BT,), (0,))
tl.store(p_m_total, b_m_total.to(p_m_total.dtype.element_ty),
boundary_check=(0,))
b_D = tl.math.exp2(b_logD - b_m_total[:, None])
b_h = b_h * tl.math.exp2(b_f + b_m - b_m_total)[:, None] * scale
b_s = b_s * b_D * scale
b_norm = b_norm * tl.math.exp2(b_f + b_m - b_m_total)[:, None] * scale
b_s = tl.where(m_s, b_s, 0)
b_norm += tl.sum(b_s, axis=1)[:, None]
b_norm = tl.abs(b_norm)
b_norm = tl.maximum(b_norm, tl.math.exp2(-b_m_total)[:, None])
tl.store(norm + i_bC * T + i_t * BT + tl.arange(0, BT), tl.max(b_norm,
axis=1))
p_v = tl.make_block_ptr(v + i_bC * s_vh_h, (T, V), (s_vh_t, s_vh_d), (
i_t * BT, i_v * BV), (BT, BV), (1, 0))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_h = (b_h + tl.dot(b_s.to(b_v.dtype), b_v, allow_tf32=False)) / b_norm
p_h = tl.make_block_ptr(h + i_bC * s_vh_h, (T, V), (s_vh_t, s_vh_d), (
i_t * BT, i_v * BV), (BT, BV), (1, 0))
tl.store(p_h, b_h.to(p_h.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Recurrent Neural Networks",
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access",
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"Apache",
"BSD"
] | https://github.com/NX-AI/xlstm-jax/blob/6615e620ba4ecdbe4fd9cc4e9a5a313b133e84a7/xlstm_jax/models/xlstm_pytorch/blocks/mlstm/backend/triton_chunk.py |
185656ec-3bff-4006-8c3e-b0f32117c386 | triton_sll.py | pytorch/FBGEMM | fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py | fe980ab54a6e28818d81c8694b6564e7f804418b | 0 | @triton.jit
def array_jagged_bmm_kernel(a_ptr, b_ptr, c_ptr, a_offsets_ptr,
b_offsets_ptr, c_offsets_ptr, D, stride_bk, stride_bn, stride_cm,
stride_cn, transpose, max_seq_len, BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, allow_tf32: tl.
constexpr):
pid_batch = tl.program_id(2)
pid_m = tl.program_id(1)
pid_n = tl.program_id(0)
batch_offset_am = tl.load(a_offsets_ptr + pid_batch)
batch_offset_bk = tl.load(b_offsets_ptr + pid_batch)
batch_offset_cm = tl.load(c_offsets_ptr + pid_batch)
batch_K = tl.load(b_offsets_ptr + pid_batch + 1) - batch_offset_bk
batch_M = tl.load(c_offsets_ptr + pid_batch + 1) - batch_offset_cm
stride_am = batch_M * (1 - transpose) + 1 * transpose
stride_ak = batch_M * transpose + 1 * (1 - transpose)
batch_K = tl.minimum(batch_K, max_seq_len)
batch_M = tl.minimum(batch_M, max_seq_len)
if batch_K == 0:
return
batch_N = D
if pid_m * BLOCK_SIZE_M >= batch_M or pid_n * BLOCK_SIZE_N >= batch_N:
return
offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % batch_M
offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % batch_N
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptrs = a_ptr + batch_offset_am + (offs_am[:, None] * stride_am +
offs_k[None, :] * stride_ak)
b_ptrs = b_ptr + batch_offset_bk * stride_bk + (offs_k[:, None] *
stride_bk + offs_bn[None, :] * stride_bn)
c = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for k in range(0, tl.cdiv(batch_K, BLOCK_SIZE_K)):
a = tl.load(a_ptrs, mask=offs_k[None, :] < batch_K - k *
BLOCK_SIZE_K, other=0.0)
b = tl.load(b_ptrs, mask=offs_k[:, None] < batch_K - k *
BLOCK_SIZE_K, other=0.0)
c += tl.dot(a, b, allow_tf32=allow_tf32)
a_ptrs += BLOCK_SIZE_K * stride_ak
b_ptrs += BLOCK_SIZE_K * stride_bk
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptrs = c_ptr + stride_cm * batch_offset_cm + stride_cm * offs_cm[:, None
] + stride_cn * offs_cn[None, :]
c_mask = (offs_cm[:, None] < batch_M) & (offs_cn[None, :] < batch_N)
tl.store(c_ptrs, c, mask=c_mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Blocked Access",
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"High Throughput"
]
} | [
"BSD",
"MIT"
] | https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py |
022c4cc0-b308-49be-b921-b32509712645 | empty.py | triton-lang/triton | python/examples/empty.py | a2b398e0bb1b120f31cf386d6ae3261c3ab84207 | 0 | @triton.jit
def kernel(X, stride_xm, stride_xn, BLOCK: tl.constexpr):
pass
| {
"Data Type": [],
"Functionality": [],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/triton-lang/triton/blob/a2b398e0bb1b120f31cf386d6ae3261c3ab84207/python/examples/empty.py |
e008b2d4-e5d7-4904-adbb-d6d877357da0 | gemm_a16w8.py | AlibabaPAI/FLASHNN | flashnn/triton_kernels/gemm_a16w8.py | 528a9301587f5fb135b25d973a87ba0a40a703a7 | 0 | @triton.jit
def _triton_gemm_a16w8_per_channel_kernel(A, B, C, scale_b, bias,
zero_points, M, N, K, stride_am, stride_ak, stride_bn, stride_bk,
stride_cm, stride_cn, stride_zpk, stride_zpn, stride_scalek,
stride_scalen, add_bias: tl.constexpr, add_zero_points: tl.constexpr,
BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr,
GROUP_M: tl.constexpr, SPLIT_K: tl.constexpr):
pid = tl.program_id(0)
pid_z = tl.program_id(1)
grid_m = tl.cdiv(M, BLOCK_M)
grid_n = tl.cdiv(N, BLOCK_N)
width = GROUP_M * grid_n
group_id = pid // width
group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
pid_m = group_id * GROUP_M + pid % group_size
pid_n = pid % width // group_size
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
rk = pid_z * BLOCK_K + tl.arange(0, BLOCK_K)
A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
B = B + (rbn[:, None] * stride_bn + rk[None, :] * stride_bk)
acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.float32)
if add_zero_points:
offs_zero_points = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
zero_points_ptrs = zero_points + offs_zero_points
_ZERO_POINT0 = tl.zeros([1], dtype=zero_points.dtype.element_ty)
zero_points_vals = tl.load(zero_points_ptrs, mask=offs_zero_points <
N, other=_ZERO_POINT0)
for k in range(0, tl.cdiv(K, BLOCK_K * SPLIT_K)):
k_remaining = K - k * (BLOCK_K * SPLIT_K)
_A0 = tl.zeros((1, 1), dtype=A.dtype.element_ty)
a = tl.load(A, mask=rk[None, :] < k_remaining, other=_A0)
_B0 = tl.zeros((1, 1), dtype=B.dtype.element_ty)
b = tl.load(B, mask=rk[None, :] < k_remaining, other=_B0)
if add_zero_points:
b = b - zero_points_vals[:, None]
b_fp = b.to(A.dtype.element_ty)
b_fp = tl.trans(b_fp)
acc += tl.dot(a, b_fp, out_dtype=tl.float32, allow_tf32=True)
A += BLOCK_K * SPLIT_K * stride_ak
B += BLOCK_K * SPLIT_K * stride_bk
offs_scale = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
scale_ptrs = scale_b + offs_scale
_SCALE0 = tl.zeros([1], dtype=scale_b.dtype.element_ty)
scales = tl.load(scale_ptrs, mask=offs_scale < N, other=_SCALE0)
acc *= scales
acc = acc.to(C.dtype.element_ty)
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
C = C + (rm[:, None] * stride_cm + rn[None, :] * stride_cn)
mask = (rm < M)[:, None] & (rn < N)[None, :]
if add_bias:
offs_bias = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
bias_ptrs = bias + offs_bias
_BIAS0 = tl.zeros([1], dtype=bias.dtype.element_ty)
bias_vals = tl.load(bias_ptrs, mask=offs_bias < N, other=_BIAS0)
if pid_z == 0:
acc += bias_vals[None, :]
if SPLIT_K == 1:
tl.store(C, acc, mask=mask)
else:
tl.atomic_add(C, acc, mask=mask)
| {
"Data Type": [
"int8"
],
"Functionality": [
"Matrix Multiplication",
"Quantization"
],
"Memory Access Pattern": [
"Strided Access",
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/gemm_a16w8.py |
0193e573-d1a1-4efc-bba9-db1a33827ad0 | z_order.py | Kitsunetic/space-filling-pytorch | space_filling_pytorch/functional/z_order.py | 0de955ad1036973ee7506c5a0124c208acec722d | 0 | @triton.jit
def _encode_z_unpadded_kernel(xyz_ptr, batch_idx_ptr, code_ptr, space_size,
x_offset, y_offset, z_offset, str_xyz_n, str_xyz_c, N, BLK: tl.
constexpr, ASSIGN_BATCH_INDEX: tl.constexpr):
pid = tl.program_id(0)
offs_n = pid * BLK + tl.arange(0, BLK)
mask = offs_n < N
xyz_ptrs = xyz_ptr + offs_n * str_xyz_n
fx = tl.load(xyz_ptrs + x_offset * str_xyz_c, mask=mask)
fy = tl.load(xyz_ptrs + y_offset * str_xyz_c, mask=mask)
fz = tl.load(xyz_ptrs + z_offset * str_xyz_c, mask=mask)
ret = _calculate_zorder(fx, fy, fz, space_size)
if ASSIGN_BATCH_INDEX:
batch_idx_ptrs = batch_idx_ptr + offs_n
batch_idx = tl.load(batch_idx_ptrs, mask=mask).to(tl.int64)
ret |= batch_idx << 48
code_ptrs = code_ptr + offs_n
tl.store(code_ptrs, ret, mask=mask)
| {
"Data Type": [],
"Functionality": [],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/Kitsunetic/space-filling-pytorch/blob/0de955ad1036973ee7506c5a0124c208acec722d/space_filling_pytorch/functional/z_order.py |
86d2fa4b-e481-4a17-9385-cbf6f0389011 | triton_sll.py | pytorch/FBGEMM | fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py | fe980ab54a6e28818d81c8694b6564e7f804418b | 0 | @triton.jit
def jagged_dense_flash_attention_fwd_kernel(q_ptr, k_ptr, v_ptr, ab_ptr,
o_ptr, lse_ptr, jagged_offsets_ptr, max_seq_len, stride_ql, stride_qd,
stride_kb, stride_kd, stride_kt, stride_vn, stride_vd, stride_ab_b,
stride_ab_n, stride_ab_t, stride_ob, stride_ot, stride_od, D: tl.
constexpr, T: tl.constexpr, allow_tf32: tl.constexpr, BLOCK_T: tl.
constexpr, BLOCK_L: tl.constexpr, BLOCK_D: tl.constexpr):
pid_t = tl.program_id(0)
pid_batch = tl.program_id(1)
begin = tl.load(jagged_offsets_ptr + pid_batch)
end = tl.load(jagged_offsets_ptr + pid_batch + 1)
length = end - begin
length = tl.minimum(length, max_seq_len)
if length == 0:
return
q_start_ptr = q_ptr + begin * stride_ql
k_start_ptr = k_ptr + pid_batch * stride_kb
ab_start_ptr = ab_ptr + pid_batch * stride_ab_b
v_start_ptr = v_ptr + begin * stride_vn
offs_t = pid_t * BLOCK_T + tl.arange(0, BLOCK_T)
offs_d = tl.arange(0, BLOCK_D)
ki_ptrs = k_start_ptr + offs_d[:, None] * stride_kd + offs_t[None, :
] * stride_kt
ki = tl.load(ki_ptrs, mask=(offs_d[:, None] < D) & (offs_t[None, :] < T
), other=0.0)
mi = tl.zeros([BLOCK_T], dtype=tl.float32) - float('inf')
li = tl.zeros([BLOCK_T], dtype=tl.float32)
oi = tl.zeros([BLOCK_T, BLOCK_D], dtype=tl.float32)
for start_l in range(0, length, BLOCK_L):
offs_l = start_l + tl.arange(0, BLOCK_L)
qj_ptrs = q_start_ptr + offs_l[:, None] * stride_ql + offs_d[None, :
] * stride_qd
qj = tl.load(qj_ptrs, mask=(offs_l[:, None] < length) & (offs_d[
None, :] < D), other=0.0)
qk = tl.dot(qj, ki, allow_tf32=allow_tf32)
ab_ptrs = ab_start_ptr + offs_l[:, None] * stride_ab_n + offs_t[None, :
] * stride_ab_t
abij = tl.load(ab_ptrs, mask=(offs_l[:, None] < length) & (offs_t[
None, :] < T), other=0.0)
qk = qk + abij
mij_hat = tl.max(qk, axis=0)
mi_new = tl.maximum(mi, mij_hat)
pij_hat = tl.exp(qk - mi_new[None, :])
pij_hat = tl.where((offs_l[:, None] < length) & (offs_t[None, :] <
T), pij_hat, 0.0)
lij_hat = tl.sum(pij_hat, axis=0)
alpha = tl.exp(mi - mi_new)
li_new = alpha * li + lij_hat
oi = alpha[:, None] * oi
vj_ptrs = v_start_ptr + offs_l[:, None] * stride_vn + offs_d[None, :
] * stride_vd
vj = tl.load(vj_ptrs, mask=(offs_l[:, None] < length) & (offs_d[
None, :] < D), other=0.0)
pij_hat = pij_hat.to(v_ptr.dtype.element_ty)
oi = oi + tl.dot(tl.trans(pij_hat), vj, allow_tf32=allow_tf32)
mi = mi_new
li = li_new
oi = oi / li[:, None]
lse_ptrs = lse_ptr + pid_batch * T + offs_t
lse_i = mi + tl.log(li)
tl.store(lse_ptrs, lse_i, mask=offs_t < T)
attn_out_ptrs = o_ptr + pid_batch * stride_ob + offs_t[:, None
] * stride_ot + offs_d[None, :] * stride_od
tl.store(attn_out_ptrs, oi, mask=(offs_t[:, None] < T) & (offs_d[None,
:] < D))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access",
"Blocked Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"BSD",
"MIT"
] | https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py |
25c4864d-4f48-498d-bd92-b326c89bc547 | math.py | BobMcDear/attorch | attorch/math.py | da06cb6236bb47195e33fe3986ed21c675ed94cc | 0 | @triton.jit
def nll_loss(input, size, reduction: tl.constexpr):
"""
Measures the negative log likelihood loss given log-probabilities of target class.
Args:
input: Input containing predicted log-probabilities corresponding to target class.
The input can have arbitrary shape.
size: Number of elements in the input.
This value is used only if reduction is 'mean'.
reduction: Reduction strategy for the output.
Options are 'none' for no reduction, 'mean' for averaging the loss
across all entries, and 'sum' for summing the loss across all entries.
Returns:
Loss.
"""
input = input.to(tl.float32)
if reduction == 'none':
output = -input
elif reduction == 'mean':
output = -tl.sum(input) / size
elif reduction == 'sum':
output = -tl.sum(input)
return output
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Low Latency",
"Single Instance"
]
} | [
"MIT"
] | https://github.com/BobMcDear/attorch/blob/da06cb6236bb47195e33fe3986ed21c675ed94cc/attorch/math.py |
f9b8875f-1555-4a1e-a553-4f8289afd403 | triton_fused_local_attn.py | LouChao98/vqtree | ops/triton_fused_local_attn.py | 27a53274df7a804bce27dffcce5f5be73f64b6f3 | 0 | @triton.heuristics({'EVEN_M': lambda args: args['seqlen_q'] % args[
'BLOCK_M'] == 0, 'EVEN_N': lambda args: args['seqlen_k'] % args[
'BLOCK_N'] == 0})
@triton.jit
def _fwd_kernel(Q, K, V, Out, softmax_scale, stride_qb, stride_qh,
stride_qm, stride_kb, stride_kh, stride_kn, stride_vb, stride_vh,
stride_vn, stride_ob, stride_oh, stride_om, nheads, seqlen_q, seqlen_k,
CACHE_KEY_SEQLEN_Q, CACHE_KEY_SEQLEN_K, WINDOW_SIZE: tl.constexpr,
BLOCK_HEADDIM: tl.constexpr, EVEN_M: tl.constexpr, EVEN_N: tl.constexpr,
BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr):
start_m = tl.program_id(0)
off_hb = tl.program_id(1)
off_b = off_hb // nheads
off_h = off_hb % nheads
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
offs_d = tl.arange(0, BLOCK_HEADDIM)
Q_block_ptr = tl.make_block_ptr(base=Q + (off_b * stride_qb + off_h *
stride_qh), shape=(seqlen_q, BLOCK_HEADDIM), strides=(stride_qm, 1),
offsets=(start_m * BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_HEADDIM
), order=(1, 0))
K_block_ptr = tl.make_block_ptr(base=K + (off_b * stride_kb + off_h *
stride_kh), shape=(BLOCK_HEADDIM, seqlen_k), strides=(1, stride_kn),
offsets=(0, 0), block_shape=(BLOCK_HEADDIM, BLOCK_N), order=(0, 1))
V_block_ptr = tl.make_block_ptr(base=V + (off_b * stride_vb + off_h *
stride_vh), shape=(seqlen_k, BLOCK_HEADDIM), strides=(stride_vn, 1),
offsets=(0, 0), block_shape=(BLOCK_N, BLOCK_HEADDIM), order=(1, 0))
l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + 1.0
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) + NEGINF
acc = tl.zeros([BLOCK_M, BLOCK_HEADDIM], dtype=tl.float32)
if EVEN_M:
q = tl.load(Q_block_ptr)
else:
q = tl.load(Q_block_ptr, boundary_check=(0,), padding_option='zero')
acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, softmax_scale,
K_block_ptr, V_block_ptr, start_m, offs_m, offs_n, seqlen_k,
WINDOW_SIZE, BLOCK_M, BLOCK_N, EVEN_M & EVEN_N, 1)
acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, softmax_scale,
K_block_ptr, V_block_ptr, start_m, offs_m, offs_n, seqlen_k,
WINDOW_SIZE, BLOCK_M, BLOCK_N, EVEN_M & EVEN_N, 2)
acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, softmax_scale,
K_block_ptr, V_block_ptr, start_m, offs_m, offs_n, seqlen_k,
WINDOW_SIZE, BLOCK_M, BLOCK_N, EVEN_M & EVEN_N, 3)
acc = acc / l_i[:, None]
start_m = tl.program_id(0)
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_d = tl.arange(0, BLOCK_HEADDIM)
out_ptrs = Out + off_b * stride_ob + off_h * stride_oh + (offs_m[:,
None] * stride_om + offs_d[None, :])
if EVEN_M:
tl.store(out_ptrs, acc)
else:
tl.store(out_ptrs, acc, mask=offs_m[:, None] < seqlen_q)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Matrix Multiplication",
"Softmax"
],
"Memory Access Pattern": [
"Tiled",
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/LouChao98/vqtree/blob/27a53274df7a804bce27dffcce5f5be73f64b6f3/ops/triton_fused_local_attn.py |
d220be62-ce65-4110-8341-90c6b7412373 | scatter_reduce.py | pyg-team/pyg-lib | pyg_lib/ops/scatter_reduce.py | bdd392a7093c5016f42ec7ae1945ca77dbdd97db | 0 | @triton.jit
def _fused_scatter_reduce_forward_kernel(inputs_ptr, index_ptr, out_ptr,
num_feats, num_reductions, numel, REDUCE0, REDUCE1, REDUCE2, REDUCE3,
BLOCK_SIZE: tl.constexpr):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < numel
inputs = tl.load(inputs_ptr + offsets, mask=mask)
index_offsets = offsets // num_feats
index = tl.load(index_ptr + index_offsets, mask=mask)
if REDUCE0 > 0:
out_offsets = num_feats * num_reductions * index
out_offsets = out_offsets + offsets % num_feats
if REDUCE0 == 1:
tl.atomic_add(out_ptr + out_offsets, inputs, mask=mask)
elif REDUCE0 == 2:
tl.atomic_add(out_ptr + out_offsets, inputs, mask=mask)
elif REDUCE0 == 3:
tl.atomic_min(out_ptr + out_offsets, inputs, mask=mask)
elif REDUCE0 == 4:
tl.atomic_max(out_ptr + out_offsets, inputs, mask=mask)
if REDUCE1 > 0:
out_offsets = num_feats * num_reductions * index
out_offsets = out_offsets + num_feats
out_offsets = out_offsets + offsets % num_feats
if REDUCE1 == 1:
tl.atomic_add(out_ptr + out_offsets, inputs, mask=mask)
elif REDUCE1 == 2:
tl.atomic_add(out_ptr + out_offsets, inputs, mask=mask)
elif REDUCE2 == 3:
tl.atomic_min(out_ptr + out_offsets, inputs, mask=mask)
elif REDUCE3 == 4:
tl.atomic_max(out_ptr + out_offsets, inputs, mask=mask)
if REDUCE2 > 0:
out_offsets = num_feats * num_reductions * index
out_offsets = out_offsets + 2 * num_feats
out_offsets = out_offsets + offsets % num_feats
if REDUCE2 == 1:
tl.atomic_add(out_ptr + out_offsets, inputs, mask=mask)
elif REDUCE2 == 2:
tl.atomic_add(out_ptr + out_offsets, inputs, mask=mask)
elif REDUCE2 == 3:
tl.atomic_min(out_ptr + out_offsets, inputs, mask=mask)
elif REDUCE2 == 4:
tl.atomic_max(out_ptr + out_offsets, inputs, mask=mask)
if REDUCE3 > 0:
out_offsets = num_feats * num_reductions * index
out_offsets = out_offsets + 3 * num_feats
out_offsets = out_offsets + offsets % num_feats
if REDUCE3 == 1:
tl.atomic_add(out_ptr + out_offsets, inputs, mask=mask)
elif REDUCE3 == 2:
tl.atomic_add(out_ptr + out_offsets, inputs, mask=mask)
elif REDUCE3 == 3:
tl.atomic_min(out_ptr + out_offsets, inputs, mask=mask)
elif REDUCE3 == 4:
tl.atomic_max(out_ptr + out_offsets, inputs, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Memory-Bound"
]
} | [
"MIT"
] | https://github.com/pyg-team/pyg-lib/blob/bdd392a7093c5016f42ec7ae1945ca77dbdd97db/pyg_lib/ops/scatter_reduce.py |
04221a8d-0acd-483c-9226-c62cc41c67fe | layer_norm.py | chengzeyi/stable-fast | src/sfast/triton/ops/layer_norm.py | 3a6f35c7045f8f6812515957ca62ef37260ff080 | 0 | @triton.jit
def _layer_norm_fwd_fused(X, Y, W, B, Mean, Rstd, stride: tl.constexpr, N:
tl.constexpr, eps, BLOCK_SIZE: tl.constexpr):
row = tl.program_id(0)
Y += row * stride
X += row * stride
if BLOCK_SIZE >= N:
cols = tl.arange(0, BLOCK_SIZE)
x = tl.load(X + cols, mask=cols < N).to(tl.float32)
m2_ = tl.zeros((BLOCK_SIZE,), dtype=tl.float32)
weight_ = (cols < N).to(tl.float32)
_mean, _m2, _weight = x, m2_, weight_
else:
_mean = tl.zeros((BLOCK_SIZE,), dtype=tl.float32)
_m2 = tl.zeros((BLOCK_SIZE,), dtype=tl.float32)
_weight = tl.zeros((BLOCK_SIZE,), dtype=tl.float32)
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
x = tl.load(X + cols, mask=cols < N).to(tl.float32)
m2_ = tl.zeros((BLOCK_SIZE,), dtype=tl.float32)
weight_ = (cols < N).to(tl.float32)
if off == 0:
_mean, _m2, _weight = x, m2_, weight_
else:
_mean, _m2, _weight = welford_combine(_mean, _m2, _weight,
x, m2_, weight_)
mean, m2, weight = tl.reduce((_mean, _m2, _weight), 0, welford_combine)
var = m2 / weight
rstd = 1 / tl.sqrt(var + eps)
mean = mean.to(x.dtype)
rstd = rstd.to(x.dtype)
if Mean is not None:
tl.store(Mean + row, mean)
if Rstd is not None:
tl.store(Rstd + row, rstd)
if BLOCK_SIZE >= N:
cols = tl.arange(0, BLOCK_SIZE)
mask = cols < N
if W is None:
w = tl.full((BLOCK_SIZE,), 1.0, dtype=x.dtype)
else:
w = tl.load(W + cols, mask=mask)
if B is None:
b = tl.zeros((BLOCK_SIZE,), dtype=x.dtype)
else:
b = tl.load(B + cols, mask=mask)
x_hat = (x - mean) * rstd
y = x_hat * w + b
tl.store(Y + cols, y, mask=mask)
else:
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
mask = cols < N
if W is None:
w = tl.full((BLOCK_SIZE,), 1.0, dtype=x.dtype)
else:
w = tl.load(W + cols, mask=mask)
if B is None:
b = tl.zeros((BLOCK_SIZE,), dtype=x.dtype)
else:
b = tl.load(B + cols, mask=mask)
x = tl.load(X + cols, mask=mask)
x_hat = (x - mean) * rstd
y = x_hat * w + b
tl.store(Y + cols, y, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound",
"Memory-Bound"
]
} | [
"MIT"
] | https://github.com/chengzeyi/stable-fast/blob/3a6f35c7045f8f6812515957ca62ef37260ff080/src/sfast/triton/ops/layer_norm.py |
747983d8-2740-48f1-a5b4-0b2b362c5601 | qkv_concat.py | ai-compiler-study/triton-kernels | triton_kernels/ops/qkv_concat.py | 2308e5e9d965059fe2d19b4d535debac4970b69e | 0 | @triton.jit
def triton_qkv_concat(txt_qkv, img_qkv, out_q_ptr, out_k_ptr, out_v_ptr,
seq_len, num_heads, head_dim, hidden_dim, seq_txt_len, stride_txt_a,
stride_txt_b, stride_img_a, stride_img_b, stride_output_a,
stride_output_b, stride_output_c, XBLOCK: tl.constexpr):
pid = tl.program_id(0)
xoffset = pid * XBLOCK + tl.arange(0, XBLOCK)[:]
seq_idx = xoffset // hidden_dim % seq_len
batch_idx = xoffset // stride_output_a
hidden_dim_idx = xoffset % hidden_dim
headdim_idx = xoffset % head_dim
head_idx = xoffset // head_dim % num_heads
txt_seq_end = tl.full([1], seq_txt_len, tl.int64)
txt_mask = seq_idx < txt_seq_end
img_mask = seq_idx >= txt_seq_end
txt_q_data = tl.load(txt_qkv + (hidden_dim * 0 + hidden_dim_idx +
stride_txt_b * seq_idx + stride_txt_a * batch_idx), txt_mask, other=0.0
).to(tl.float32)
zero_mask = tl.full(txt_q_data.shape, 0.0, txt_q_data.dtype)
masked_txt_q = tl.where(txt_mask, txt_q_data, zero_mask)
img_q_data = tl.load(img_qkv + (-stride_txt_a + hidden_dim * 0 +
hidden_dim_idx + stride_img_b * seq_idx + stride_img_a * batch_idx),
img_mask, other=0.0).to(tl.float32)
zero_mask = tl.full(img_q_data.shape, 0.0, img_q_data.dtype)
masked_img_q = tl.where(img_mask, img_q_data, zero_mask)
out_q = tl.where(txt_mask, masked_txt_q, masked_img_q)
tl.store(out_q_ptr + (headdim_idx + stride_output_c * seq_idx +
stride_output_b * head_idx + stride_output_a * batch_idx), out_q, None)
txt_k_data = tl.load(txt_qkv + (hidden_dim * 1 + hidden_dim_idx +
stride_txt_b * seq_idx + stride_txt_a * batch_idx), txt_mask, other=0.0
).to(tl.float32)
zero_mask = tl.full(txt_k_data.shape, 0.0, txt_k_data.dtype)
masked_txt_q = tl.where(txt_mask, txt_k_data, zero_mask)
img_k_data = tl.load(img_qkv + (-stride_txt_a + hidden_dim * 1 +
hidden_dim_idx + stride_img_b * seq_idx + stride_img_a * batch_idx),
img_mask, other=0.0).to(tl.float32)
zero_mask = tl.full(img_k_data.shape, 0.0, img_k_data.dtype)
masked_img_k = tl.where(img_mask, img_k_data, zero_mask)
out_k = tl.where(txt_mask, masked_txt_q, masked_img_k)
tl.store(out_k_ptr + (headdim_idx + stride_output_c * seq_idx +
stride_output_b * head_idx + stride_output_a * batch_idx), out_k, None)
txt_v_data = tl.load(txt_qkv + (hidden_dim * 2 + hidden_dim_idx +
stride_txt_b * seq_idx + stride_txt_a * batch_idx), txt_mask, other=0.0
).to(tl.float32)
zero_mask = tl.full(txt_v_data.shape, 0.0, txt_v_data.dtype)
masked_txt_v = tl.where(txt_mask, txt_v_data, zero_mask)
img_v_data = tl.load(img_qkv + (-stride_txt_a + hidden_dim * 2 +
hidden_dim_idx + stride_img_b * seq_idx + stride_img_a * batch_idx),
img_mask, other=0.0).to(tl.float32)
zero_mask = tl.full(img_v_data.shape, 0.0, img_v_data.dtype)
masked_img_q = tl.where(img_mask, img_v_data, zero_mask)
output_v = tl.where(txt_mask, masked_txt_v, masked_img_q)
tl.store(out_v_ptr + (headdim_idx + stride_output_c * seq_idx +
stride_output_b * head_idx + stride_output_a * batch_idx), output_v,
None)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/ai-compiler-study/triton-kernels/blob/2308e5e9d965059fe2d19b4d535debac4970b69e/triton_kernels/ops/qkv_concat.py |
5d836130-3356-4d7b-9797-41b5b20290c8 | fp8_matmul.py | drisspg/transformer_nuggets | transformer_nuggets/fp8/fp8_matmul.py | a4c66bbeebaa479ad8b6ed82d7efbafa41b17260 | 0 | @triton.jit
def apply_scaling(accumulator, a_scale, b_scale, ROW_WISE_SCALING: tl.
constexpr, offs_cm, offs_cn, M, N, stride_a_scale_m, stride_b_scale_n):
if ROW_WISE_SCALING:
a_scales = tl.load(a_scale + offs_cm * stride_a_scale_m, mask=
offs_cm < M, other=0.0)
b_scales = tl.load(b_scale + offs_cn * stride_b_scale_n, mask=
offs_cn < N, other=0.0)
acc_scale = a_scales[:, None] * b_scales[None, :]
else:
acc_scale = a_scale * b_scale
return accumulator * acc_scale
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"BSD"
] | https://github.com/drisspg/transformer_nuggets/blob/a4c66bbeebaa479ad8b6ed82d7efbafa41b17260/transformer_nuggets/fp8/fp8_matmul.py |
da11f109-9ed7-4901-b57c-0a34d0fda019 | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/gsa/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.jit
def chunk_gsa_bwd_k_kernel_intra_dvg(v, g, o, A, do, dv, dg, offsets,
indices, T: tl.constexpr, HQ: tl.constexpr, H: tl.constexpr, V: tl.
constexpr, BT: tl.constexpr, BC: tl.constexpr, BV: tl.constexpr, NC: tl
.constexpr, NG: tl.constexpr, USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl
.constexpr):
i_v, i_c, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_bg = i_bh // NG
i_b, i_hq = i_bh // HQ, i_bh % HQ
i_h = i_hq // NG
i_t, i_i = i_c // NC, i_c % NC
if USE_OFFSETS:
i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices +
i_t * 2 + 1).to(tl.int32)
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
T = eos - bos
else:
bos, eos = i_b * T, i_b * T + T
o_v = i_v * BV + tl.arange(0, BV)
m_v = o_v < V
if i_t * BT + i_i * BC > T:
return
if HEAD_FIRST:
p_gv = tl.make_block_ptr(g + i_bg * T * V, (T, V), (V, 1), (i_t *
BT + i_i * BC, i_v * BV), (BC, BV), (1, 0))
p_gn = tl.max_contiguous(tl.multiple_of(g + i_bg * T * V + (min(i_t *
BT + i_i * BC + BC, T) - 1) * V + o_v, BV), BV)
else:
p_gv = tl.make_block_ptr(g + (bos * H + i_h) * V, (T, V), (H * V, 1
), (i_t * BT + i_i * BC, i_v * BV), (BC, BV), (1, 0))
p_gn = tl.max_contiguous(tl.multiple_of(g + (bos + min(i_t * BT +
i_i * BC + BC, T) - 1) * H * V + i_h * V + o_v, BV), BV)
b_gn = tl.load(p_gn, mask=m_v, other=0)
b_gv = tl.load(p_gv, boundary_check=(0, 1))
b_dv = tl.zeros([BC, BV], dtype=tl.float32)
for i_j in range(i_i + 1, NC):
if HEAD_FIRST:
p_g = tl.make_block_ptr(g + i_bg * T * V, (T, V), (V, 1), (i_t *
BT + i_j * BC, i_v * BV), (BC, BV), (1, 0))
p_A = tl.make_block_ptr(A + i_bh * T * BT, (BT, T), (1, BT), (
i_i * BC, i_t * BT + i_j * BC), (BC, BC), (0, 1))
p_do = tl.make_block_ptr(do + i_bh * T * V, (T, V), (V, 1), (
i_t * BT + i_j * BC, i_v * BV), (BC, BV), (1, 0))
else:
p_g = tl.make_block_ptr(g + (bos * H + i_h) * V, (T, V), (H * V,
1), (i_t * BT + i_j * BC, i_v * BV), (BC, BV), (1, 0))
p_A = tl.make_block_ptr(A + (bos * HQ + i_hq) * BT, (BT, T), (1,
HQ * BT), (i_i * BC, i_t * BT + i_j * BC), (BC, BC), (0, 1))
p_do = tl.make_block_ptr(do + (bos * HQ + i_hq) * V, (T, V), (
HQ * V, 1), (i_t * BT + i_j * BC, i_v * BV), (BC, BV), (1, 0))
b_g = tl.load(p_g, boundary_check=(0, 1))
b_do = tl.load(p_do, boundary_check=(0, 1))
b_do = (b_do * tl.exp(b_g - b_gn[None, :])).to(b_do.dtype)
b_A = tl.load(p_A, boundary_check=(0, 1))
b_dv += tl.dot(b_A, b_do)
b_dv *= tl.exp(b_gn[None, :] - b_gv)
o_i = tl.arange(0, BC)
o_c = i_i * BC + tl.arange(0, BC)
if HEAD_FIRST:
p_g = tl.max_contiguous(tl.multiple_of(g + i_bg * T * V + (i_t * BT +
i_i * BC) * V + o_v, BV), BV)
p_A = tl.max_contiguous(tl.multiple_of(A + i_bh * T * BT + (i_t *
BT + i_i * BC) * BT + o_c, BC), BC)
p_do = tl.max_contiguous(tl.multiple_of(do + i_bh * T * V + (i_t *
BT + i_i * BC) * V + o_v, BV), BV)
else:
p_g = tl.max_contiguous(tl.multiple_of(g + (bos + i_t * BT + i_i *
BC) * H * V + i_h * V + o_v, BV), BV)
p_A = tl.max_contiguous(tl.multiple_of(A + (bos + i_t * BT + i_i *
BC) * HQ * BT + i_hq * BT + o_c, BC), BC)
p_do = tl.max_contiguous(tl.multiple_of(do + (bos + i_t * BT + i_i *
BC) * HQ * V + i_hq * V + o_v, BV), BV)
for j in range(0, min(BC, T - i_t * BT - i_i * BC)):
b_A = tl.load(p_A)
b_g = tl.load(p_g, mask=m_v, other=0)
b_do = tl.load(p_do, mask=m_v, other=0)
m_i = o_i[:, None] <= j
b_dv += tl.where(m_i, tl.exp(b_g[None, :] - b_gv) * b_A[:, None] *
b_do[None, :], 0.0)
p_g += (1 if HEAD_FIRST else H) * V
p_A += (1 if HEAD_FIRST else HQ) * BT
p_do += (1 if HEAD_FIRST else HQ) * V
if HEAD_FIRST:
p_o = tl.make_block_ptr(o + i_bh * T * V, (T, V), (V, 1), (i_t * BT +
i_i * BC, i_v * BV), (BC, BV), (1, 0))
p_v = tl.make_block_ptr(v + i_bg * T * V, (T, V), (V, 1), (i_t * BT +
i_i * BC, i_v * BV), (BC, BV), (1, 0))
p_do = tl.make_block_ptr(do + i_bh * T * V, (T, V), (V, 1), (i_t *
BT + i_i * BC, i_v * BV), (BC, BV), (1, 0))
p_dv = tl.make_block_ptr(dv + i_bh * T * V, (T, V), (V, 1), (i_t *
BT + i_i * BC, i_v * BV), (BC, BV), (1, 0))
p_dg = tl.make_block_ptr(dg + i_bh * T * V, (T, V), (V, 1), (i_t *
BT + i_i * BC, i_v * BV), (BC, BV), (1, 0))
else:
p_o = tl.make_block_ptr(o + (bos * HQ + i_hq) * V, (T, V), (HQ * V,
1), (i_t * BT + i_i * BC, i_v * BV), (BC, BV), (1, 0))
p_v = tl.make_block_ptr(v + (bos * H + i_h) * V, (T, V), (H * V, 1),
(i_t * BT + i_i * BC, i_v * BV), (BC, BV), (1, 0))
p_do = tl.make_block_ptr(do + (bos * HQ + i_hq) * V, (T, V), (HQ *
V, 1), (i_t * BT + i_i * BC, i_v * BV), (BC, BV), (1, 0))
p_dv = tl.make_block_ptr(dv + (bos * HQ + i_hq) * V, (T, V), (HQ *
V, 1), (i_t * BT + i_i * BC, i_v * BV), (BC, BV), (1, 0))
p_dg = tl.make_block_ptr(dg + (bos * HQ + i_hq) * V, (T, V), (HQ *
V, 1), (i_t * BT + i_i * BC, i_v * BV), (BC, BV), (1, 0))
b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32)
b_v = tl.load(p_v, boundary_check=(0, 1)).to(tl.float32)
b_do = tl.load(p_do, boundary_check=(0, 1)).to(tl.float32)
b_dv = b_dv + tl.load(p_dv, boundary_check=(0, 1)).to(tl.float32)
b_dg = b_o * b_do - b_v * b_dv
tl.store(p_dv, b_dv.to(p_dv.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_dg, b_dg.to(p_dg.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Tiled",
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops",
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gsa/chunk.py |
Dataset Description
This dataset contains code snippets from Triton-based projects across GitHub, specifically filtered to include only repositories with permissive licenses (MIT, Apache, BSD, etc.). Each entry in the dataset includes:
- Triton code snippet
- Repository information
- File path
- Commit hash
- Direct GitHub URL to the source code
- License information
- Categorization of the code functionality
Dataset Creation
The dataset was created by:
- Collecting Triton code snippets from public GitHub repositories
- Categorizing the code snippets based on functionality (using Claude)
- Filtering to keep only snippets from repositories with permissive licenses using a custom should_keep_license function (a sketch of such a filter is shown below)
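The exact should_keep_license implementation is not included in this card; the sketch below is only an illustration of such a filter, and both the accepted-license set and the rule that every detected license must be permissive are assumptions rather than the published logic.

# Hypothetical sketch of the license filter used during dataset creation.
# The accepted names and the all-licenses-must-pass rule are assumptions.
PERMISSIVE_LICENSES = {"MIT", "BSD", "Apache", "CC0"}

def should_keep_license(licenses):
    """Return True only if every detected license for a snippet is permissive."""
    return bool(licenses) and all(lic in PERMISSIVE_LICENSES for lic in licenses)

# should_keep_license(["Apache", "BSD"]) -> True
# should_keep_license(["GPL-3.0"])       -> False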
License Information
This dataset is released under the MIT License. However, each code snippet in the dataset comes from a repository with its own specific license (all permissive). The license type for each snippet is included in the dataset.
Permissive licenses included in this dataset:
- MIT
- BSD
- Apache
- CC0
Format and Usage
The dataset is provided in two formats:
- JSON format (permissive_triton_dataset.json)
- Parquet format (permissive_triton_dataset.parquet)
Sample Data Structure
{
  "uuid": "...",
  "file_name": "example_triton_file.py",
  "repo_name": "username/repo",
  "file_path": "path/to/file.py",
  "commit_hash": "abcdef123456",
  "starcount": 42,
  "input": "@triton.jit\ndef example_kernel(...):\n    ...",
  "category": {
    "Functionality": ["Category1", "Category2"]
  },
  "licenses": ["MIT"],
  "github_url": "https://github.com/username/repo/blob/abcdef123456/path/to/file.py"
}
Field Descriptions
Field | Description |
---|---|
uuid | Unique identifier for the entry in the dataset |
file_name | Name of the source code file |
repo_name | GitHub repository name in format "username/repo" |
file_path | Path to the file within the repository |
commit_hash | Git commit hash for the specific version of the file |
starcount | Number of stars the repository had at the time of data collection |
input | The actual Triton code snippet |
category | Categorization of the code functionality (labeled using Claude) |
licenses | List of permissive license types applicable to this code |
github_url | Direct URL to view the file on GitHub at the specific commit |
Category Types
We consider categories in the following domains: Functionality, Data Type, Performance Objective, Parallelization Strategy, and Memory Access Pattern. We optionally add labels from each of these domains to every entry to describe the code (labeled using Claude); the usage sketch after the loading example below shows how these labels can be queried.
Loading the Dataset
# Using JSON
import json

with open('permissive_triton_dataset.json', 'r') as f:
    dataset = json.load(f)

# Using Parquet
import pandas as pd

df = pd.read_parquet('permissive_triton_dataset.parquet')
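Beyond loading, entries can be queried by their metadata fields. The sketch below is a usage example rather than part of the original card: it assumes the JSON file deserializes to a list of entry objects with the fields described above, and the label string "Matrix Multiplication" is simply one of the Functionality labels that appears in this dataset.

# Usage sketch (assumptions noted above): select MIT-licensed kernels whose
# Functionality labels include "Matrix Multiplication", then tally all
# Functionality labels across the dataset.
import json
from collections import Counter

with open('permissive_triton_dataset.json', 'r') as f:
    dataset = json.load(f)

matmul_mit = [
    entry for entry in dataset
    if 'MIT' in entry['licenses']
    and 'Matrix Multiplication' in entry['category'].get('Functionality', [])
]
print(len(matmul_mit), 'MIT-licensed matrix-multiplication kernels')

functionality_counts = Counter(
    label
    for entry in dataset
    for label in entry['category'].get('Functionality', [])
)
print(functionality_counts.most_common(5))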