Add files using upload-large-folder tool
- .gitattributes +1 -0
- figures/benchmark.jpg +3 -0
- inference/bf16_cast_block_int8.py +63 -0
- inference/kernel.py +136 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+figures/benchmark.jpg filter=lfs diff=lfs merge=lfs -text
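(For context: a rule of this form is what the git-lfs CLI writes into .gitattributes when a path is tracked, e.g. `git lfs track "figures/benchmark.jpg"`; the command itself is not part of this commit.)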
figures/benchmark.jpg
ADDED
[binary image stored via Git LFS; no text preview]
inference/bf16_cast_block_int8.py
ADDED
@@ -0,0 +1,63 @@
import os
import json
from argparse import ArgumentParser
from glob import glob
from tqdm import tqdm

import torch
from safetensors.torch import load_file, save_file
from huggingface_hub import snapshot_download

from kernel import weight_quant


def main(bf16_path, int8_path, model_name="deepseek-ai/DeepSeek-R1"):
    torch.set_default_dtype(torch.bfloat16)
    os.makedirs(int8_path, exist_ok=True)
    model_index_file = os.path.join(int8_path, "model.safetensors.index.json")

    # Fetch the weight index from the Hub if it is not already present locally.
    if not os.path.exists(model_index_file):
        snapshot_download(
            repo_id=model_name,
            allow_patterns=["model.safetensors.index.json"],
            local_dir=int8_path,
            local_dir_use_symlinks=False
        )
        print(f"model index file downloaded to {model_index_file}")

    with open(model_index_file, "r") as f:
        model_index = json.load(f)
    weight_map = model_index["weight_map"]
    # Every weight with a companion "<name>_scale_inv" entry is expected to be quantized.
    scale_count = len([key for key in weight_map.keys() if key.endswith("_scale_inv")])

    safetensor_files = list(glob(os.path.join(bf16_path, "*.safetensors")))
    safetensor_files.sort()
    quant_count = 0
    for safetensor_file in tqdm(safetensor_files):
        file_name = os.path.basename(safetensor_file)
        state_dict = load_file(safetensor_file, device="cuda")
        new_state_dict = {}
        for weight_name, weight in state_dict.items():
            scale_inv_name = f"{weight_name}_scale_inv"
            if scale_inv_name in weight_map:
                # Only 16-bit (bf16) weights should reach this branch.
                assert weight.element_size() == 2
                quant_count += 1
                int8_weight, scale_inv = weight_quant(weight)
                new_state_dict[weight_name] = int8_weight
                new_state_dict[scale_inv_name] = scale_inv
            else:
                new_state_dict[weight_name] = weight
        new_safetensor_file = os.path.join(int8_path, file_name)
        save_file(new_state_dict, new_safetensor_file)
    # Sanity check: every scale tensor listed in the index was produced.
    assert quant_count == scale_count
    print(f"{quant_count} weights are quantized.")


if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("--input-bf16-hf-path", type=str, required=True)
    parser.add_argument("--output-int8-hf-path", type=str, required=True)
    parser.add_argument("--model-name", type=str, default="deepseek-ai/DeepSeek-R1")
    args = parser.parse_args()
    main(args.input_bf16_hf_path, args.output_int8_hf_path, args.model_name)
    print("done")
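For reference, the script is driven entirely by the CLI flags defined above; a typical invocation would look like the following (the paths are placeholders, not part of the repo):

    python inference/bf16_cast_block_int8.py \
        --input-bf16-hf-path /path/to/DeepSeek-R1-bf16 \
        --output-int8-hf-path /path/to/DeepSeek-R1-block-int8

Note that load_file(..., device="cuda") and the Triton kernel behind weight_quant both require a CUDA GPU, and kernel.py must be importable (e.g., run with inference/ on PYTHONPATH).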
inference/kernel.py
ADDED
@@ -0,0 +1,136 @@
from typing import Tuple

import torch
import triton
import triton.language as tl
from triton import Config


@triton.jit
def act_quant_kernel(x_ptr, y_ptr, s_ptr, BLOCK_SIZE: tl.constexpr):
    # One program per 1-D block: scale by the block's absmax and cast to fp8.
    pid = tl.program_id(axis=0)
    offs = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    x = tl.load(x_ptr + offs).to(tl.float32)
    s = tl.max(tl.abs(x)) / 448.  # 448 is the largest finite float8_e4m3fn value
    y = x / s
    y = y.to(y_ptr.dtype.element_ty)
    tl.store(y_ptr + offs, y)
    tl.store(s_ptr + pid, s)


def act_quant(x: torch.Tensor, block_size: int = 128) -> Tuple[torch.Tensor, torch.Tensor]:
    # Quantize activations to fp8, one float32 scale per block of the last dimension.
    assert x.is_contiguous()
    assert x.size(-1) % block_size == 0
    y = torch.empty_like(x, dtype=torch.float8_e4m3fn)
    s = x.new_empty(*x.size()[:-1], x.size(-1) // block_size, dtype=torch.float32)
    grid = lambda meta: (triton.cdiv(x.numel(), meta['BLOCK_SIZE']), )
    act_quant_kernel[grid](x, y, s, BLOCK_SIZE=block_size)
    return y, s


@triton.jit
def weight_dequant_kernel(x_ptr, s_ptr, y_ptr, M, N, BLOCK_SIZE: tl.constexpr):
    # One program per (BLOCK_SIZE x BLOCK_SIZE) tile: multiply by the tile's scale.
    pid_m = tl.program_id(axis=0)
    pid_n = tl.program_id(axis=1)
    n = tl.cdiv(N, BLOCK_SIZE)
    offs_m = pid_m * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    offs_n = pid_n * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    offs = offs_m[:, None] * N + offs_n[None, :]
    mask = (offs_m[:, None] < M) & (offs_n[None, :] < N)
    x = tl.load(x_ptr + offs, mask=mask).to(tl.float32)
    s = tl.load(s_ptr + pid_m * n + pid_n)
    y = x * s
    tl.store(y_ptr + offs, y, mask=mask)


def weight_dequant(x: torch.Tensor, s: torch.Tensor, block_size: int = 128) -> torch.Tensor:
    # Dequantize a 2-D weight back to the default dtype using per-tile scales.
    assert x.is_contiguous() and s.is_contiguous()
    assert x.dim() == 2 and s.dim() == 2
    M, N = x.size()
    y = torch.empty_like(x, dtype=torch.get_default_dtype())
    grid = lambda meta: (triton.cdiv(M, meta['BLOCK_SIZE']), triton.cdiv(N, meta['BLOCK_SIZE']))
    weight_dequant_kernel[grid](x, s, y, M, N, BLOCK_SIZE=block_size)
    return y


@triton.jit
def weight_quant_kernel(x_ptr, y_ptr, s_ptr, M, N, BLOCK_SIZE: tl.constexpr):
    # One program per (BLOCK_SIZE x BLOCK_SIZE) tile: scale by the tile's absmax and cast to int8.
    pid_m = tl.program_id(axis=0)
    pid_n = tl.program_id(axis=1)
    n = tl.cdiv(N, BLOCK_SIZE)
    offs_m = pid_m * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    offs_n = pid_n * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    offs = offs_m[:, None] * N + offs_n[None, :]
    mask = (offs_m[:, None] < M) & (offs_n[None, :] < N)
    x = tl.load(x_ptr + offs, mask=mask).to(tl.float32)
    s = tl.max(tl.abs(x)) / 127.  # 127 is the int8 maximum
    y = x / s
    y = y.to(y_ptr.dtype.element_ty)
    tl.store(y_ptr + offs, y, mask=mask)
    tl.store(s_ptr + pid_m * n + pid_n, s)


def weight_quant(x: torch.Tensor, block_size: int = 128) -> Tuple[torch.Tensor, torch.Tensor]:
    # Quantize a 2-D weight to block-wise int8, one float32 scale per tile.
    assert x.is_contiguous()
    assert x.dim() == 2
    M, N = x.size()
    y = torch.empty_like(x, dtype=torch.int8)
    # One scale per block in each dimension (ceiling division).
    s = x.new_empty(triton.cdiv(M, block_size), triton.cdiv(N, block_size), dtype=torch.float32)
    grid = lambda meta: (triton.cdiv(M, meta['BLOCK_SIZE']), triton.cdiv(N, meta['BLOCK_SIZE']))
    weight_quant_kernel[grid](x, y, s, M, N, BLOCK_SIZE=block_size)
    return y, s


fp8_gemm_configs = [
    Config({'BLOCK_SIZE_M': block_m, 'BLOCK_SIZE_N': block_n, 'BLOCK_SIZE_K': 128}, num_stages=num_stages, num_warps=8)
    for block_m in [16, 32, 64] for block_n in [32, 64, 128] for num_stages in [3, 4, 5, 6]
]


@triton.autotune(configs=fp8_gemm_configs, key=['N', 'K'])
@triton.jit
def fp8_gemm_kernel(a_ptr, b_ptr, c_ptr,
                    a_s_ptr, b_s_ptr,
                    M, N: tl.constexpr, K: tl.constexpr,
                    BLOCK_SIZE_M: tl.constexpr,
                    BLOCK_SIZE_N: tl.constexpr,
                    BLOCK_SIZE_K: tl.constexpr):
    # Block-scaled GEMM: accumulate A @ B^T in fp32, applying per-block scales at each K-step.
    pid_m = tl.program_id(axis=0)
    pid_n = tl.program_id(axis=1)
    k = tl.cdiv(K, BLOCK_SIZE_K)
    offs_m = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
    offs_n = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
    offs_k = tl.arange(0, BLOCK_SIZE_K)
    a_ptrs = a_ptr + offs_m[:, None] * K + offs_k[None, :]
    b_ptrs = b_ptr + offs_n[None, :] * K + offs_k[:, None]
    a_s_ptrs = a_s_ptr + offs_m * k
    b_s_ptrs = b_s_ptr + (offs_n // BLOCK_SIZE_K) * k

    accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
    for i in range(k):
        a = tl.load(a_ptrs, mask=offs_k[None, :] < K - i * BLOCK_SIZE_K, other=0.0)
        b = tl.load(b_ptrs, mask=offs_k[:, None] < K - i * BLOCK_SIZE_K, other=0.0)
        a_s = tl.load(a_s_ptrs)
        b_s = tl.load(b_s_ptrs)
        accumulator += tl.dot(a, b) * a_s[:, None] * b_s[None, :]
        a_ptrs += BLOCK_SIZE_K
        b_ptrs += BLOCK_SIZE_K
        a_s_ptrs += 1
        b_s_ptrs += 1
    c = accumulator.to(c_ptr.dtype.element_ty)
    offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
    offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
    c_ptrs = c_ptr + offs_m[:, None] * N + offs_n[None, :]
    mask = (offs_m[:, None] < M) & (offs_n[None, :] < N)
    tl.store(c_ptrs, c, mask=mask)


def fp8_gemm(a: torch.Tensor, a_s: torch.Tensor, b: torch.Tensor, b_s: torch.Tensor):
    # a: (..., K) fp8 activations with scales a_s; b: (N, K) fp8 weights with scales b_s.
    assert a.is_contiguous() and b.is_contiguous()
    assert a_s.is_contiguous() and b_s.is_contiguous()
    K = a.size(-1)
    M = a.numel() // K
    N = b.size(0)
    c = a.new_empty(*a.size()[:-1], N, dtype=torch.get_default_dtype())
    grid = lambda META: (triton.cdiv(M, META['BLOCK_SIZE_M']), triton.cdiv(N, META['BLOCK_SIZE_N']))
    fp8_gemm_kernel[grid](a, b, c, a_s, b_s, M, N, K)
    return c
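As a quick sanity check of the block-int8 path, here is a minimal round-trip sketch (an illustration, not part of the commit; it assumes a CUDA GPU with Triton installed, and the shape is an arbitrary choice):

    import torch
    from kernel import weight_quant, weight_dequant

    torch.set_default_dtype(torch.bfloat16)          # weight_dequant writes in the default dtype
    w = torch.randn(512, 1024, device="cuda")        # toy bf16 weight (hypothetical shape)
    w_int8, s = weight_quant(w)                      # int8 payload + one float32 scale per 128x128 tile
    w_rec = weight_dequant(w_int8, s)                # approximate bf16 reconstruction
    print((w.float() - w_rec.float()).abs().max())   # expect a small quantization error

The fp8_gemm path, by contrast, consumes float8_e4m3fn inputs produced by act_quant; it is not referenced by the int8 conversion script above, which imports only weight_quant.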