Int8 (W8A8) quantization of mlabonne/Qwen3-8B-abliterated for faster inference on Ampere-class GPUs.

usage

uv venv --python 3.12

uv pip install "sglang[all]" --find-links https://flashinfer.ai/whl/cu124/torch2.5/flashinfer-python

uv run python -m sglang.launch_server --model-path nytopop/Qwen3-8B-abliterated.w8a8 --quantization w8a8_int8 --reasoning-parser qwen3
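Once the server is up it exposes an OpenAI-compatible API. A minimal sketch of querying it with the openai client, assuming the default sglang port of 30000:

# Minimal sketch: chat with the launched sglang server over its
# OpenAI-compatible endpoint (assumes the default port 30000).
from openai import OpenAI

client = OpenAI(base_url="http://127.0.0.1:30000/v1", api_key="none")

resp = client.chat.completions.create(
  model="nytopop/Qwen3-8B-abliterated.w8a8",
  messages=[{"role": "user", "content": "Summarize W8A8 int8 quantization in one sentence."}],
  max_tokens=256,
)
print(resp.choices[0].message.content)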

creation

from transformers import AutoTokenizer, AutoModelForCausalLM
from datasets import load_dataset
from llmcompressor import oneshot
from llmcompressor.modifiers.quantization import GPTQModifier
from llmcompressor.modifiers.smoothquant import SmoothQuantModifier
from llmcompressor.transformers.compression.helpers import calculate_offload_device_map

model_id  = "mlabonne/Qwen3-8B-abliterated"
model_out = model_id.split("/")[1] + ".w8a8"

num_samples = 256
max_seq_len = 4096

tokenizer = AutoTokenizer.from_pretrained(model_id)

# Render each calibration sample with the model's chat template so the
# calibration text matches the chat format seen at inference time.
def preprocess_fn(example):
  return {
    "text": tokenizer.apply_chat_template(
      example["messages"], add_generation_prompt=False, tokenize=False
    )
  }

ds = load_dataset("neuralmagic/LLM_compression_calibration", split="train")
ds = ds.shuffle().select(range(num_samples))
ds = ds.map(preprocess_fn)

# Build a device map that reserves GPU memory for GPTQ Hessian accumulation,
# then fall back to CPU (rather than disk) for any layers that don't fit.
device_map = calculate_offload_device_map(
  model_id, reserve_for_hessians=True, num_gpus=1, torch_dtype="bfloat16"
)

for k, v in device_map.items():
  if v == 'disk':
    device_map[k] = 'cpu'

model = AutoModelForCausalLM.from_pretrained(
  model_id,
  device_map=device_map,
  torch_dtype="bfloat16",
)

# SmoothQuant migrates activation outliers into the weights, then GPTQ
# quantizes all Linear layers to W8A8, skipping lm_head and any mlp gate layers.
recipe = [
  SmoothQuantModifier(
    smoothing_strength=0.7,
  ),
  GPTQModifier(
    sequential=True,
    targets="Linear",
    scheme="W8A8",
    ignore=["lm_head", "re:.*mlp.gate$"],
    dampening_frac=0.05,
  ),
]

# Run one-shot calibration + quantization and write the compressed
# checkpoint to output_dir.
oneshot(
  model=model,
  dataset=ds,
  recipe=recipe,
  max_seq_length=max_seq_len,
  num_calibration_samples=num_samples,
  output_dir=model_out,
)