Upload README.md with huggingface_hub
README.md CHANGED
@@ -14,10 +14,10 @@ Currently, upstream sglang doesn't load this quant correctly due to a few minor
 uv venv --python 3.12
 
 # vllm is needed to load w4a16 quant scheme
-uv pip install vllm>=0.8.5
+uv pip install "vllm>=0.8.5"
 
 # use patched sglang from git
-uv pip install git+https://github.com/nytopop/sglang.git@qwen-30b-a3b#subdirectory=python[all] --find-links https://flashinfer.ai/whl/cu124/torch2.5/flashinfer-python
+uv pip install "git+https://github.com/nytopop/sglang.git@qwen-30b-a3b#subdirectory=python[all]" --find-links https://flashinfer.ai/whl/cu124/torch2.5/flashinfer-python
 
 # run
 uv run python -m sglang.launch_server --model-path nytopop/Qwen3-30B-A3B.w4a16 --reasoning-parser qwen3 --dtype float16
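The only change is quoting the two `uv pip install` arguments so the shell doesn't treat `>=` as a redirection or `[all]` as a glob pattern. Once the server from the last command is running, it can be smoke-tested against sglang's OpenAI-compatible endpoint; a minimal sketch, assuming the default port (30000, override with `--port` on `launch_server`) and that the served model path is used as the model name:

```bash
# Smoke-test the launched server via the OpenAI-compatible chat endpoint.
# Port 30000 is sglang's default; adjust if you passed --port.
curl http://localhost:30000/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
        "model": "nytopop/Qwen3-30B-A3B.w4a16",
        "messages": [{"role": "user", "content": "Say hello."}]
      }'
```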