Upload quantize-ggufs.sh with huggingface_hub
quantize-ggufs.sh +24 -0
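For reference, an upload like this can be reproduced with the huggingface-cli tool that ships with huggingface_hub; the repo id below is a placeholder, not taken from this commit:

    # Upload the script into an existing model repo (repo id is hypothetical)
    huggingface-cli upload <user>/<model-repo> quantize-ggufs.sh quantize-ggufs.sh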
quantize-ggufs.sh
ADDED
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+LLAMA_BIN="/data/llama.cpp/build/bin"
+FP16="shisa-v2-llama3.1-405b-fp16.gguf"
+IMATRIX="imatrix.dat"
+NAME="shisa-v2-llama3.1-405b"
+
+# Q8_0
+${LLAMA_BIN}/llama-quantize ${FP16} ${NAME}-Q8_0.gguf Q8_0
+
+# Q4_K_M
+${LLAMA_BIN}/llama-quantize --imatrix ${IMATRIX} ${FP16} ${NAME}-Q4_K_M.gguf Q4_K_M
+
+# IQ4_XS
+${LLAMA_BIN}/llama-quantize --imatrix ${IMATRIX} ${FP16} ${NAME}-IQ4_XS.gguf IQ4_XS
+
+# IQ3_M
+${LLAMA_BIN}/llama-quantize --imatrix ${IMATRIX} ${FP16} ${NAME}-IQ3_M.gguf IQ3_M
+
+# IQ3_XS
+${LLAMA_BIN}/llama-quantize --imatrix ${IMATRIX} ${FP16} ${NAME}-IQ3_XS.gguf IQ3_XS
+
+# IQ2_XXS
+${LLAMA_BIN}/llama-quantize --imatrix ${IMATRIX} ${FP16} ${NAME}-IQ2_XXS.gguf IQ2_XXS
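The script expects imatrix.dat to already sit next to the FP16 GGUF; every target below Q8_0 passes --imatrix, so the quality of those quants depends on that file. A minimal sketch of how such an importance matrix is typically produced with llama.cpp's llama-imatrix tool, assuming a calibration text file (calibration.txt is a hypothetical name):

    # Build the importance matrix from calibration text (calibration.txt is hypothetical)
    ${LLAMA_BIN}/llama-imatrix -m ${FP16} -f calibration.txt -o imatrix.dat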