Create README.md
README.md
ADDED
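This model was created by pruning `Xenova/llama2.c-stories110M` to 2:4 (50%) structured sparsity with SparseML's one-shot SparseGPT modifier, calibrated on the `open_platypus` dataset. The script below reproduces the pruning run and uploads the result to the Hub as `nm-testing/llama2.c-stories110M-pruned2.4`.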
```python
import sparseml.transformers

original_model_name = "Xenova/llama2.c-stories110M"
output_directory = "output/"
final_model_name = "nm-testing/llama2.c-stories110M-pruned2.4"

dataset = "open_platypus"

# SparseGPT recipe: prune the decoder layers to 50% sparsity with a 2:4 mask
recipe = r"""
test_stage:
  obcq_modifiers:
    SparseGPTModifier:
      sparsity: 0.5
      sequential_update: true
      quantize: false
      mask_structure: '2:4'
      targets: ['re:model.layers.\d*$']
"""

# Apply SparseGPT to the model
sparseml.transformers.oneshot(
    model_name_or_path=original_model_name,
    dataset_name=dataset,
    recipe=recipe,
    output_dir=output_directory,
)

# Upload the output model to Hugging Face Hub
from huggingface_hub import HfApi

HfApi().upload_folder(
    folder_path=output_directory,
    repo_id=final_model_name,
)
```
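As a minimal sketch of how the resulting checkpoint might be used afterwards (assuming it loads with the standard `transformers` causal-LM API; this step is not part of the original script and the prompt is illustrative), the pruned model can be pulled back from the Hub and run:

```python
# Minimal usage sketch (assumption: the pruned checkpoint loads with the
# standard transformers causal-LM API; the prompt below is illustrative).
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "nm-testing/llama2.c-stories110M-pruned2.4"
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id)

inputs = tokenizer("Once upon a time", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=50)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```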