altomek committed
Commit ed58e7f · verified · 1 Parent(s): 12176b9

quants upload

Gemma-2-2B-Opus-Instruct Q4_0... quant

.gitattributes CHANGED
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+Gemma-2-2B-Opus-Instruct-Q4_0_4_4.i.gguf filter=lfs diff=lfs merge=lfs -text
+Gemma-2-2B-Opus-Instruct-Q4_K.i.gguf filter=lfs diff=lfs merge=lfs -text
+Gemma-2-2B-Opus-Instruct-Q8_0.i.gguf filter=lfs diff=lfs merge=lfs -text
+Gemma-2-2B-Opus-Instruct.imatrix filter=lfs diff=lfs merge=lfs -text
Gemma-2-2B-Opus-Instruct-Q4_0_4_4.i.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bd8c666bd5af120a626213953f089cc4db42118276b84ee623b0528f36bce83c
+size 1629509728
Gemma-2-2B-Opus-Instruct-Q4_K.i.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c69b8395a349ced93ee674150d4527b5c8eb7db050fa1d58534079a28be3bfd6
+size 1708583008
Gemma-2-2B-Opus-Instruct-Q8_0.i.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7b7875c2fceb1b4c0f4c46c812e7654cd85e79598b9b76415350cd50e4ff7487
+size 2784495712
Gemma-2-2B-Opus-Instruct.imatrix ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:38ca04f08a23dfacd6632a65b152b6b7749519eae0258bde2d406003ca6bd71c
+size 2375564
README.md ADDED
@@ -0,0 +1,12 @@
+---
+license: gemma
+datasets:
+- kalomaze/Opus_Instruct_25k
+base_model: SaisExperiments/Gemma-2-2B-Opus-Instruct
+tags:
+- gguf
+---
+
+# Gemma-2-2B-Opus-Instruct
+
+GGUF quants of https://huggingface.co/SaisExperiments/Gemma-2-2B-Opus-Instruct
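
As a usage note, here is a minimal sketch of loading one of the quants added in this commit with the llama-cpp-python bindings; the choice of bindings, the context size, and the sampling settings are assumptions and not something this commit specifies.

```python
# Minimal sketch, assuming the llama-cpp-python package is installed; only the
# .gguf filename comes from this commit, everything else is illustrative.
from llama_cpp import Llama

llm = Llama(
    model_path="Gemma-2-2B-Opus-Instruct-Q4_K.i.gguf",  # quant added in this commit
    n_ctx=4096,  # assumed context window
)

out = llm.create_chat_completion(
    messages=[{"role": "user", "content": "Summarize what an importance matrix is."}],
    max_tokens=128,
    temperature=0.7,
)
print(out["choices"][0]["message"]["content"])
```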
README.old.md ADDED
@@ -0,0 +1,56 @@
+---
+license: gemma
+datasets:
+- kalomaze/Opus_Instruct_25k
+base_model: google/gemma-2-2b-it
+---
+
+![image/png](https://cdn-uploads.huggingface.co/production/uploads/660e67afe23148df7ca321a5/AKOTTIQeLVokbsp_Lm7gP.png)
+
+Must put image in repo :3
+
+# Basic info
+This is [kalomaze/Opus_Instruct_25k](https://huggingface.co/datasets/kalomaze/Opus_Instruct_25k) over [unsloth/gemma-2-2b-it](https://huggingface.co/unsloth/gemma-2-2b-it)
+
+It saw 39.5M tokens
+
+I have no idea if it's done right but it took 9 hours B)
+
+# Training config:
+```
+cutoff_len: 1024
+dataset: Opus_Instruct_25K
+dataset_dir: data
+ddp_timeout: 180000000
+do_train: true
+finetuning_type: lora
+flash_attn: auto
+fp16: true
+gradient_accumulation_steps: 8
+include_num_input_tokens_seen: true
+learning_rate: 5.0e-05
+logging_steps: 5
+lora_alpha: 32
+lora_dropout: 0
+lora_rank: 32
+lora_target: all
+lr_scheduler_type: cosine
+max_grad_norm: 1.0
+max_samples: 15000
+model_name_or_path: unsloth/gemma-2-2b-it
+num_train_epochs: 3.0
+optim: adamw_8bit
+output_dir: saves/Gemma-2-2B-Chat/lora/Final_Opus
+packing: false
+per_device_train_batch_size: 2
+plot_loss: true
+preprocessing_num_workers: 16
+quantization_bit: 4
+quantization_method: bitsandbytes
+report_to: none
+save_steps: 100
+stage: sft
+template: gemma
+use_unsloth: true
+warmup_steps: 0
+```
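
A quick sanity check of what the config above implies for the run, assuming a single training device and that the dataset provides at least the max_samples cap; the input values are taken from the config, the arithmetic itself is only illustrative.

```python
import math

# Values copied from the training config above.
per_device_batch = 2   # per_device_train_batch_size
grad_accum = 8         # gradient_accumulation_steps
epochs = 3             # num_train_epochs
samples = 15_000       # max_samples (Opus_Instruct_25k is larger, so the cap applies)

# Assuming one device: sequences consumed per optimizer step.
effective_batch = per_device_batch * grad_accum         # 16
steps_per_epoch = math.ceil(samples / effective_batch)  # 938
total_steps = steps_per_epoch * epochs                  # 2814

print(effective_batch, steps_per_epoch, total_steps)
```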