legraphista committed
Commit: 073c67d
Parent(s): f058e37
Upload imatrix.log with huggingface_hub
Files changed: imatrix.log +153 -0
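For reference, a minimal sketch of how a log like this can be pushed to the Hub with the huggingface_hub library, per the commit message. The repo id and local path are assumptions inferred from the directory name in the log and the committer's username:

```python
from huggingface_hub import HfApi

api = HfApi()
# Upload the local log file into the model repo. repo_id and the local
# path are assumptions; only the commit message is taken from the page.
api.upload_file(
    path_or_fileobj="imatrix.log",
    path_in_repo="imatrix.log",
    repo_id="legraphista/c4ai-command-r-plus-08-2024-IMat-GGUF",
    repo_type="model",
    commit_message="Upload imatrix.log with huggingface_hub",
)
```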
imatrix.log ADDED
@@ -0,0 +1,153 @@
+llama_model_loader: loaded meta data with 34 key-value pairs and 642 tensors from c4ai-command-r-plus-08-2024-IMat-GGUF/c4ai-command-r-plus-08-2024.Q8_0.gguf.hardlink.gguf (version GGUF V3 (latest))
+llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
+llama_model_loader: - kv   0:                       general.architecture str              = command-r
+llama_model_loader: - kv   1:                               general.type str              = model
+llama_model_loader: - kv   2:                               general.name str              = C4Ai Command R Plus 08 2024
+llama_model_loader: - kv   3:                            general.version str              = 08-2024
+llama_model_loader: - kv   4:                           general.basename str              = c4ai-command-r-plus
+llama_model_loader: - kv   5:                         general.size_label str              = 104B
+llama_model_loader: - kv   6:                            general.license str              = cc-by-nc-4.0
+llama_model_loader: - kv   7:                          general.languages arr[str,10]      = ["en", "fr", "de", "es", "it", "pt", ...
+llama_model_loader: - kv   8:                      command-r.block_count u32              = 64
+llama_model_loader: - kv   9:                   command-r.context_length u32              = 131072
+llama_model_loader: - kv  10:                 command-r.embedding_length u32              = 12288
+llama_model_loader: - kv  11:              command-r.feed_forward_length u32              = 33792
+llama_model_loader: - kv  12:             command-r.attention.head_count u32              = 96
+llama_model_loader: - kv  13:          command-r.attention.head_count_kv u32              = 8
+llama_model_loader: - kv  14:                   command-r.rope.freq_base f32              = 8000000.000000
+llama_model_loader: - kv  15:     command-r.attention.layer_norm_epsilon f32              = 0.000010
+llama_model_loader: - kv  16:                          general.file_type u32              = 7
+llama_model_loader: - kv  17:                      command-r.logit_scale f32              = 0.833333
+llama_model_loader: - kv  18:                command-r.rope.scaling.type str              = none
+llama_model_loader: - kv  19:                       tokenizer.ggml.model str              = gpt2
+llama_model_loader: - kv  20:                         tokenizer.ggml.pre str              = command-r
+llama_model_loader: - kv  21:                      tokenizer.ggml.tokens arr[str,256000]  = ["<PAD>", "<UNK>", "<CLS>", "<SEP>", ...
+llama_model_loader: - kv  22:                  tokenizer.ggml.token_type arr[i32,256000]  = [3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, ...
+llama_model_loader: - kv  23:                      tokenizer.ggml.merges arr[str,253333]  = ["Ġ Ġ", "Ġ t", "e r", "i n", "Ġ a...
+llama_model_loader: - kv  24:                tokenizer.ggml.bos_token_id u32              = 5
+llama_model_loader: - kv  25:                tokenizer.ggml.eos_token_id u32              = 255001
+llama_model_loader: - kv  26:            tokenizer.ggml.padding_token_id u32              = 0
+llama_model_loader: - kv  27:               tokenizer.ggml.add_bos_token bool             = true
+llama_model_loader: - kv  28:               tokenizer.ggml.add_eos_token bool             = false
+llama_model_loader: - kv  29:           tokenizer.chat_template.tool_use str              = {{ bos_token }}{% if messages[0]['rol...
+llama_model_loader: - kv  30:                tokenizer.chat_template.rag str              = {{ bos_token }}{% if messages[0]['rol...
+llama_model_loader: - kv  31:                   tokenizer.chat_templates arr[str,2]       = ["rag", "tool_use"]
+llama_model_loader: - kv  32:                    tokenizer.chat_template str              = {{ bos_token }}{% if messages[0]['rol...
+llama_model_loader: - kv  33:               general.quantization_version u32              = 2
+llama_model_loader: - type  f32:  193 tensors
+llama_model_loader: - type q8_0:  449 tensors
+llm_load_vocab: special tokens cache size = 37
+llm_load_vocab: token to piece cache size = 1.8426 MB
+llm_load_print_meta: format           = GGUF V3 (latest)
+llm_load_print_meta: arch             = command-r
+llm_load_print_meta: vocab type       = BPE
+llm_load_print_meta: n_vocab          = 256000
+llm_load_print_meta: n_merges         = 253333
+llm_load_print_meta: vocab_only       = 0
+llm_load_print_meta: n_ctx_train      = 131072
+llm_load_print_meta: n_embd           = 12288
+llm_load_print_meta: n_layer          = 64
+llm_load_print_meta: n_head           = 96
+llm_load_print_meta: n_head_kv        = 8
+llm_load_print_meta: n_rot            = 128
+llm_load_print_meta: n_swa            = 0
+llm_load_print_meta: n_embd_head_k    = 128
+llm_load_print_meta: n_embd_head_v    = 128
+llm_load_print_meta: n_gqa            = 12
+llm_load_print_meta: n_embd_k_gqa     = 1024
+llm_load_print_meta: n_embd_v_gqa     = 1024
+llm_load_print_meta: f_norm_eps       = 1.0e-05
+llm_load_print_meta: f_norm_rms_eps   = 0.0e+00
+llm_load_print_meta: f_clamp_kqv      = 0.0e+00
+llm_load_print_meta: f_max_alibi_bias = 0.0e+00
+llm_load_print_meta: f_logit_scale    = 8.3e-01
+llm_load_print_meta: n_ff             = 33792
+llm_load_print_meta: n_expert         = 0
+llm_load_print_meta: n_expert_used    = 0
+llm_load_print_meta: causal attn      = 1
+llm_load_print_meta: pooling type     = 0
+llm_load_print_meta: rope type        = 0
+llm_load_print_meta: rope scaling     = none
+llm_load_print_meta: freq_base_train  = 8000000.0
+llm_load_print_meta: freq_scale_train = 1
+llm_load_print_meta: n_ctx_orig_yarn  = 131072
+llm_load_print_meta: rope_finetuned   = unknown
+llm_load_print_meta: ssm_d_conv       = 0
+llm_load_print_meta: ssm_d_inner      = 0
+llm_load_print_meta: ssm_d_state      = 0
+llm_load_print_meta: ssm_dt_rank      = 0
+llm_load_print_meta: ssm_dt_b_c_rms   = 0
+llm_load_print_meta: model type       = ?B
+llm_load_print_meta: model ftype      = Q8_0
+llm_load_print_meta: model params     = 103.81 B
+llm_load_print_meta: model size       = 102.73 GiB (8.50 BPW)
+llm_load_print_meta: general.name     = C4Ai Command R Plus 08 2024
+llm_load_print_meta: BOS token        = 5 '<BOS_TOKEN>'
+llm_load_print_meta: EOS token        = 255001 '<|END_OF_TURN_TOKEN|>'
+llm_load_print_meta: PAD token        = 0 '<PAD>'
+llm_load_print_meta: LF token         = 136 'Ä'
+llm_load_print_meta: max token length = 1024
+ggml_cuda_init: failed to initialize CUDA: no CUDA-capable device is detected
+llm_load_tensors: ggml ctx size = 0.29 MiB
+llm_load_tensors: offloading 0 repeating layers to GPU
+llm_load_tensors: offloaded 0/65 layers to GPU
+llm_load_tensors: CPU buffer size = 105193.80 MiB
+.................................................................................................
+llama_new_context_with_model: n_ctx      = 512
+llama_new_context_with_model: n_batch    = 512
+llama_new_context_with_model: n_ubatch   = 512
+llama_new_context_with_model: flash_attn = 0
+llama_new_context_with_model: freq_base  = 8000000.0
+llama_new_context_with_model: freq_scale = 1
+ggml_cuda_host_malloc: failed to allocate 128.00 MiB of pinned memory: no CUDA-capable device is detected
+llama_kv_cache_init: CPU KV buffer size = 128.00 MiB
+llama_new_context_with_model: KV self size = 128.00 MiB, K (f16): 64.00 MiB, V (f16): 64.00 MiB
+ggml_cuda_host_malloc: failed to allocate 0.98 MiB of pinned memory: no CUDA-capable device is detected
+llama_new_context_with_model: CPU output buffer size = 0.98 MiB
+ggml_cuda_host_malloc: failed to allocate 524.00 MiB of pinned memory: no CUDA-capable device is detected
+llama_new_context_with_model: CUDA_Host compute buffer size = 524.00 MiB
+llama_new_context_with_model: graph nodes  = 2312
+llama_new_context_with_model: graph splits = 1
+
+system_info: n_threads = 25 (n_threads_batch = 25) / 32 | AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 1 | AVX512_VBMI = 1 | AVX512_VNNI = 1 | AVX512_BF16 = 1 | FMA = 1 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 1 |
+compute_imatrix: tokenizing the input ..
+compute_imatrix: tokenization took 123.015 ms
+compute_imatrix: computing over 131 chunks with batch_size 512
+ggml_cuda_host_malloc: failed to allocate 500.00 MiB of pinned memory: no CUDA-capable device is detected
+compute_imatrix: 78.46 seconds per pass - ETA 2 hours 51.28 minutes
+[1]4.4754,[2]3.1183,[3]3.0543,[4]3.1257,[5]3.1452,[6]2.9647,[7]3.4957,[8]3.5353,[9]3.9922,
+save_imatrix: stored collected data after 10 chunks in c4ai-command-r-plus-08-2024-IMat-GGUF/imatrix.dat
+[10]4.2216,[11]3.9264,[12]4.1429,[13]4.5250,[14]4.6989,[15]4.9318,[16]5.0805,[17]5.2660,[18]5.4186,[19]5.2379,
+save_imatrix: stored collected data after 20 chunks in c4ai-command-r-plus-08-2024-IMat-GGUF/imatrix.dat
+[20]5.0212,[21]4.9701,[22]4.9920,[23]4.8294,[24]5.0542,[25]5.0570,[26]5.2589,[27]5.2103,[28]4.9143,[29]4.6607,
+save_imatrix: stored collected data after 30 chunks in c4ai-command-r-plus-08-2024-IMat-GGUF/imatrix.dat
+[30]4.6601,[31]4.7309,[32]4.5424,[33]4.3738,[34]4.3251,[35]4.2673,[36]4.2925,[37]4.2917,[38]4.2848,[39]4.3273,
+save_imatrix: stored collected data after 40 chunks in c4ai-command-r-plus-08-2024-IMat-GGUF/imatrix.dat
+[40]4.4122,[41]4.4935,[42]4.3465,[43]4.2058,[44]4.0749,[45]3.9584,[46]3.9262,[47]3.8954,[48]3.9549,[49]4.0163,
+save_imatrix: stored collected data after 50 chunks in c4ai-command-r-plus-08-2024-IMat-GGUF/imatrix.dat
+[50]4.0858,[51]4.0439,[52]4.0656,[53]4.0867,[54]4.1515,[55]4.2470,[56]4.3072,[57]4.3630,[58]4.4014,[59]4.4363,
+save_imatrix: stored collected data after 60 chunks in c4ai-command-r-plus-08-2024-IMat-GGUF/imatrix.dat
+[60]4.4203,[61]4.4170,[62]4.3773,[63]4.3749,[64]4.3988,[65]4.4349,[66]4.4013,[67]4.3903,[68]4.4097,[69]4.4051,
+save_imatrix: stored collected data after 70 chunks in c4ai-command-r-plus-08-2024-IMat-GGUF/imatrix.dat
+[70]4.4180,[71]4.4311,[72]4.4607,[73]4.4538,[74]4.4846,[75]4.4909,[76]4.4932,[77]4.5091,[78]4.5065,[79]4.4899,
+save_imatrix: stored collected data after 80 chunks in c4ai-command-r-plus-08-2024-IMat-GGUF/imatrix.dat
+[80]4.4706,[81]4.4935,[82]4.5351,[83]4.5349,[84]4.5360,[85]4.5345,[86]4.5629,[87]4.5248,[88]4.5205,[89]4.5053,
+save_imatrix: stored collected data after 90 chunks in c4ai-command-r-plus-08-2024-IMat-GGUF/imatrix.dat
+[90]4.5242,[91]4.5545,[92]4.5677,[93]4.5443,[94]4.5248,[95]4.4987,[96]4.4747,[97]4.4548,[98]4.4338,[99]4.4131,
+save_imatrix: stored collected data after 100 chunks in c4ai-command-r-plus-08-2024-IMat-GGUF/imatrix.dat
+[100]4.3916,[101]4.3931,[102]4.4081,[103]4.4587,[104]4.5047,[105]4.5456,[106]4.5822,[107]4.6470,[108]4.6542,[109]4.6807,
+save_imatrix: stored collected data after 110 chunks in c4ai-command-r-plus-08-2024-IMat-GGUF/imatrix.dat
+[110]4.6454,[111]4.6548,[112]4.6499,[113]4.6174,[114]4.5733,[115]4.5519,[116]4.5918,[117]4.5904,[118]4.5891,[119]4.6066,
+save_imatrix: stored collected data after 120 chunks in c4ai-command-r-plus-08-2024-IMat-GGUF/imatrix.dat
+[120]4.6368,[121]4.6489,[122]4.6638,[123]4.6878,[124]4.6952,[125]4.6825,[126]4.6271,[127]4.5741,[128]4.5234,[129]4.4731,
+save_imatrix: stored collected data after 130 chunks in c4ai-command-r-plus-08-2024-IMat-GGUF/imatrix.dat
+[130]4.4303,[131]4.3841,
+save_imatrix: stored collected data after 131 chunks in c4ai-command-r-plus-08-2024-IMat-GGUF/imatrix.dat
+
+llama_print_timings:        load time =    84740.34 ms
+llama_print_timings:      sample time =        0.00 ms /     1 runs   (    0.00 ms per token,      inf tokens per second)
+llama_print_timings: prompt eval time = 10254664.66 ms / 67072 tokens (  152.89 ms per token,     6.54 tokens per second)
+llama_print_timings:        eval time =        0.00 ms /     1 runs   (    0.00 ms per token,      inf tokens per second)
+llama_print_timings:       total time = 10263942.91 ms / 67073 tokens
+
+Final estimate: PPL = 4.3841 +/- 0.05239
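For context, a minimal sketch of the kind of llama.cpp imatrix invocation that produces a log like the one above. The model path and output path are taken from the log itself; the binary name and calibration file name are assumptions, and a CPU-only build (as the CUDA failures in the log suggest) is assumed:

```python
import subprocess

# Hypothetical reproduction of the run above with llama.cpp's imatrix tool.
# -m: the quantized model named in the log; -o: the output path shown in the
# save_imatrix lines; -f: the calibration corpus (file name is an assumption).
subprocess.run([
    "./llama-imatrix",
    "-m", "c4ai-command-r-plus-08-2024-IMat-GGUF/c4ai-command-r-plus-08-2024.Q8_0.gguf.hardlink.gguf",
    "-f", "calibration.txt",
    "-o", "c4ai-command-r-plus-08-2024-IMat-GGUF/imatrix.dat",
], check=True)
```

The bracketed values in the log are running perplexity estimates over the chunks processed so far; the final line reports the converged estimate (PPL = 4.3841 +/- 0.05239) after all 131 chunks.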