ehartford committed (verified)
Commit 2c0a848 · 1 Parent(s): 9d69f7e

Upload folder using huggingface_hub

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.

Files changed (50):
  1. .gitattributes +1 -0
  2. chat_template.jinja +37 -0
  3. config.json +71 -0
  4. configuration_deepseek.py +212 -0
  5. generation_config.json +6 -0
  6. model-00001-of-00112.safetensors +3 -0
  7. model-00002-of-00112.safetensors +3 -0
  8. model-00003-of-00112.safetensors +3 -0
  9. model-00004-of-00112.safetensors +3 -0
  10. model-00005-of-00112.safetensors +3 -0
  11. model-00006-of-00112.safetensors +3 -0
  12. model-00007-of-00112.safetensors +3 -0
  13. model-00008-of-00112.safetensors +3 -0
  14. model-00009-of-00112.safetensors +3 -0
  15. model-00010-of-00112.safetensors +3 -0
  16. model-00011-of-00112.safetensors +3 -0
  17. model-00012-of-00112.safetensors +3 -0
  18. model-00013-of-00112.safetensors +3 -0
  19. model-00014-of-00112.safetensors +3 -0
  20. model-00015-of-00112.safetensors +3 -0
  21. model-00016-of-00112.safetensors +3 -0
  22. model-00017-of-00112.safetensors +3 -0
  23. model-00018-of-00112.safetensors +3 -0
  24. model-00019-of-00112.safetensors +3 -0
  25. model-00020-of-00112.safetensors +3 -0
  26. model-00021-of-00112.safetensors +3 -0
  27. model-00022-of-00112.safetensors +3 -0
  28. model-00023-of-00112.safetensors +3 -0
  29. model-00024-of-00112.safetensors +3 -0
  30. model-00025-of-00112.safetensors +3 -0
  31. model-00026-of-00112.safetensors +3 -0
  32. model-00027-of-00112.safetensors +3 -0
  33. model-00028-of-00112.safetensors +3 -0
  34. model-00029-of-00112.safetensors +3 -0
  35. model-00030-of-00112.safetensors +3 -0
  36. model-00031-of-00112.safetensors +3 -0
  37. model-00032-of-00112.safetensors +3 -0
  38. model-00033-of-00112.safetensors +3 -0
  39. model-00034-of-00112.safetensors +3 -0
  40. model-00035-of-00112.safetensors +3 -0
  41. model-00036-of-00112.safetensors +3 -0
  42. model-00037-of-00112.safetensors +3 -0
  43. model-00038-of-00112.safetensors +3 -0
  44. model-00039-of-00112.safetensors +3 -0
  45. model-00040-of-00112.safetensors +3 -0
  46. model-00041-of-00112.safetensors +3 -0
  47. model-00042-of-00112.safetensors +3 -0
  48. model-00043-of-00112.safetensors +3 -0
  49. model-00044-of-00112.safetensors +3 -0
  50. model-00045-of-00112.safetensors +3 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ model.safetensors.index.json filter=lfs diff=lfs merge=lfs -text
chat_template.jinja ADDED
@@ -0,0 +1,37 @@
+ {% if tools -%}
+ {{ '<|im_system|>tool_declare<|im_middle|>' -}}
+ {{- tools | tojson -}}
+ {{ '<|im_end|>' -}}
+ {%- endif -%}
+
+ {%- for message in messages -%}
+ {%- if loop.first and messages[0]['role'] != 'system' -%}
+ {{ '<|im_system|>system<|im_middle|>You are a helpful assistant<|im_end|>' }}
+ {%- endif -%}
+ {%- if message['role'] == 'system' -%}
+ {{ '<|im_system|>system<|im_middle|>' }}
+ {%- elif message['role'] == 'user' -%}
+ {{ '<|im_user|>user<|im_middle|>' }}
+ {%- elif message['role'] == 'assistant' -%}
+ {{ '<|im_assistant|>assistant<|im_middle|>' }}
+ {%- elif message['role'] == 'tool' -%}
+ {{ '<|im_system|>tool<|im_middle|>' }}
+ {%- endif -%}
+
+ {%- if message['content'] is string -%}
+ {{- message['content'] + '<|im_end|>' -}}
+ {%- else -%}
+ {%- for content in message['content'] -%}
+ {%- if content['type'] == 'image' or 'image' in content or 'image_url' in content -%}
+ {{ '<|media_start|>image<|media_content|><|media_pad|><|media_end|>' }}
+ {%- else -%}
+ {{ content['text'] }}
+ {%- endif -%}
+ {%- endfor -%}
+ {{ '<|im_end|>' }}
+ {%- endif -%}
+ {%- endfor -%}
+
+ {%- if add_generation_prompt -%}
+ {{ '<|im_assistant|>assistant<|im_middle|>' }}
+ {%- endif -%}
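For reference, a minimal sketch of how this template renders a conversation. It applies plain jinja2 to the file above rather than going through a tokenizer; the special tokens (<|im_user|>, <|im_middle|>, ...) come from the template itself, and the printed shape is approximate.

# Sketch: render chat_template.jinja directly with jinja2. In practice
# the model's tokenizer would apply this via apply_chat_template.
from jinja2 import Template

template = Template(open("chat_template.jinja").read())
prompt = template.render(
    messages=[
        {"role": "system", "content": "You are a helpful assistant"},
        {"role": "user", "content": "Hello!"},
    ],
    tools=None,
    add_generation_prompt=True,
)
print(prompt)
# Roughly:
# <|im_system|>system<|im_middle|>You are a helpful assistant<|im_end|>
# <|im_user|>user<|im_middle|>Hello!<|im_end|><|im_assistant|>assistant<|im_middle|>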
config.json ADDED
@@ -0,0 +1,71 @@
+ {
+   "architectures": [
+     "DeepseekV3ForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "auto_map": {
+     "AutoConfig": "configuration_deepseek.DeepseekV3Config",
+     "AutoModel": "modeling_deepseek.DeepseekV3Model",
+     "AutoModelForCausalLM": "modeling_deepseek.DeepseekV3ForCausalLM"
+   },
+   "aux_loss_alpha": 0.001,
+   "bos_token_id": 163584,
+   "eos_token_id": 163585,
+   "ep_size": 1,
+   "first_k_dense_replace": 1,
+   "hidden_act": "silu",
+   "hidden_size": 7168,
+   "initializer_range": 0.02,
+   "intermediate_size": 18432,
+   "kv_lora_rank": 512,
+   "max_position_embeddings": 131072,
+   "model_type": "deepseek_v3",
+   "moe_intermediate_size": 2048,
+   "moe_layer_freq": 1,
+   "n_group": 1,
+   "n_routed_experts": 384,
+   "n_shared_experts": 1,
+   "norm_topk_prob": true,
+   "num_attention_heads": 64,
+   "num_experts_per_tok": 8,
+   "num_hidden_layers": 61,
+   "num_key_value_heads": 64,
+   "num_nextn_predict_layers": 0,
+   "pretraining_tp": 1,
+   "q_lora_rank": 1536,
+   "qk_nope_head_dim": 128,
+   "qk_rope_head_dim": 64,
+   "quantization_config": {
+     "bits": 4,
+     "group_size": 64,
+     "modules_to_not_convert": [
+       ".mlp.gate$"
+     ],
+     "quant_method": "awq",
+     "version": "gemm",
+     "zero_point": true
+   },
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": {
+     "beta_fast": 1.0,
+     "beta_slow": 1.0,
+     "factor": 32.0,
+     "mscale": 1.0,
+     "mscale_all_dim": 1.0,
+     "original_max_position_embeddings": 4096,
+     "type": "yarn"
+   },
+   "rope_theta": 50000.0,
+   "routed_scaling_factor": 2.827,
+   "scoring_func": "sigmoid",
+   "seq_aux": true,
+   "tie_word_embeddings": false,
+   "topk_group": 1,
+   "topk_method": "noaux_tc",
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.54.1",
+   "use_cache": false,
+   "v_head_dim": 128,
+   "vocab_size": 163840
+ }
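Two things stand out in this config: the checkpoint is AWQ-quantized to 4 bits (group size 64, with the MoE router ".mlp.gate$" left unquantized), and custom modeling code is wired in through "auto_map". A minimal loading sketch follows; "REPO_ID" is a placeholder for this repository's id, and the multi-GPU assumption is mine, since the full checkpoint spans 112 shards of roughly 5 GB each (~560 GB).

# Minimal loading sketch. trust_remote_code is required because
# "auto_map" routes AutoConfig / AutoModelForCausalLM to the bundled
# configuration_deepseek / modeling_deepseek modules.
from transformers import AutoModelForCausalLM, AutoTokenizer

REPO_ID = "REPO_ID"  # placeholder, not the actual repository name
tokenizer = AutoTokenizer.from_pretrained(REPO_ID, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    REPO_ID,
    trust_remote_code=True,
    device_map="auto",   # shard across available GPUs
    torch_dtype="auto",  # keep the checkpoint's dtypes
)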
configuration_deepseek.py ADDED
@@ -0,0 +1,212 @@
+ # Copied from https://huggingface.co/deepseek-ai/DeepSeek-V3/blob/main/configuration_deepseek.py
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+ logger = logging.get_logger(__name__)
+
+ DEEPSEEK_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
+
+
+ class DeepseekV3Config(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`DeepseekV3Model`]. It is used to instantiate a DeepSeek
+     model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+     defaults will yield a configuration similar to that of DeepSeek-V3.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 129280):
+             Vocabulary size of the DeepSeek model. Defines the number of different tokens that can be represented by the
+             `inputs_ids` passed when calling [`DeepseekV3Model`].
+         hidden_size (`int`, *optional*, defaults to 4096):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 11008):
+             Dimension of the MLP representations.
+         moe_intermediate_size (`int`, *optional*, defaults to 1407):
+             Dimension of the MoE representations.
+         num_hidden_layers (`int`, *optional*, defaults to 32):
+             Number of hidden layers in the Transformer decoder.
+         num_nextn_predict_layers (`int`, *optional*, defaults to 1):
+             Number of next-n prediction layers in the DeepSeekV3 model.
+         num_attention_heads (`int`, *optional*, defaults to 32):
+             Number of attention heads for each attention layer in the Transformer decoder.
+         n_shared_experts (`int`, *optional*, defaults to None):
+             Number of shared experts; None means a dense model.
+         n_routed_experts (`int`, *optional*, defaults to None):
+             Number of routed experts; None means a dense model.
+         routed_scaling_factor (`float`, *optional*, defaults to 1.0):
+             Scaling factor for routed experts.
+         topk_method (`str`, *optional*, defaults to `greedy`):
+             Top-k method used in the routing gate.
+         n_group (`int`, *optional*, defaults to None):
+             Number of groups for routed experts.
+         topk_group (`int`, *optional*, defaults to None):
+             Number of selected groups for each token (the experts selected for a token are restricted to `topk_group` groups).
+         num_experts_per_tok (`int`, *optional*, defaults to None):
+             Number of selected experts; None means a dense model.
+         moe_layer_freq (`int`, *optional*, defaults to 1):
+             The frequency of the MoE layer: one expert layer for every `moe_layer_freq - 1` dense layers.
+         first_k_dense_replace (`int`, *optional*, defaults to 0):
+             Number of dense layers at the start of the network
+             (embed -> dense -> dense -> ... -> dense -> moe -> moe -> ... -> lm_head, with the first k layers dense).
+         norm_topk_prob (`bool`, *optional*, defaults to False):
+             Whether to normalize the weights of the routed experts.
+         scoring_func (`str`, *optional*, defaults to 'softmax'):
+             Method of computing expert weights.
+         aux_loss_alpha (`float`, *optional*, defaults to 0.001):
+             Auxiliary loss weight coefficient.
+         seq_aux (`bool`, *optional*, defaults to True):
+             Whether to compute the auxiliary loss for each individual sample.
+         num_key_value_heads (`int`, *optional*):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
+             `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+             by mean-pooling all the original heads within that group. For more details, check out [this
+             paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, it will default to
+             `num_attention_heads`.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         max_position_embeddings (`int`, *optional*, defaults to 2048):
+             The maximum sequence length that this model might ever be used with.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+             The epsilon used by the rms normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         pad_token_id (`int`, *optional*):
+             Padding token id.
+         bos_token_id (`int`, *optional*, defaults to 1):
+             Beginning of stream token id.
+         eos_token_id (`int`, *optional*, defaults to 2):
+             End of stream token id.
+         pretraining_tp (`int`, *optional*, defaults to 1):
+             Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
+             document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is
+             necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
+             issue](https://github.com/pytorch/pytorch/issues/76232).
+         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+             Whether to tie the input and output word embeddings.
+         rope_theta (`float`, *optional*, defaults to 10000.0):
+             The base period of the RoPE embeddings.
+         rope_scaling (`Dict`, *optional*):
+             Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
+             strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
+             `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
+             `max_position_embeddings` to the expected new maximum.
+         attention_bias (`bool`, *optional*, defaults to `False`):
+             Whether to use a bias in the query, key, value and output projection layers during self-attention.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+
+     ```python
+     >>> from transformers import DeepseekV3Model, DeepseekV3Config
+
+     >>> # Initializing a DeepSeek-V3 style configuration
+     >>> configuration = DeepseekV3Config()
+
+     >>> # Initializing a model from that configuration
+     >>> model = DeepseekV3Model(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "deepseek_v3"
+     keys_to_ignore_at_inference = ["past_key_values"]
+
+     def __init__(
+         self,
+         vocab_size=129280,
+         hidden_size=7168,
+         intermediate_size=18432,
+         moe_intermediate_size=2048,
+         num_hidden_layers=61,
+         num_nextn_predict_layers=1,
+         num_attention_heads=128,
+         num_key_value_heads=128,
+         n_shared_experts=1,
+         n_routed_experts=256,
+         ep_size=1,
+         routed_scaling_factor=2.5,
+         kv_lora_rank=512,
+         q_lora_rank=1536,
+         qk_rope_head_dim=64,
+         v_head_dim=128,
+         qk_nope_head_dim=128,
+         topk_method='noaux_tc',
+         n_group=8,
+         topk_group=4,
+         num_experts_per_tok=8,
+         moe_layer_freq=1,
+         first_k_dense_replace=3,
+         norm_topk_prob=True,
+         scoring_func='sigmoid',
+         aux_loss_alpha=0.001,
+         seq_aux=True,
+         hidden_act="silu",
+         max_position_embeddings=4096,
+         initializer_range=0.02,
+         rms_norm_eps=1e-6,
+         use_cache=True,
+         pad_token_id=None,
+         bos_token_id=0,
+         eos_token_id=1,
+         pretraining_tp=1,
+         tie_word_embeddings=False,
+         rope_theta=10000.0,
+         rope_scaling=None,
+         attention_bias=False,
+         attention_dropout=0.0,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.moe_intermediate_size = moe_intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_nextn_predict_layers = num_nextn_predict_layers
+         self.num_attention_heads = num_attention_heads
+         self.n_shared_experts = n_shared_experts
+         self.n_routed_experts = n_routed_experts
+         self.ep_size = ep_size
+         self.routed_scaling_factor = routed_scaling_factor
+         self.kv_lora_rank = kv_lora_rank
+         self.q_lora_rank = q_lora_rank
+         self.qk_rope_head_dim = qk_rope_head_dim
+         self.v_head_dim = v_head_dim
+         self.qk_nope_head_dim = qk_nope_head_dim
+         self.topk_method = topk_method
+         self.n_group = n_group
+         self.topk_group = topk_group
+         self.num_experts_per_tok = num_experts_per_tok
+         self.moe_layer_freq = moe_layer_freq
+         self.first_k_dense_replace = first_k_dense_replace
+         self.norm_topk_prob = norm_topk_prob
+         self.scoring_func = scoring_func
+         self.aux_loss_alpha = aux_loss_alpha
+         self.seq_aux = seq_aux
+         # for backward compatibility
+         if num_key_value_heads is None:
+             num_key_value_heads = num_attention_heads
+
+         self.num_key_value_heads = num_key_value_heads
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.pretraining_tp = pretraining_tp
+         self.use_cache = use_cache
+         self.rope_theta = rope_theta
+         self.rope_scaling = rope_scaling
+         self.attention_bias = attention_bias
+         self.attention_dropout = attention_dropout
+
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
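Note that config.json in this commit overrides several of these defaults (for example n_routed_experts=384 rather than 256, num_attention_heads=64 rather than 128, first_k_dense_replace=1 rather than 3). A small sketch to confirm what actually gets loaded; "REPO_ID" is again a placeholder:

# Sketch: load the committed config through the auto_map and inspect
# the MoE geometry. Values in the comments come from config.json above.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("REPO_ID", trust_remote_code=True)
print(config.n_routed_experts)       # 384 routed experts per MoE layer
print(config.num_experts_per_tok)    # 8 experts activated per token
print(config.first_k_dense_replace)  # only the first layer stays dense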
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "do_sample": true,
+   "eos_token_id": 163585,
+   "max_length": 131072,
+   "transformers_version": "4.54.1"
+ }
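With sampling enabled by default and eos_token_id 163585 (matching config.json), generation needs nothing beyond a prompt. A short sketch, reusing the model and tokenizer from the loading example above; max_new_tokens is an illustrative choice:

# Sketch: chat-style generation with this repo's default generation config.
inputs = tokenizer.apply_chat_template(
    [{"role": "user", "content": "Hello!"}],
    add_generation_prompt=True,
    return_tensors="pt",
).to(model.device)
output = model.generate(inputs, max_new_tokens=256)
print(tokenizer.decode(output[0][inputs.shape[-1]:], skip_special_tokens=True))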
model-00001-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:256c3a67fa4654c3a7f4f13dd0eca482c7f7d91d2362896d7e9c0a49c12dc3f8
+ size 4998206416
model-00002-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:816ae757d8a2604005c06f62363e95d8f8a5297d1f3febe9490dd035e406548b
+ size 4993633312
model-00003-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:39f8ddf69610b83c704a978324f3aa7dce672d99c1db4a3e3b2bdbc56614fd1b
+ size 4998287608
model-00004-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c7826ddeda5d7c255f8ccc798a438f339792e26e9aee395eb4396bc4ff4e3c5d
+ size 4993633320
model-00005-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:11b8c25a4c7dbb92ef665974e4fe5663aa8c81aa332870c9e63fcc55b3c17cd3
+ size 4998287608
model-00006-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f9abfcd29e530b1ba91d36145cf520a6f17f76fc9c2cb5f8b0d92e5ce8e56ba8
+ size 4993633328
model-00007-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c34f1101d669293afc28b1826c13b3dde04b0ad39b61e009b603d0c25c1f3b48
+ size 4998287608
model-00008-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:91e75bb5b595ec48c0979a437e6de2af54bb390d8bf072f64076e0f501061dbf
+ size 4998288320
model-00009-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:48f082449cb0741af24b67d36f162ff82312c9219590fb444293e7ffd3fd3c00
+ size 4993632616
model-00010-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eb728e66174c8937324fff5ded4c3093a9ff691b0a03809d58a134bfd268e4ea
+ size 4998288024
model-00011-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f1161aa6c61b92beadcd1ab4fe80af58fb1a8290a2f717c0142367349ed60611
+ size 4993632912
model-00012-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2db3568a45c8677fea02c3a5d9b5c8cb2e8a4748d04af57bb53545eb8413696f
+ size 4998287720
model-00013-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bedb70528214886402ac0fcf390929c622927174e9fb9cdc706a7591fb9c1400
+ size 4993633216
model-00014-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d83f91aa2957369b7fe57580ccde245d691fefc383bc96b012fef58d308bcd20
+ size 4998287608
model-00015-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fcd409189cc0575c57aa5f9f15a88380708456b559b6473a0835dce32362b650
+ size 4993633328
model-00016-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4e10c46380962d01d1ec7a47b3a0c766b35e82d2e9e2ca7f4c2f6d57bf855d33
+ size 4998287608
model-00017-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d03a8dc930811f061830c0a0d4129e69b819f263f64d79da034971e382c1f385
+ size 4993633320
model-00018-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6904f6912c4a096a0754c6b7b34bb1127758de32bf2f8147e6f0ecf077ce5a91
+ size 4998289320
model-00019-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3d35d5a74174b80e450d72b1e8feff5661589254e433e64fc5ae78da68b1c0ab
+ size 4998290304
model-00020-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6f62fc2b83c7fe35905bac742c0112b80daaae24cac01c6b3faa6330a5504347
+ size 4993634416
model-00021-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0e8542f577f687949e971408ca465ab0285c09b9af8e5f944ab2f7e83a98fb95
+ size 4998290008
model-00022-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7e34e40af8bf55f8dfea065fb480697fbf845ff589a9f7b49d96a66979545fe5
+ size 4993634712
model-00023-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7a1dcbb51ab7dfbdb0bef4b4b870ff6dfd48dcf9248fcf079d187c9a61906632
+ size 4998289704
model-00024-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:16ca7d97f8c16b1c7619c354cd818c43a383449786d62d965325caafef3f5627
+ size 4993635008
model-00025-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:972eab6d19c18235821272c8e9a8313388c1c5a04a3cdd8c9a1bdd6499f2ce62
+ size 4998289504
model-00026-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c80185c32c726a3eeb2c361903701215e36b7c1024a7d5469b233ca43b1258e1
+ size 4993635216
model-00027-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6ce8a4d9ec9af8eef676933ad5852eaaae68b39d3766162c1bb931cb6a72d3f6
+ size 4998289504
model-00028-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:327067456c5f11f33946d15c3bbff1b7404e6ea6ff0f7b9d957d648e89df0bb1
+ size 4993635216
model-00029-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:16cdcc83c51ce9016f274a48dab21895c0e4684ba8d1fc3b8956cf3150910439
+ size 4998289504
model-00030-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6b903597f8c2c311fa7d758eef86e34cf8b53eb8299485a880ad17419b81263c
+ size 4998290488
model-00031-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:514e1f3d356062ac94c8acd7076dcffa1a94eeeb44c88f1cc4a191e4e397c5a8
+ size 4993634232
model-00032-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:076aacbd111b06a6857e2e66beb2391b38e7c5d6b05be8f265be9f076aefc1ff
+ size 4998290104
model-00033-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cf6f8313439d23dbeeafcc2b8b7f7e96bdb44c944fbddcad3d811510faddc9ac
+ size 4993634624
model-00034-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:776ed3f9e0c26aaae28d0406e981edfc243b225bbd0cbd3297ea6db9635baa57
+ size 4998289800
model-00035-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b2c17e1cbf614902d0725c453a9391e46045ab205f610d65dcb8dad110d1da09
+ size 4993634920
model-00036-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:23782a852a6e55125eca44318ab19b792ff9b67e561665be549c9522d1c6824a
+ size 4998289504
model-00037-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3f50ac007c0382b504bfdb4338042a2946219cb9690a1395c6e5ddd582471e1b
+ size 4993635216
model-00038-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:35487497835e7db3435e64346f9f40c0e651498e542815957e7ecf986f92bc7c
+ size 4998289504
model-00039-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6201b4be03c4b6d7cde202c832f216f3635073bd09910bac5a609fb24c191aa3
+ size 4993635216
model-00040-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5634802faeddae1321e5a9f6bbb62460c4641577149a5837455ae177b5c089aa
+ size 4998289504
model-00041-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:14112e52bed29525d1dbe8aa2856ca605a4d947964ca35e49c9740318abf230e
+ size 4993635216
model-00042-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a56e7843d1fd161cbd2d99c335ae987c2e130957b8f23de9718d3230722d5caa
+ size 4998289504
model-00043-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3a66bc829081cf83d7d59c57936d8f22bad2ca5ec535a02711eb5486589bde01
+ size 4998290192
model-00044-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1e98881118bdd0e58d39ae836950977a36b623c8e27e5b6223b0a8db6e121ff9
+ size 4993634520
model-00045-of-00112.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d53185fc0c184dd605c873fbe259a4ab5cbf21b780684f1bfe2c441ac70a22c2
+ size 4998289896
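Each model-*.safetensors entry above is a three-line Git LFS pointer (spec version, sha256 oid, byte size) rather than the weights themselves; the actual shards are fetched by git lfs pull or by the huggingface_hub downloader. A quick sketch to total the shard sizes recorded in the pointers, assuming a clone where the pointers have not yet been replaced by the real files:

# Sketch: sum the "size" fields of the Git LFS pointer files.
from pathlib import Path

total = 0
for pointer in sorted(Path(".").glob("model-*-of-00112.safetensors")):
    for line in pointer.read_text().splitlines():
        if line.startswith("size "):
            total += int(line.split()[1])
print(f"{total / 1e9:.1f} GB across the shards present")
# The 45 shards shown in this truncated view are ~5 GB each (~225 GB);
# the full checkpoint spans 112 shards, roughly 560 GB.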