Upload model
Browse files- README.md +199 -0
- config.json +36 -0
- configuration_dream.py +86 -0
- generation_config.json +16 -0
- generation_utils.py +446 -0
- model-00001-of-00004.safetensors +3 -0
- model-00002-of-00004.safetensors +3 -0
- model-00003-of-00004.safetensors +3 -0
- model-00004-of-00004.safetensors +3 -0
- model.safetensors.index.json +346 -0
- modeling_dream.py +824 -0
README.md
ADDED
@@ -0,0 +1,199 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
library_name: transformers
|
3 |
+
tags: []
|
4 |
+
---
|
5 |
+
|
6 |
+
# Model Card for Model ID
|
7 |
+
|
8 |
+
<!-- Provide a quick summary of what the model is/does. -->
|
9 |
+
|
10 |
+
|
11 |
+
|
12 |
+
## Model Details
|
13 |
+
|
14 |
+
### Model Description
|
15 |
+
|
16 |
+
<!-- Provide a longer summary of what this model is. -->
|
17 |
+
|
18 |
+
This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated.
|
19 |
+
|
20 |
+
- **Developed by:** [More Information Needed]
|
21 |
+
- **Funded by [optional]:** [More Information Needed]
|
22 |
+
- **Shared by [optional]:** [More Information Needed]
|
23 |
+
- **Model type:** [More Information Needed]
|
24 |
+
- **Language(s) (NLP):** [More Information Needed]
|
25 |
+
- **License:** [More Information Needed]
|
26 |
+
- **Finetuned from model [optional]:** [More Information Needed]
|
27 |
+
|
28 |
+
### Model Sources [optional]
|
29 |
+
|
30 |
+
<!-- Provide the basic links for the model. -->
|
31 |
+
|
32 |
+
- **Repository:** [More Information Needed]
|
33 |
+
- **Paper [optional]:** [More Information Needed]
|
34 |
+
- **Demo [optional]:** [More Information Needed]
|
35 |
+
|
36 |
+
## Uses
|
37 |
+
|
38 |
+
<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
|
39 |
+
|
40 |
+
### Direct Use
|
41 |
+
|
42 |
+
<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
|
43 |
+
|
44 |
+
[More Information Needed]
|
45 |
+
|
46 |
+
### Downstream Use [optional]
|
47 |
+
|
48 |
+
<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
|
49 |
+
|
50 |
+
[More Information Needed]
|
51 |
+
|
52 |
+
### Out-of-Scope Use
|
53 |
+
|
54 |
+
<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
|
55 |
+
|
56 |
+
[More Information Needed]
|
57 |
+
|
58 |
+
## Bias, Risks, and Limitations
|
59 |
+
|
60 |
+
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
|
61 |
+
|
62 |
+
[More Information Needed]
|
63 |
+
|
64 |
+
### Recommendations
|
65 |
+
|
66 |
+
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
|
67 |
+
|
68 |
+
Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
|
69 |
+
|
70 |
+
## How to Get Started with the Model
|
71 |
+
|
72 |
+
Use the code below to get started with the model.
|
73 |
+
|
74 |
+
[More Information Needed]
|
75 |
+
|
76 |
+
## Training Details
|
77 |
+
|
78 |
+
### Training Data
|
79 |
+
|
80 |
+
<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
|
81 |
+
|
82 |
+
[More Information Needed]
|
83 |
+
|
84 |
+
### Training Procedure
|
85 |
+
|
86 |
+
<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
|
87 |
+
|
88 |
+
#### Preprocessing [optional]
|
89 |
+
|
90 |
+
[More Information Needed]
|
91 |
+
|
92 |
+
|
93 |
+
#### Training Hyperparameters
|
94 |
+
|
95 |
+
- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
|
96 |
+
|
97 |
+
#### Speeds, Sizes, Times [optional]
|
98 |
+
|
99 |
+
<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
|
100 |
+
|
101 |
+
[More Information Needed]
|
102 |
+
|
103 |
+
## Evaluation
|
104 |
+
|
105 |
+
<!-- This section describes the evaluation protocols and provides the results. -->
|
106 |
+
|
107 |
+
### Testing Data, Factors & Metrics
|
108 |
+
|
109 |
+
#### Testing Data
|
110 |
+
|
111 |
+
<!-- This should link to a Dataset Card if possible. -->
|
112 |
+
|
113 |
+
[More Information Needed]
|
114 |
+
|
115 |
+
#### Factors
|
116 |
+
|
117 |
+
<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
|
118 |
+
|
119 |
+
[More Information Needed]
|
120 |
+
|
121 |
+
#### Metrics
|
122 |
+
|
123 |
+
<!-- These are the evaluation metrics being used, ideally with a description of why. -->
|
124 |
+
|
125 |
+
[More Information Needed]
|
126 |
+
|
127 |
+
### Results
|
128 |
+
|
129 |
+
[More Information Needed]
|
130 |
+
|
131 |
+
#### Summary
|
132 |
+
|
133 |
+
|
134 |
+
|
135 |
+
## Model Examination [optional]
|
136 |
+
|
137 |
+
<!-- Relevant interpretability work for the model goes here -->
|
138 |
+
|
139 |
+
[More Information Needed]
|
140 |
+
|
141 |
+
## Environmental Impact
|
142 |
+
|
143 |
+
<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
|
144 |
+
|
145 |
+
Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
|
146 |
+
|
147 |
+
- **Hardware Type:** [More Information Needed]
|
148 |
+
- **Hours used:** [More Information Needed]
|
149 |
+
- **Cloud Provider:** [More Information Needed]
|
150 |
+
- **Compute Region:** [More Information Needed]
|
151 |
+
- **Carbon Emitted:** [More Information Needed]
|
152 |
+
|
153 |
+
## Technical Specifications [optional]
|
154 |
+
|
155 |
+
### Model Architecture and Objective
|
156 |
+
|
157 |
+
[More Information Needed]
|
158 |
+
|
159 |
+
### Compute Infrastructure
|
160 |
+
|
161 |
+
[More Information Needed]
|
162 |
+
|
163 |
+
#### Hardware
|
164 |
+
|
165 |
+
[More Information Needed]
|
166 |
+
|
167 |
+
#### Software
|
168 |
+
|
169 |
+
[More Information Needed]
|
170 |
+
|
171 |
+
## Citation [optional]
|
172 |
+
|
173 |
+
<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
|
174 |
+
|
175 |
+
**BibTeX:**
|
176 |
+
|
177 |
+
[More Information Needed]
|
178 |
+
|
179 |
+
**APA:**
|
180 |
+
|
181 |
+
[More Information Needed]
|
182 |
+
|
183 |
+
## Glossary [optional]
|
184 |
+
|
185 |
+
<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
|
186 |
+
|
187 |
+
[More Information Needed]
|
188 |
+
|
189 |
+
## More Information [optional]
|
190 |
+
|
191 |
+
[More Information Needed]
|
192 |
+
|
193 |
+
## Model Card Authors [optional]
|
194 |
+
|
195 |
+
[More Information Needed]
|
196 |
+
|
197 |
+
## Model Card Contact
|
198 |
+
|
199 |
+
[More Information Needed]
|
config.json
ADDED
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"_name_or_path": "Dream-org/Dream-7B-instruct-v0-preview",
|
3 |
+
"architectures": [
|
4 |
+
"DreamModel"
|
5 |
+
],
|
6 |
+
"attention_dropout": 0.0,
|
7 |
+
"auto_map": {
|
8 |
+
"AutoConfig": "configuration_dream.DreamConfig",
|
9 |
+
"AutoModel": "modeling_dream.DreamModel"
|
10 |
+
},
|
11 |
+
"bos_token_id": 151643,
|
12 |
+
"eos_token_id": 151643,
|
13 |
+
"hidden_act": "silu",
|
14 |
+
"hidden_size": 3584,
|
15 |
+
"initializer_range": 0.02,
|
16 |
+
"intermediate_size": 18944,
|
17 |
+
"mask_token_id": 151666,
|
18 |
+
"max_position_embeddings": 131072,
|
19 |
+
"max_window_layers": 28,
|
20 |
+
"model_type": "Dream",
|
21 |
+
"num_attention_heads": 28,
|
22 |
+
"num_hidden_layers": 28,
|
23 |
+
"num_key_value_heads": 4,
|
24 |
+
"pad_token_id": 151643,
|
25 |
+
"rms_norm_eps": 1e-06,
|
26 |
+
"rope_scaling": null,
|
27 |
+
"rope_theta": 1000000.0,
|
28 |
+
"sliding_window": null,
|
29 |
+
"tie_word_embeddings": false,
|
30 |
+
"torch_dtype": "bfloat16",
|
31 |
+
"transformers_version": "4.46.2",
|
32 |
+
"use_cache": true,
|
33 |
+
"use_mrope": false,
|
34 |
+
"use_sliding_window": false,
|
35 |
+
"vocab_size": 152064
|
36 |
+
}
|
configuration_dream.py
ADDED
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2024 The Dream team, HKUNLP Group and the HuggingFace Inc. team. All rights reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
"""Dream model configuration"""
|
16 |
+
|
17 |
+
from transformers.configuration_utils import PretrainedConfig
|
18 |
+
from transformers.modeling_rope_utils import rope_config_validation
|
19 |
+
from transformers.utils import logging
|
20 |
+
|
21 |
+
|
22 |
+
logger = logging.get_logger(__name__)
|
23 |
+
|
24 |
+
|
25 |
+
class DreamConfig(PretrainedConfig):
|
26 |
+
model_type = "Dream"
|
27 |
+
keys_to_ignore_at_inference = ["past_key_values"]
|
28 |
+
|
29 |
+
def __init__(
|
30 |
+
self,
|
31 |
+
vocab_size=151936,
|
32 |
+
hidden_size=4096,
|
33 |
+
intermediate_size=22016,
|
34 |
+
num_hidden_layers=32,
|
35 |
+
num_attention_heads=32,
|
36 |
+
num_key_value_heads=32,
|
37 |
+
hidden_act="silu",
|
38 |
+
max_position_embeddings=32768,
|
39 |
+
initializer_range=0.02,
|
40 |
+
rms_norm_eps=1e-6,
|
41 |
+
use_cache=False, # cache not used in diffusion
|
42 |
+
tie_word_embeddings=False,
|
43 |
+
rope_theta=10000.0,
|
44 |
+
rope_scaling=None,
|
45 |
+
use_sliding_window=False,
|
46 |
+
sliding_window=4096,
|
47 |
+
max_window_layers=28,
|
48 |
+
attention_dropout=0.0,
|
49 |
+
mask_token_id=151666,
|
50 |
+
pad_token_id=151643,
|
51 |
+
**kwargs,
|
52 |
+
):
|
53 |
+
self.vocab_size = vocab_size
|
54 |
+
self.max_position_embeddings = max_position_embeddings
|
55 |
+
self.hidden_size = hidden_size
|
56 |
+
self.intermediate_size = intermediate_size
|
57 |
+
self.num_hidden_layers = num_hidden_layers
|
58 |
+
self.num_attention_heads = num_attention_heads
|
59 |
+
self.use_sliding_window = use_sliding_window
|
60 |
+
self.sliding_window = sliding_window if use_sliding_window else None
|
61 |
+
self.max_window_layers = max_window_layers
|
62 |
+
|
63 |
+
# for backward compatibility
|
64 |
+
if num_key_value_heads is None:
|
65 |
+
num_key_value_heads = num_attention_heads
|
66 |
+
|
67 |
+
self.num_key_value_heads = num_key_value_heads
|
68 |
+
self.hidden_act = hidden_act
|
69 |
+
self.initializer_range = initializer_range
|
70 |
+
self.rms_norm_eps = rms_norm_eps
|
71 |
+
self.use_cache = use_cache
|
72 |
+
self.rope_theta = rope_theta
|
73 |
+
self.rope_scaling = rope_scaling
|
74 |
+
self.attention_dropout = attention_dropout
|
75 |
+
# Validate the correctness of rotary position embeddings parameters
|
76 |
+
# BC: if there is a 'type' field, move it to 'rope_type'.
|
77 |
+
if self.rope_scaling is not None and "type" in self.rope_scaling:
|
78 |
+
self.rope_scaling["rope_type"] = self.rope_scaling["type"]
|
79 |
+
rope_config_validation(self)
|
80 |
+
|
81 |
+
super().__init__(
|
82 |
+
tie_word_embeddings=tie_word_embeddings,
|
83 |
+
**kwargs,
|
84 |
+
)
|
85 |
+
self.mask_token_id = mask_token_id
|
86 |
+
self.pad_token_id = pad_token_id
|
generation_config.json
ADDED
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"_from_model_config": true,
|
3 |
+
"alg": "origin",
|
4 |
+
"alg_temp": null,
|
5 |
+
"bos_token_id": 151643,
|
6 |
+
"eos_token_id": 151643,
|
7 |
+
"eps": 0.001,
|
8 |
+
"mask_token_id": null,
|
9 |
+
"output_history": false,
|
10 |
+
"pad_token_id": 151643,
|
11 |
+
"steps": 512,
|
12 |
+
"temperature": 0.0,
|
13 |
+
"top_k": null,
|
14 |
+
"top_p": null,
|
15 |
+
"transformers_version": "4.46.2"
|
16 |
+
}
|
generation_utils.py
ADDED
@@ -0,0 +1,446 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2024 The Dream team, HKUNLP Group and the HuggingFace Inc. team. All rights reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
|
16 |
+
import warnings
|
17 |
+
import copy
|
18 |
+
from dataclasses import dataclass
|
19 |
+
from typing import Any, Dict, Optional, Tuple, Union
|
20 |
+
|
21 |
+
import torch
|
22 |
+
import torch.distributions as dists
|
23 |
+
from torch.nn import functional as F
|
24 |
+
from transformers import __version__
|
25 |
+
from transformers.generation.configuration_utils import (
|
26 |
+
GenerationConfig
|
27 |
+
)
|
28 |
+
from transformers.utils import (
|
29 |
+
ModelOutput,
|
30 |
+
is_torchdynamo_compiling,
|
31 |
+
logging,
|
32 |
+
)
|
33 |
+
|
34 |
+
logger = logging.get_logger(__name__)
|
35 |
+
|
36 |
+
|
37 |
+
def top_p_logits(logits, top_p=None):
|
38 |
+
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
|
39 |
+
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
|
40 |
+
sorted_indices_to_remove = cumulative_probs > top_p
|
41 |
+
# Shift the indices to the right to keep the first token above the threshold
|
42 |
+
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
|
43 |
+
sorted_indices_to_remove[..., 0] = 0
|
44 |
+
|
45 |
+
mask = torch.zeros_like(logits, dtype=torch.bool, device=logits.device)
|
46 |
+
mask = mask.scatter_(-1, sorted_indices, sorted_indices_to_remove)
|
47 |
+
logits = logits.masked_fill(mask, torch.finfo(logits.dtype).min)
|
48 |
+
return logits
|
49 |
+
|
50 |
+
def top_k_logits(logits, top_k=None):
|
51 |
+
top_k = min(top_k, logits.size(-1)) # Safety check
|
52 |
+
# Remove all tokens with a probability less than the last token of the top-k
|
53 |
+
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
|
54 |
+
logits = logits.masked_fill(indices_to_remove, torch.finfo(logits.dtype).min)
|
55 |
+
return logits
|
56 |
+
|
57 |
+
|
58 |
+
def sample_tokens(logits, temperature=0.0, top_p=None, top_k=None, margin_confidence=False, neg_entropy=False):
|
59 |
+
|
60 |
+
if temperature > 0:
|
61 |
+
logits = logits / temperature
|
62 |
+
if top_p is not None and top_p < 1:
|
63 |
+
logits = top_p_logits(logits, top_p)
|
64 |
+
if top_k is not None:
|
65 |
+
logits = top_k_logits(logits, top_k)
|
66 |
+
probs = torch.softmax(logits, dim=-1)
|
67 |
+
|
68 |
+
if temperature > 0:
|
69 |
+
try:
|
70 |
+
x0 = dists.Categorical(probs=probs).sample()
|
71 |
+
confidence = torch.gather(probs, -1, x0.unsqueeze(-1)).squeeze(-1)
|
72 |
+
except:
|
73 |
+
confidence, x0 = probs.max(dim=-1)
|
74 |
+
else:
|
75 |
+
confidence, x0 = probs.max(dim=-1)
|
76 |
+
|
77 |
+
if margin_confidence:
|
78 |
+
sorted_probs, _ = torch.sort(probs, dim=-1, descending=True)
|
79 |
+
# Extract top1 and top2 probabilities
|
80 |
+
top1_probs = sorted_probs[:, 0]
|
81 |
+
top2_probs = sorted_probs[:, 1]
|
82 |
+
# Calculate confidence as top1 - top2
|
83 |
+
confidence = top1_probs - top2_probs
|
84 |
+
|
85 |
+
if neg_entropy:
|
86 |
+
epsilon = 1e-10
|
87 |
+
log_probs = torch.log(probs + epsilon)
|
88 |
+
confidence = torch.sum(probs * log_probs, dim=-1)
|
89 |
+
|
90 |
+
return confidence, x0
|
91 |
+
|
92 |
+
|
93 |
+
@dataclass
|
94 |
+
class DreamModelOutput(ModelOutput):
|
95 |
+
sequences: torch.LongTensor = None
|
96 |
+
history: Optional[Tuple[torch.FloatTensor]] = None
|
97 |
+
|
98 |
+
|
99 |
+
class DreamGenerationConfig(GenerationConfig):
|
100 |
+
def __init__(self, **kwargs):
|
101 |
+
self.temperature: float = kwargs.pop("temperature", 0.0)
|
102 |
+
self.top_p: Optional[float] = kwargs.pop("top_p", None)
|
103 |
+
self.top_k: Optional[int] = kwargs.pop("top_k", None)
|
104 |
+
self.max_length = kwargs.pop("max_length", 20)
|
105 |
+
self.max_new_tokens = kwargs.pop("max_new_tokens", None)
|
106 |
+
# diffusion specific params
|
107 |
+
self.eps: float = kwargs.pop("eps", 1e-3)
|
108 |
+
self.steps: int = kwargs.pop("steps", 512)
|
109 |
+
self.alg: str = kwargs.pop("alg", 'origin')
|
110 |
+
self.alg_temp: Optional[float] = kwargs.pop("alg_temp", None)
|
111 |
+
|
112 |
+
# Parameters that define the output variables of `generate`
|
113 |
+
self.num_return_sequences: int = kwargs.pop("num_return_sequences", 1)
|
114 |
+
self.return_dict_in_generate: bool = kwargs.pop("return_dict_in_generate", False)
|
115 |
+
self.output_history: bool = kwargs.pop("output_history", False)
|
116 |
+
|
117 |
+
# Special tokens that can be used at generation time
|
118 |
+
self.mask_token_id = kwargs.pop("mask_token_id", None)
|
119 |
+
self.pad_token_id = kwargs.pop("pad_token_id", None)
|
120 |
+
self.bos_token_id = kwargs.pop("bos_token_id", None)
|
121 |
+
self.eos_token_id = kwargs.pop("eos_token_id", None)
|
122 |
+
|
123 |
+
# Wild card
|
124 |
+
self.generation_kwargs = kwargs.pop("generation_kwargs", {})
|
125 |
+
|
126 |
+
# The remaining attributes do not parametrize `.generate()`, but are informative and/or used by the hub
|
127 |
+
# interface.
|
128 |
+
self._from_model_config = kwargs.pop("_from_model_config", False)
|
129 |
+
self._commit_hash = kwargs.pop("_commit_hash", None)
|
130 |
+
self.transformers_version = kwargs.pop("transformers_version", __version__)
|
131 |
+
|
132 |
+
# Additional attributes without default values
|
133 |
+
if not self._from_model_config:
|
134 |
+
# we don't want to copy values from the model config if we're initializing a `GenerationConfig` from a
|
135 |
+
# model's default configuration file
|
136 |
+
for key, value in kwargs.items():
|
137 |
+
try:
|
138 |
+
setattr(self, key, value)
|
139 |
+
except AttributeError as err:
|
140 |
+
logger.error(f"Can't set {key} with value {value} for {self}")
|
141 |
+
raise err
|
142 |
+
|
143 |
+
# Validate the values of the attributes
|
144 |
+
self.validate(is_init=True)
|
145 |
+
|
146 |
+
def validate(self, is_init=False):
|
147 |
+
pass
|
148 |
+
|
149 |
+
class DreamGenerationMixin:
|
150 |
+
@staticmethod
|
151 |
+
def _expand_inputs_for_generation(
|
152 |
+
expand_size: int = 1,
|
153 |
+
input_ids: Optional[torch.LongTensor] = None,
|
154 |
+
attention_mask: Optional[torch.LongTensor] = None
|
155 |
+
) -> Tuple[torch.LongTensor, Dict[str, Any]]:
|
156 |
+
"""Expands tensors from [batch_size, ...] to [batch_size * expand_size, ...]"""
|
157 |
+
# Do not call torch.repeat_interleave if expand_size is 1 because it clones
|
158 |
+
# the input tensor and thus requires more memory although no change is applied
|
159 |
+
if expand_size == 1:
|
160 |
+
return input_ids, attention_mask
|
161 |
+
if input_ids is not None:
|
162 |
+
input_ids = input_ids.repeat_interleave(expand_size, dim=0)
|
163 |
+
if attention_mask is not None:
|
164 |
+
attention_mask = attention_mask.repeat_interleave(expand_size, dim=0)
|
165 |
+
return input_ids, attention_mask
|
166 |
+
|
167 |
+
def _validate_generated_length(self, generation_config, input_ids_length, has_default_max_length):
|
168 |
+
"""Performs validation related to the resulting generated length"""
|
169 |
+
|
170 |
+
# Can't throw warnings/exceptions during compilation
|
171 |
+
if is_torchdynamo_compiling():
|
172 |
+
return
|
173 |
+
|
174 |
+
# 1. Max length warnings related to poor parameterization
|
175 |
+
if has_default_max_length and generation_config.max_new_tokens is None and generation_config.max_length == 20:
|
176 |
+
# 20 is the default max_length of the generation config
|
177 |
+
warnings.warn(
|
178 |
+
f"Using the model-agnostic default `max_length` (={generation_config.max_length}) to control the "
|
179 |
+
"generation length. We recommend setting `max_new_tokens` to control the maximum length of the "
|
180 |
+
"generation.",
|
181 |
+
UserWarning,
|
182 |
+
)
|
183 |
+
if input_ids_length >= generation_config.max_length:
|
184 |
+
input_ids_string = "input_ids"
|
185 |
+
raise ValueError(
|
186 |
+
f"Input length of {input_ids_string} is {input_ids_length}, but `max_length` is set to"
|
187 |
+
f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
|
188 |
+
" increasing `max_length` or, better yet, setting `max_new_tokens`."
|
189 |
+
)
|
190 |
+
|
191 |
+
def _prepare_generated_length(
|
192 |
+
self,
|
193 |
+
generation_config,
|
194 |
+
has_default_max_length,
|
195 |
+
input_ids_length,
|
196 |
+
):
|
197 |
+
"""Prepared max and min length in generation configs to avoid clashes between similar attributes"""
|
198 |
+
|
199 |
+
if generation_config.max_new_tokens is not None:
|
200 |
+
if not has_default_max_length and generation_config.max_length is not None:
|
201 |
+
logger.warning(
|
202 |
+
f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(="
|
203 |
+
f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
|
204 |
+
"Please refer to the documentation for more information. "
|
205 |
+
"(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)"
|
206 |
+
)
|
207 |
+
generation_config.max_length = generation_config.max_new_tokens + input_ids_length
|
208 |
+
|
209 |
+
elif has_default_max_length:
|
210 |
+
if generation_config.max_length == DreamGenerationConfig().max_length:
|
211 |
+
generation_config.max_length = generation_config.max_length + input_ids_length
|
212 |
+
max_position_embeddings = getattr(self.config, "max_position_embeddings", None)
|
213 |
+
if max_position_embeddings is not None:
|
214 |
+
generation_config.max_length = min(generation_config.max_length, max_position_embeddings)
|
215 |
+
|
216 |
+
return generation_config
|
217 |
+
|
218 |
+
def _prepare_generation_config(
|
219 |
+
self, generation_config: Optional[DreamGenerationConfig], **kwargs: Dict
|
220 |
+
) -> DreamGenerationConfig:
|
221 |
+
"""
|
222 |
+
Prepares the base generation config, then applies any generation configuration options from kwargs. This
|
223 |
+
function handles retrocompatibility with respect to configuration files.
|
224 |
+
"""
|
225 |
+
# priority: `generation_config` argument > `model.generation_config` (the default generation config)
|
226 |
+
using_model_generation_config = False
|
227 |
+
if generation_config is None:
|
228 |
+
generation_config = DreamGenerationConfig.from_model_config(self.config)
|
229 |
+
using_model_generation_config = True
|
230 |
+
|
231 |
+
# `torch.compile` can't compile `copy.deepcopy`, arguments in `kwargs` that are part of `generation_config`
|
232 |
+
# will mutate the object with `.update`. As such, passing these arguments through `kwargs` is disabled -- an
|
233 |
+
# exception will be raised in `_validate_model_kwargs`
|
234 |
+
if not is_torchdynamo_compiling():
|
235 |
+
generation_config = copy.deepcopy(generation_config)
|
236 |
+
_kwargs = generation_config.update(**kwargs)
|
237 |
+
# If `generation_config` is provided, let's fallback ALL special tokens to the default values for the model
|
238 |
+
if not using_model_generation_config:
|
239 |
+
if generation_config.bos_token_id is None:
|
240 |
+
generation_config.bos_token_id = self.generation_config.bos_token_id
|
241 |
+
if generation_config.eos_token_id is None:
|
242 |
+
generation_config.eos_token_id = self.generation_config.eos_token_id
|
243 |
+
if generation_config.pad_token_id is None:
|
244 |
+
generation_config.pad_token_id = self.generation_config.pad_token_id
|
245 |
+
if generation_config.mask_token_id is None:
|
246 |
+
generation_config.mask_token_id = self.generation_config.mask_token_id
|
247 |
+
|
248 |
+
return generation_config
|
249 |
+
|
250 |
+
def _prepare_special_tokens(
|
251 |
+
self,
|
252 |
+
generation_config: DreamGenerationConfig,
|
253 |
+
device: Optional[Union[torch.device, str]] = None,
|
254 |
+
):
|
255 |
+
"""
|
256 |
+
Prepares the special tokens for generation, overwriting the generation config with their processed versions
|
257 |
+
converted to tensor.
|
258 |
+
|
259 |
+
Note that `generation_config` is changed in place and stops being serializable after this method is called.
|
260 |
+
That is no problem if called within `generate` (`generation_config` is a local copy that doesn't leave the
|
261 |
+
function). However, if called outside `generate`, consider creating a copy of `generation_config` first.
|
262 |
+
"""
|
263 |
+
|
264 |
+
# Convert special tokens to tensors
|
265 |
+
def _tensor_or_none(token, device=None):
|
266 |
+
if token is None:
|
267 |
+
return token
|
268 |
+
|
269 |
+
device = device if device is not None else self.device
|
270 |
+
if isinstance(token, torch.Tensor):
|
271 |
+
return token.to(device)
|
272 |
+
return torch.tensor(token, device=device, dtype=torch.long)
|
273 |
+
|
274 |
+
bos_token_tensor = _tensor_or_none(generation_config.bos_token_id, device=device)
|
275 |
+
eos_token_tensor = _tensor_or_none(generation_config.eos_token_id, device=device)
|
276 |
+
pad_token_tensor = _tensor_or_none(generation_config.pad_token_id, device=device)
|
277 |
+
mask_token_tensor = _tensor_or_none(generation_config.mask_token_id, device=device)
|
278 |
+
|
279 |
+
# We can have more than one eos token. Always treat it as a 1D tensor (when it exists).
|
280 |
+
if eos_token_tensor is not None and eos_token_tensor.ndim == 0:
|
281 |
+
eos_token_tensor = eos_token_tensor.unsqueeze(0)
|
282 |
+
|
283 |
+
# Set pad token if unset (and there are conditions to do so)
|
284 |
+
if pad_token_tensor is None and eos_token_tensor is not None:
|
285 |
+
pad_token_tensor = eos_token_tensor[0]
|
286 |
+
logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{pad_token_tensor} for open-end generation.")
|
287 |
+
|
288 |
+
# Update generation config with the updated special tokens tensors
|
289 |
+
# NOTE: this must be written into a different attribute name than the one holding the original special tokens
|
290 |
+
# (in their non-tensor form), in order to enable end-to-end compilation. See
|
291 |
+
# https://pytorch.org/docs/stable/torch.compiler_cudagraph_trees.html#limitations
|
292 |
+
generation_config._bos_token_tensor = bos_token_tensor
|
293 |
+
generation_config._eos_token_tensor = eos_token_tensor
|
294 |
+
generation_config._pad_token_tensor = pad_token_tensor
|
295 |
+
generation_config._mask_token_tensor = mask_token_tensor
|
296 |
+
|
297 |
+
@torch.no_grad()
|
298 |
+
def diffusion_generate(
|
299 |
+
self,
|
300 |
+
inputs: Optional[torch.Tensor] = None,
|
301 |
+
generation_config: Optional[DreamGenerationConfig] = None,
|
302 |
+
**kwargs,
|
303 |
+
) -> Union[DreamModelOutput, torch.LongTensor]:
|
304 |
+
# 1. Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call
|
305 |
+
tokenizer = kwargs.pop("tokenizer", None) # Pull this out first, we only use it for stopping criteria
|
306 |
+
generation_config = self._prepare_generation_config(generation_config, **kwargs)
|
307 |
+
|
308 |
+
# 2. Define model inputs
|
309 |
+
assert inputs is not None
|
310 |
+
input_ids = inputs
|
311 |
+
device = input_ids.device
|
312 |
+
attention_mask = kwargs.pop("attention_mask", None)
|
313 |
+
self._prepare_special_tokens(generation_config, device=device)
|
314 |
+
|
315 |
+
# 3. Prepare `max_length`.
|
316 |
+
input_ids_length = input_ids.shape[-1]
|
317 |
+
has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
|
318 |
+
generation_config = self._prepare_generated_length(
|
319 |
+
generation_config=generation_config,
|
320 |
+
has_default_max_length=has_default_max_length,
|
321 |
+
input_ids_length=input_ids_length,
|
322 |
+
)
|
323 |
+
|
324 |
+
self._validate_generated_length(generation_config, input_ids_length, has_default_max_length)
|
325 |
+
|
326 |
+
# 4. Check input_ids
|
327 |
+
if not is_torchdynamo_compiling() and self.device.type != input_ids.device.type:
|
328 |
+
warnings.warn(
|
329 |
+
"You are calling .generate() with the `input_ids` being on a device type different"
|
330 |
+
f" than your model's device. `input_ids` is on {input_ids.device.type}, whereas the model"
|
331 |
+
f" is on {self.device.type}. You may experience unexpected behaviors or slower generation."
|
332 |
+
" Please make sure that you have put `input_ids` to the"
|
333 |
+
f" correct device by calling for example input_ids = input_ids.to('{self.device.type}') before"
|
334 |
+
" running `.generate()`.",
|
335 |
+
UserWarning,
|
336 |
+
)
|
337 |
+
if (
|
338 |
+
hasattr(generation_config, "pad_token_id") and
|
339 |
+
torch.any(input_ids == generation_config.pad_token_id) and
|
340 |
+
attention_mask is None
|
341 |
+
):
|
342 |
+
warnings.warn(
|
343 |
+
"Padding was detected but no attention mask is passed here. For correct "
|
344 |
+
"generation results, please set `attention_mask` when batch-padding inputs.",
|
345 |
+
UserWarning,
|
346 |
+
)
|
347 |
+
|
348 |
+
input_ids, attention_mask = self._expand_inputs_for_generation(
|
349 |
+
expand_size=generation_config.num_return_sequences,
|
350 |
+
input_ids=input_ids,
|
351 |
+
attention_mask=attention_mask
|
352 |
+
)
|
353 |
+
|
354 |
+
result = self._sample(
|
355 |
+
input_ids,
|
356 |
+
attention_mask=attention_mask,
|
357 |
+
generation_config=generation_config,
|
358 |
+
)
|
359 |
+
return result
|
360 |
+
|
361 |
+
def _sample(
|
362 |
+
self,
|
363 |
+
input_ids: torch.LongTensor,
|
364 |
+
attention_mask: Optional[torch.LongTensor],
|
365 |
+
generation_config: DreamGenerationConfig,
|
366 |
+
) -> Union[DreamModelOutput, torch.LongTensor]:
|
367 |
+
# init values
|
368 |
+
output_history = generation_config.output_history
|
369 |
+
return_dict_in_generate = generation_config.return_dict_in_generate
|
370 |
+
max_length = generation_config.max_length
|
371 |
+
mask_token_id = generation_config.mask_token_id
|
372 |
+
steps = generation_config.steps
|
373 |
+
eps = generation_config.eps
|
374 |
+
alg = generation_config.alg
|
375 |
+
alg_temp = generation_config.alg_temp
|
376 |
+
temperature = generation_config.temperature
|
377 |
+
top_p = generation_config.top_p
|
378 |
+
top_k = generation_config.top_k
|
379 |
+
|
380 |
+
histories = [] if (return_dict_in_generate and output_history) else None
|
381 |
+
|
382 |
+
# pad input_ids to max_length
|
383 |
+
x = F.pad(input_ids, (0, max_length - input_ids.shape[1]), value=mask_token_id)
|
384 |
+
|
385 |
+
if attention_mask is not None and torch.any(attention_mask == 0.0):
|
386 |
+
# we do not mask the [MASK] tokens so value = 1.0
|
387 |
+
attention_mask = F.pad(attention_mask, (0, max_length - attention_mask.shape[1]), value=1.0)
|
388 |
+
tok_idx = attention_mask.long().cumsum(-1) - 1
|
389 |
+
tok_idx.masked_fill_(attention_mask == 0, 1)
|
390 |
+
# attention_mask is of shape [B, N]
|
391 |
+
# broadcast to [B, 1, N, N]
|
392 |
+
attention_mask = torch.logical_and(
|
393 |
+
attention_mask.unsqueeze(1).unsqueeze(-2),
|
394 |
+
attention_mask.unsqueeze(1).unsqueeze(-1),
|
395 |
+
)
|
396 |
+
else:
|
397 |
+
tok_idx = None
|
398 |
+
attention_mask = "full"
|
399 |
+
|
400 |
+
timesteps = torch.linspace(1, eps, steps + 1, device=x.device)
|
401 |
+
for i in range(steps):
|
402 |
+
mask_index = (x == mask_token_id)
|
403 |
+
logits = self(x, attention_mask, tok_idx).logits
|
404 |
+
logits = torch.cat([logits[:,:1], logits[:, :-1]], dim=1)
|
405 |
+
logits = logits[mask_index]
|
406 |
+
t = timesteps[i]
|
407 |
+
s = timesteps[i + 1]
|
408 |
+
|
409 |
+
if alg == 'origin':
|
410 |
+
p_transfer = 1 - s / t if i < steps - 1 else 1
|
411 |
+
x0 = torch.zeros_like(x[mask_index], device=self.device, dtype=torch.long) + mask_token_id
|
412 |
+
transfer_index_t_s = torch.rand(*x0.shape, device=self.device) < p_transfer
|
413 |
+
_, x0[transfer_index_t_s]= sample_tokens(logits[transfer_index_t_s], temperature=temperature, top_p=top_p, top_k=top_k)
|
414 |
+
x[mask_index] = x0.clone()
|
415 |
+
else:
|
416 |
+
if alg == 'maskgit_plus':
|
417 |
+
confidence, x0 = sample_tokens(logits, temperature=temperature, top_p=top_p, top_k=top_k)
|
418 |
+
elif alg == 'topk_margin':
|
419 |
+
confidence, x0 = sample_tokens(logits, temperature=temperature, top_p=top_p, top_k=top_k, margin_confidence=True)
|
420 |
+
elif alg == 'entropy':
|
421 |
+
confidence, x0 = sample_tokens(logits, temperature, top_p=top_p, top_k=top_k, neg_entropy=True)
|
422 |
+
else:
|
423 |
+
raise RuntimeError(f"Unknown alg: {alg}")
|
424 |
+
num_mask_token = mask_index.sum()
|
425 |
+
number_transfer_tokens = int(num_mask_token * (1 - s / t)) if i < steps - 1 else num_mask_token
|
426 |
+
if number_transfer_tokens > 0:
|
427 |
+
if alg_temp is None or alg_temp == 0:
|
428 |
+
_, transfer_index = torch.topk(confidence, number_transfer_tokens)
|
429 |
+
else:
|
430 |
+
confidence = confidence / alg_temp
|
431 |
+
confidence = F.softmax(confidence, dim=-1)
|
432 |
+
transfer_index = torch.multinomial(confidence, num_samples=number_transfer_tokens)
|
433 |
+
x0_ = torch.zeros_like(x0, device=self.device, dtype=torch.long) + mask_token_id
|
434 |
+
x0_[transfer_index] = x0[transfer_index].clone()
|
435 |
+
x[mask_index] = x0_
|
436 |
+
|
437 |
+
if histories is not None:
|
438 |
+
histories.append(x.clone())
|
439 |
+
|
440 |
+
if return_dict_in_generate:
|
441 |
+
return DreamModelOutput(
|
442 |
+
sequences=x,
|
443 |
+
history=histories,
|
444 |
+
)
|
445 |
+
else:
|
446 |
+
return x
|
model-00001-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:3014d84950f2f3afcce622508b6625e9cd911b9c340f4206e35e6a3177aec993
|
3 |
+
size 4877660776
|
model-00002-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:78fc44a7eae29c3d13bba010ca12584f4220798d41c9e617579a33c47222aa57
|
3 |
+
size 4932751008
|
model-00003-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:eac5afaf9a0e0b4a23c7ac1dbe5f538ccdf9fac5c0db7919fd9625a92c378e48
|
3 |
+
size 4330865200
|
model-00004-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:398da65f1f599ca8d594a9d7a077a3b7c4eb0ddfdff8e7e004c3d259712b3cb7
|
3 |
+
size 1089994880
|
model.safetensors.index.json
ADDED
@@ -0,0 +1,346 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"metadata": {
|
3 |
+
"total_size": 15231233024
|
4 |
+
},
|
5 |
+
"weight_map": {
|
6 |
+
"lm_head.weight": "model-00004-of-00004.safetensors",
|
7 |
+
"model.embed_tokens.weight": "model-00001-of-00004.safetensors",
|
8 |
+
"model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
9 |
+
"model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
10 |
+
"model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
11 |
+
"model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
12 |
+
"model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
13 |
+
"model.layers.0.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
14 |
+
"model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
15 |
+
"model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
16 |
+
"model.layers.0.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
17 |
+
"model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
18 |
+
"model.layers.0.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
19 |
+
"model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
20 |
+
"model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
21 |
+
"model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
22 |
+
"model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
23 |
+
"model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
24 |
+
"model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
25 |
+
"model.layers.1.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
26 |
+
"model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
27 |
+
"model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
28 |
+
"model.layers.1.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
29 |
+
"model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
30 |
+
"model.layers.1.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
31 |
+
"model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
32 |
+
"model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
33 |
+
"model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
34 |
+
"model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
35 |
+
"model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
36 |
+
"model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
37 |
+
"model.layers.10.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
38 |
+
"model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
39 |
+
"model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
40 |
+
"model.layers.10.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
41 |
+
"model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
42 |
+
"model.layers.10.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
43 |
+
"model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
44 |
+
"model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
45 |
+
"model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
46 |
+
"model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
47 |
+
"model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
48 |
+
"model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
49 |
+
"model.layers.11.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
50 |
+
"model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
51 |
+
"model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
52 |
+
"model.layers.11.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
53 |
+
"model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
54 |
+
"model.layers.11.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
55 |
+
"model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
56 |
+
"model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
57 |
+
"model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
58 |
+
"model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
59 |
+
"model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
60 |
+
"model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
61 |
+
"model.layers.12.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
62 |
+
"model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
63 |
+
"model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
64 |
+
"model.layers.12.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
65 |
+
"model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
66 |
+
"model.layers.12.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
67 |
+
"model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
68 |
+
"model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
69 |
+
"model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
70 |
+
"model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
71 |
+
"model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
72 |
+
"model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
73 |
+
"model.layers.13.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
74 |
+
"model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
75 |
+
"model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
76 |
+
"model.layers.13.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
77 |
+
"model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
78 |
+
"model.layers.13.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
79 |
+
"model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
80 |
+
"model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
81 |
+
"model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
82 |
+
"model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
83 |
+
"model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
84 |
+
"model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
85 |
+
"model.layers.14.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
86 |
+
"model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
87 |
+
"model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
88 |
+
"model.layers.14.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
89 |
+
"model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
90 |
+
"model.layers.14.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
91 |
+
"model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
92 |
+
"model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
93 |
+
"model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
94 |
+
"model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
95 |
+
"model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
96 |
+
"model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
97 |
+
"model.layers.15.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
98 |
+
"model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
99 |
+
"model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
100 |
+
"model.layers.15.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
101 |
+
"model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
102 |
+
"model.layers.15.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
103 |
+
"model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
104 |
+
"model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
105 |
+
"model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
106 |
+
"model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
107 |
+
"model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
108 |
+
"model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
109 |
+
"model.layers.16.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
110 |
+
"model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
111 |
+
"model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
112 |
+
"model.layers.16.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
113 |
+
"model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
114 |
+
"model.layers.16.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
115 |
+
"model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
116 |
+
"model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
117 |
+
"model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
118 |
+
"model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
119 |
+
"model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
120 |
+
"model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
121 |
+
"model.layers.17.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
122 |
+
"model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
123 |
+
"model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
124 |
+
"model.layers.17.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
125 |
+
"model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
126 |
+
"model.layers.17.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
127 |
+
"model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
128 |
+
"model.layers.18.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
129 |
+
"model.layers.18.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
130 |
+
"model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
131 |
+
"model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
132 |
+
"model.layers.18.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
133 |
+
"model.layers.18.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
134 |
+
"model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
135 |
+
"model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
136 |
+
"model.layers.18.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
137 |
+
"model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
138 |
+
"model.layers.18.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
139 |
+
"model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
140 |
+
"model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
141 |
+
"model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
142 |
+
"model.layers.19.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
143 |
+
"model.layers.19.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
144 |
+
"model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
145 |
+
"model.layers.19.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
146 |
+
"model.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
147 |
+
"model.layers.19.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
148 |
+
"model.layers.19.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
149 |
+
"model.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
150 |
+
"model.layers.19.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
151 |
+
"model.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
152 |
+
"model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
153 |
+
"model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
154 |
+
"model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
155 |
+
"model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
156 |
+
"model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
157 |
+
"model.layers.2.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
158 |
+
"model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
159 |
+
"model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
160 |
+
"model.layers.2.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
161 |
+
"model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
162 |
+
"model.layers.2.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
163 |
+
"model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
164 |
+
"model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
165 |
+
"model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
166 |
+
"model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
167 |
+
"model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
168 |
+
"model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
169 |
+
"model.layers.20.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
170 |
+
"model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
171 |
+
"model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
172 |
+
"model.layers.20.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
173 |
+
"model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
174 |
+
"model.layers.20.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
175 |
+
"model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
176 |
+
"model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
177 |
+
"model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
178 |
+
"model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
179 |
+
"model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
180 |
+
"model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
181 |
+
"model.layers.21.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
182 |
+
"model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
183 |
+
"model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
184 |
+
"model.layers.21.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
185 |
+
"model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
186 |
+
"model.layers.21.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
187 |
+
"model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
188 |
+
"model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
189 |
+
"model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
190 |
+
"model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
191 |
+
"model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
192 |
+
"model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
193 |
+
"model.layers.22.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
194 |
+
"model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
195 |
+
"model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
196 |
+
"model.layers.22.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
197 |
+
"model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
198 |
+
"model.layers.22.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
199 |
+
"model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
200 |
+
"model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
201 |
+
"model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
202 |
+
"model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
203 |
+
"model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
204 |
+
"model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
205 |
+
"model.layers.23.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
206 |
+
"model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
207 |
+
"model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
208 |
+
"model.layers.23.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
209 |
+
"model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
210 |
+
"model.layers.23.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
211 |
+
"model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
212 |
+
"model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
213 |
+
"model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
214 |
+
"model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
215 |
+
"model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
216 |
+
"model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
217 |
+
"model.layers.24.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
218 |
+
"model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
219 |
+
"model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
220 |
+
"model.layers.24.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
221 |
+
"model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
222 |
+
"model.layers.24.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
223 |
+
"model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
224 |
+
"model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
225 |
+
"model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
226 |
+
"model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
227 |
+
"model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
228 |
+
"model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
229 |
+
"model.layers.25.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
230 |
+
"model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
231 |
+
"model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
232 |
+
"model.layers.25.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
233 |
+
"model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
234 |
+
"model.layers.25.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
235 |
+
"model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
236 |
+
"model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
237 |
+
"model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
238 |
+
"model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
239 |
+
"model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
240 |
+
"model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
241 |
+
"model.layers.26.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
242 |
+
"model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
243 |
+
"model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
244 |
+
"model.layers.26.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
245 |
+
"model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
246 |
+
"model.layers.26.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
247 |
+
"model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
248 |
+
"model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
249 |
+
"model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
250 |
+
"model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
251 |
+
"model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
252 |
+
"model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
253 |
+
"model.layers.27.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
254 |
+
"model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
255 |
+
"model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
256 |
+
"model.layers.27.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
257 |
+
"model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
258 |
+
"model.layers.27.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
259 |
+
"model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
260 |
+
"model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
261 |
+
"model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
262 |
+
"model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
263 |
+
"model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
264 |
+
"model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
265 |
+
"model.layers.3.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
266 |
+
"model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
267 |
+
"model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
268 |
+
"model.layers.3.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
269 |
+
"model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
270 |
+
"model.layers.3.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
271 |
+
"model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
272 |
+
"model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
273 |
+
"model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
274 |
+
"model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
275 |
+
"model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
276 |
+
"model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
277 |
+
"model.layers.4.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
278 |
+
"model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
279 |
+
"model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
280 |
+
"model.layers.4.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
281 |
+
"model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
282 |
+
"model.layers.4.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
283 |
+
"model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
284 |
+
"model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
285 |
+
"model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
286 |
+
"model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
287 |
+
"model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
288 |
+
"model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
289 |
+
"model.layers.5.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
290 |
+
"model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
291 |
+
"model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
292 |
+
"model.layers.5.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
293 |
+
"model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
294 |
+
"model.layers.5.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
295 |
+
"model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
296 |
+
"model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
297 |
+
"model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
298 |
+
"model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
299 |
+
"model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
300 |
+
"model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
301 |
+
"model.layers.6.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
302 |
+
"model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
303 |
+
"model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
304 |
+
"model.layers.6.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
305 |
+
"model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
306 |
+
"model.layers.6.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
307 |
+
"model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
308 |
+
"model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
309 |
+
"model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
310 |
+
"model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
311 |
+
"model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
312 |
+
"model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
313 |
+
"model.layers.7.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
314 |
+
"model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
315 |
+
"model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
316 |
+
"model.layers.7.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
317 |
+
"model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
318 |
+
"model.layers.7.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
319 |
+
"model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
320 |
+
"model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
321 |
+
"model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
322 |
+
"model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
323 |
+
"model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
324 |
+
"model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
325 |
+
"model.layers.8.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
326 |
+
"model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
327 |
+
"model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
328 |
+
"model.layers.8.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
329 |
+
"model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
330 |
+
"model.layers.8.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
331 |
+
"model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
332 |
+
"model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
333 |
+
"model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
334 |
+
"model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
335 |
+
"model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
336 |
+
"model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
337 |
+
"model.layers.9.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
338 |
+
"model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
339 |
+
"model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
340 |
+
"model.layers.9.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
341 |
+
"model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
342 |
+
"model.layers.9.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
343 |
+
"model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
344 |
+
"model.norm.weight": "model-00003-of-00004.safetensors"
|
345 |
+
}
|
346 |
+
}
|
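The `weight_map` above tells loaders which shard file holds each parameter. A minimal sketch of how such an index is typically consumed (illustrative only, not part of this upload; it assumes the files sit in the current directory and uses the standard `safetensors` Python API):

import json
from safetensors import safe_open

# Look up which shard stores a given tensor, then read it from that shard.
with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.layers.20.mlp.up_proj.weight"
shard = index["weight_map"][name]  # -> "model-00003-of-00004.safetensors"
with safe_open(shard, framework="pt", device="cpu") as f:
    tensor = f.get_tensor(name)
print(name, tuple(tensor.shape), "stored in", shard)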
modeling_dream.py
ADDED
@@ -0,0 +1,824 @@
# coding=utf-8
# Copyright 2024 The Dream team, HKUNLP Group and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT and Qwen implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT and Qwen used by the Meta AI and Qwen team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Dream model."""

import math
from typing import List, Optional, Tuple, Union
import os
import torch
import torch.utils.checkpoint
from torch import nn

from transformers.activations import ACT2FN
from transformers.cache_utils import Cache, DynamicCache
from transformers.modeling_outputs import (
    BaseModelOutput,
    MaskedLMOutput,
)
from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import (
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    is_flash_attn_2_available,
    is_flash_attn_greater_or_equal_2_10,
    logging,
)
from transformers import PretrainedConfig
from .configuration_dream import DreamConfig
from .generation_utils import DreamGenerationMixin, DreamGenerationConfig

if is_flash_attn_2_available():
    from transformers.modeling_flash_attention_utils import _flash_attention_forward


logger = logging.get_logger(__name__)


_CHECKPOINT_FOR_DOC = "Dream-7B"
_CONFIG_FOR_DOC = "DreamConfig"


# Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Dream
class DreamRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        DreamRMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"

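# DreamRMSNorm above computes y = weight * x / sqrt(mean(x**2, dim=-1) + eps):
# RMS normalization without mean-centering, with the statistics taken in float32
# and the result cast back to the input dtype.
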
# Copied from transformers.models.llama.modeling_llama.LlamaRotaryEmbedding with Llama->Dream
class DreamRotaryEmbedding(nn.Module):
    def __init__(
        self,
        dim=None,
        max_position_embeddings=2048,
        base=10000,
        device=None,
        scaling_factor=1.0,
        rope_type="default",
        config: Optional[DreamConfig] = None,
    ):
        super().__init__()
        # TODO (joao): remove the `if` below, only used for BC
        self.rope_kwargs = {}
        if config is None:
            logger.warning_once(
                "`DreamRotaryEmbedding` can now be fully parameterized by passing the model config through the "
                "`config` argument. All other arguments will be removed in v4.46"
            )
            self.rope_kwargs = {
                "rope_type": rope_type,
                "factor": scaling_factor,
                "dim": dim,
                "base": base,
                "max_position_embeddings": max_position_embeddings,
            }
            self.rope_type = rope_type
            self.max_seq_len_cached = max_position_embeddings
            self.original_max_seq_len = max_position_embeddings
        else:
            # BC: "rope_type" was originally "type"
            if config.rope_scaling is not None:
                self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
            else:
                self.rope_type = "default"
            self.max_seq_len_cached = config.max_position_embeddings
            self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, **self.rope_kwargs)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    def reset_parameters(self):
        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, self.inv_freq.device, **self.rope_kwargs)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    def _dynamic_frequency_update(self, position_ids, device):
        """
        dynamic RoPE layers should recompute `inv_freq` in the following situations:
        1 - growing beyond the cached sequence length (allow scaling)
        2 - the current sequence length is in the original scale (avoid losing precision with small sequences)
        """
        seq_len = torch.max(position_ids) + 1
        if seq_len > self.max_seq_len_cached:  # growth
            inv_freq, self.attention_scaling = self.rope_init_fn(
                self.config, device, seq_len=seq_len, **self.rope_kwargs
            )
            self.register_buffer("inv_freq", inv_freq, persistent=False)  # TODO joao: may break with compilation
            self.max_seq_len_cached = seq_len

        if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len:  # reset
            self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
            self.max_seq_len_cached = self.original_max_seq_len

    @torch.no_grad()
    def forward(self, x, position_ids):
        if "dynamic" in self.rope_type:
            self._dynamic_frequency_update(position_ids, device=x.device)

        # Core RoPE block
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :].float()
        # Force float32 (see https://github.com/huggingface/transformers/pull/29285)
        device_type = x.device.type
        device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos()
            sin = emb.sin()

        # Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention
        cos = cos * self.attention_scaling
        sin = sin * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)

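# DreamRotaryEmbedding.forward returns cos/sin tables of shape (batch, seq_len, head_dim).
# For the default rope_type the frequencies come from inv_freq[i] = 1 / base**(2i / head_dim)
# (built by ROPE_INIT_FUNCTIONS), and `attention_scaling` rescales them for extended-context
# RoPE variants such as yarn.
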
# Copied from transformers.models.llama.modeling_llama.rotate_half
def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


# Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed

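# Shape walk-through for apply_rotary_pos_emb with the default unsqueeze_dim=1:
#   q, k:     (batch, num_heads, seq_len, head_dim)
#   cos, sin: (batch, seq_len, head_dim) -> unsqueezed to (batch, 1, seq_len, head_dim)
# so q_embed = q * cos + rotate_half(q) * sin broadcasts over the head dimension and
# rotates each pair formed by the first and second halves of head_dim by the position angle.
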
# Copied from transformers.models.mistral.modeling_mistral.MistralMLP with Mistral->Dream
class DreamMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        return self.down_proj(self.act_fn(self.gate_proj(hidden_state)) * self.up_proj(hidden_state))


# Copied from transformers.models.llama.modeling_llama.repeat_kv
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)

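# repeat_kv expands grouped-query K/V from (batch, num_key_value_heads, seq_len, head_dim)
# to (batch, num_key_value_heads * n_rep, seq_len, head_dim) via view/expand/reshape, so
# every query head in a group attends over the same shared key/value head.
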
class DreamAttention(nn.Module):
    """
    Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
    and "Generating Long Sequences with Sparse Transformers".
    """

    def __init__(self, config: DreamConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        if layer_idx is None:
            logger.warning_once(
                f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will lead "
                "to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
                "when creating this class."
            )

        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.num_key_value_heads = config.num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.max_position_embeddings = config.max_position_embeddings
        self.rope_theta = config.rope_theta
        self.is_causal = False
        self.attention_dropout = config.attention_dropout

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
                f" and `num_heads`: {self.num_heads})."
            )
        self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=True)
        self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
        self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
        self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)

        self.rotary_emb = DreamRotaryEmbedding(config=self.config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # will become mandatory in v4.46
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        if position_embeddings is None:
            logger.warning_once(
                "The attention layers in this model are transitioning from computing the RoPE embeddings internally "
                "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed "
                "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.46 `position_ids` will be "
                "removed and `position_embeddings` will be mandatory."
            )
            cos, sin = self.rotary_emb(value_states, position_ids)
        else:
            cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}  # Specific to RoPE models
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # repeat k/v heads if n_kv_heads < n_heads
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
        if attention_mask is not None:  # no matter the length, we just slice it
            causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
            attn_weights = attn_weights + causal_mask

        # upcast attention to fp32
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
        attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
        attn_output = torch.matmul(attn_weights, value_states)

        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)

        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value


class DreamSdpaAttention(DreamAttention):
    """
    Dream attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
    `DreamAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to
    SDPA API.
    """

    # Adapted from DreamAttention.forward
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # will become mandatory in v4.46
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        if output_attentions:
            # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
            logger.warning_once(
                "DreamModel is using DreamSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
                'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
            )
            return super().forward(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
                use_cache=use_cache,
            )

        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        if position_embeddings is None:
            logger.warning_once(
                "The attention layers in this model are transitioning from computing the RoPE embeddings internally "
                "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed "
                "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.46 `position_ids` will be "
                "removed and `position_embeddings` will be mandatory."
            )
            cos, sin = self.rotary_emb(value_states, position_ids)
        else:
            cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}  # Specific to RoPE models
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        # causal_mask = attention_mask
        # if attention_mask is not None:  # no matter the length, we just slice it
        #     causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]

        # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
        # Reference: https://github.com/pytorch/pytorch/issues/112577.
        if query_states.device.type == "cuda" and attention_mask is not None:
            query_states = query_states.contiguous()
            key_states = key_states.contiguous()
            value_states = value_states.contiguous()

        # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
        # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
        # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
        # is_causal = True if causal_mask is None and q_len > 1 else False

        attn_output = torch.nn.functional.scaled_dot_product_attention(
            query_states,
            key_states,
            value_states,
            attn_mask=attention_mask if isinstance(attention_mask, torch.Tensor) else None,
            dropout_p=self.attention_dropout if self.training else 0.0,
            is_causal=False,  # hard coded
        )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.view(bsz, q_len, self.hidden_size)

        attn_output = self.o_proj(attn_output)

        return attn_output, None, past_key_value


class DreamDecoderLayer(nn.Module):
    def __init__(self, config: DreamConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size

        if config.sliding_window and config._attn_implementation != "flash_attention_2":
            logger.warning_once(
                f"Sliding Window Attention is enabled but not implemented for `{config._attn_implementation}`; "
                "unexpected results may be encountered."
            )

        # self.self_attn = Dream_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx)
        self.self_attn = DreamSdpaAttention(config, layer_idx)

        self.mlp = DreamMLP(config)
        self.input_layernorm = DreamRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = DreamRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # will become mandatory in v4.46
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, sequence_length)` where padding elements are indicated by 0.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence.
            position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
                Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
                with `head_dim` being the embedding dimension of each attention head.
            kwargs (`dict`, *optional*):
                Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code
                into the model
        """

        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        if use_cache:
            outputs += (present_key_value,)

        return outputs

+
class DreamPreTrainedModel(PreTrainedModel):
|
527 |
+
config_class = DreamConfig
|
528 |
+
base_model_prefix = "model"
|
529 |
+
supports_gradient_checkpointing = True
|
530 |
+
_no_split_modules = ["DreamDecoderLayer"]
|
531 |
+
_skip_keys_device_placement = "past_key_values"
|
532 |
+
_supports_flash_attn_2 = True
|
533 |
+
_supports_sdpa = True
|
534 |
+
_supports_cache_class = True
|
535 |
+
_supports_quantized_cache = True
|
536 |
+
_supports_static_cache = True
|
537 |
+
|
538 |
+
def _init_weights(self, module):
|
539 |
+
std = self.config.initializer_range
|
540 |
+
if isinstance(module, nn.Linear):
|
541 |
+
module.weight.data.normal_(mean=0.0, std=std)
|
542 |
+
if module.bias is not None:
|
543 |
+
module.bias.data.zero_()
|
544 |
+
elif isinstance(module, nn.Embedding):
|
545 |
+
module.weight.data.normal_(mean=0.0, std=std)
|
546 |
+
if module.padding_idx is not None:
|
547 |
+
module.weight.data[module.padding_idx].zero_()
|
548 |
+
|
549 |
+
@classmethod
|
550 |
+
def from_pretrained(
|
551 |
+
cls,
|
552 |
+
pretrained_model_name_or_path: Optional[Union[str, os.PathLike]],
|
553 |
+
*model_args,
|
554 |
+
config: Optional[Union[PretrainedConfig, str, os.PathLike]] = None,
|
555 |
+
cache_dir: Optional[Union[str, os.PathLike]] = None,
|
556 |
+
ignore_mismatched_sizes: bool = False,
|
557 |
+
force_download: bool = False,
|
558 |
+
local_files_only: bool = False,
|
559 |
+
token: Optional[Union[str, bool]] = None,
|
560 |
+
revision: str = "main",
|
561 |
+
use_safetensors: Optional[bool] = None,
|
562 |
+
weights_only: bool = True,
|
563 |
+
**kwargs,
|
564 |
+
):
|
565 |
+
_model = super().from_pretrained(
|
566 |
+
pretrained_model_name_or_path,
|
567 |
+
*model_args,
|
568 |
+
config=config,
|
569 |
+
cache_dir=cache_dir,
|
570 |
+
ignore_mismatched_sizes=ignore_mismatched_sizes,
|
571 |
+
force_download=force_download,
|
572 |
+
local_files_only=local_files_only,
|
573 |
+
token=token,
|
574 |
+
revision=revision,
|
575 |
+
use_safetensors=use_safetensors,
|
576 |
+
weights_only=weights_only,
|
577 |
+
**kwargs,
|
578 |
+
)
|
579 |
+
# NOTE(Lin): we need to override the generation config
|
580 |
+
# because the generation config loaded in `from_pretrained`
|
581 |
+
# does not include all the attributes of DreamGenerationConfig
|
582 |
+
resume_download = kwargs.get("resume_download", None)
|
583 |
+
proxies = kwargs.get("proxies", None)
|
584 |
+
subfolder = kwargs.get("subfolder", "")
|
585 |
+
from_auto_class = kwargs.get("_from_auto", False)
|
586 |
+
from_pipeline = kwargs.get("_from_pipeline", None)
|
587 |
+
_model.generation_config = DreamGenerationConfig.from_pretrained(
|
588 |
+
pretrained_model_name_or_path,
|
589 |
+
cache_dir=cache_dir,
|
590 |
+
force_download=force_download,
|
591 |
+
resume_download=resume_download,
|
592 |
+
proxies=proxies,
|
593 |
+
local_files_only=local_files_only,
|
594 |
+
token=token,
|
595 |
+
revision=revision,
|
596 |
+
subfolder=subfolder,
|
597 |
+
_from_auto=from_auto_class,
|
598 |
+
_from_pipeline=from_pipeline,
|
599 |
+
)
|
600 |
+
return _model
|
601 |
+
|
602 |
+
class DreamBaseModel(DreamPreTrainedModel):
|
603 |
+
"""
|
604 |
+
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`DreamDecoderLayer`]
|
605 |
+
|
606 |
+
Args:
|
607 |
+
config: DreamConfig
|
608 |
+
"""
|
609 |
+
|
610 |
+
def __init__(self, config: DreamConfig):
|
611 |
+
super().__init__(config)
|
612 |
+
self.padding_idx = config.pad_token_id
|
613 |
+
self.vocab_size = config.vocab_size
|
614 |
+
|
615 |
+
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
|
616 |
+
self.layers = nn.ModuleList(
|
617 |
+
[DreamDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
|
618 |
+
)
|
619 |
+
self._attn_implementation = config._attn_implementation
|
620 |
+
self.norm = DreamRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
621 |
+
self.rotary_emb = DreamRotaryEmbedding(config=config)
|
622 |
+
|
623 |
+
self.gradient_checkpointing = False
|
624 |
+
# Initialize weights and apply final processing
|
625 |
+
self.post_init()
|
626 |
+
|
627 |
+
def get_input_embeddings(self):
|
628 |
+
return self.embed_tokens
|
629 |
+
|
630 |
+
def set_input_embeddings(self, value):
|
631 |
+
self.embed_tokens = value
|
632 |
+
|
633 |
+
def forward(
|
634 |
+
self,
|
635 |
+
input_ids: torch.LongTensor = None,
|
636 |
+
attention_mask: Optional[torch.Tensor] = None,
|
637 |
+
position_ids: Optional[torch.LongTensor] = None,
|
638 |
+
past_key_values: Optional[List[torch.FloatTensor]] = None,
|
639 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
640 |
+
use_cache: Optional[bool] = None,
|
641 |
+
output_attentions: Optional[bool] = None,
|
642 |
+
output_hidden_states: Optional[bool] = None,
|
643 |
+
return_dict: Optional[bool] = None,
|
644 |
+
cache_position: Optional[torch.LongTensor] = None,
|
645 |
+
) -> Union[Tuple, BaseModelOutput]:
|
646 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
647 |
+
output_hidden_states = (
|
648 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
649 |
+
)
|
650 |
+
use_cache = use_cache if use_cache is not None else self.config.use_cache
|
651 |
+
|
652 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
653 |
+
|
654 |
+
if (input_ids is None) ^ (inputs_embeds is not None):
|
655 |
+
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
|
656 |
+
|
657 |
+
if self.gradient_checkpointing and self.training:
|
658 |
+
if use_cache:
|
659 |
+
logger.warning_once(
|
660 |
+
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
|
661 |
+
)
|
662 |
+
use_cache = False
|
663 |
+
|
664 |
+
if inputs_embeds is None:
|
665 |
+
inputs_embeds = self.embed_tokens(input_ids)
|
666 |
+
|
667 |
+
if use_cache and past_key_values is None:
|
668 |
+
past_key_values = DynamicCache()
|
669 |
+
|
670 |
+
if cache_position is None:
|
671 |
+
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
|
672 |
+
cache_position = torch.arange(
|
673 |
+
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
|
674 |
+
)
|
675 |
+
|
676 |
+
if position_ids is None:
|
677 |
+
position_ids = cache_position.unsqueeze(0)
|
678 |
+
|
679 |
+
hidden_states = inputs_embeds
|
680 |
+
|
681 |
+
# create position embeddings to be shared across the decoder layers
|
682 |
+
position_embeddings = self.rotary_emb(hidden_states, position_ids)
|
683 |
+
|
684 |
+
# decoder layers
|
685 |
+
all_hidden_states = () if output_hidden_states else None
|
686 |
+
all_self_attns = () if output_attentions else None
|
687 |
+
|
688 |
+
for decoder_layer in self.layers:
|
689 |
+
if output_hidden_states:
|
690 |
+
all_hidden_states += (hidden_states,)
|
691 |
+
|
692 |
+
if self.gradient_checkpointing and self.training:
|
693 |
+
layer_outputs = self._gradient_checkpointing_func(
|
694 |
+
decoder_layer.__call__,
|
695 |
+
hidden_states,
|
696 |
+
attention_mask,
|
697 |
+
position_ids,
|
698 |
+
past_key_values,
|
699 |
+
output_attentions,
|
700 |
+
use_cache,
|
701 |
+
cache_position,
|
702 |
+
position_embeddings,
|
703 |
+
)
|
704 |
+
else:
|
705 |
+
layer_outputs = decoder_layer(
|
706 |
+
hidden_states,
|
707 |
+
attention_mask=attention_mask,
|
708 |
+
position_ids=position_ids,
|
709 |
+
past_key_value=past_key_values,
|
710 |
+
output_attentions=output_attentions,
|
711 |
+
use_cache=use_cache,
|
712 |
+
cache_position=cache_position,
|
713 |
+
position_embeddings=position_embeddings,
|
714 |
+
)
|
715 |
+
|
716 |
+
hidden_states = layer_outputs[0]
|
717 |
+
|
718 |
+
if output_attentions:
|
719 |
+
all_self_attns += (layer_outputs[1],)
|
720 |
+
|
721 |
+
hidden_states = self.norm(hidden_states)
|
722 |
+
|
723 |
+
# add hidden states from the last decoder layer
|
724 |
+
if output_hidden_states:
|
725 |
+
all_hidden_states += (hidden_states,)
|
726 |
+
|
727 |
+
if not return_dict:
|
728 |
+
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attns] if v is not None)
|
729 |
+
return BaseModelOutput(
|
730 |
+
last_hidden_state=hidden_states,
|
731 |
+
hidden_states=all_hidden_states,
|
732 |
+
attentions=all_self_attns,
|
733 |
+
)
|
734 |
+
|
735 |
+
|
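# Unlike a causal LM head, DreamModel below pairs this backbone with a plain linear
# lm_head and returns MaskedLMOutput; attention runs with is_causal=False, so every
# position can attend to the full sequence, and generation is delegated to
# DreamGenerationMixin from generation_utils.py.
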
class DreamModel(DreamGenerationMixin, DreamPreTrainedModel):
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = DreamBaseModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def reset_rope_parameters(self):
        self.model.rotary_emb.reset_parameters()
        for layer in self.model.layers:
            layer.self_attn.rotary_emb.reset_parameters()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        num_logits_to_keep: int = 0,
        **loss_kwargs,
    ) -> Union[Tuple, MaskedLMOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
        )

        hidden_states = outputs[0]
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits, labels, self.vocab_size, **loss_kwargs)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return MaskedLMOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
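# Illustrative usage only (the repo id below is a placeholder; loading any remote-code
# model requires trust_remote_code=True so that this file is imported):
#
#   from transformers import AutoModel
#   model = AutoModel.from_pretrained("<this-repo>", trust_remote_code=True)
#   out = model(input_ids)  # MaskedLMOutput with logits over the vocabulary
#   # generation entry points are provided by DreamGenerationMixin (see generation_utils.py)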