pdjohn committed · Commit ccfeb92 · verified · 1 Parent(s): 5b97b7a

Upload model

Files changed (3):
  1. config.json +13 -9
  2. configuration_eurobert.py +216 -0
  3. model.safetensors +1 -1
config.json CHANGED
@@ -41,7 +41,7 @@
   "mask_token_id": 128002,
   "max_position_embeddings": 8192,
   "mlp_bias": false,
-  "model_type": "causalbert_multitask",
+  "model_type": "eurobert",
   "num_attention_heads": 12,
   "num_hidden_layers": 12,
   "num_key_value_heads": 12,
@@ -50,21 +50,25 @@
   "pad_token": "<|end_of_text|>",
   "pad_token_id": 128001,
   "pretraining_tp": 1,
-  "relation_class_weights": null,
+  "relation_class_weights": [
+    3.1413280715940357,
+    0.06053432820389329,
+    0.050202345060633424,
+    0.7479352551414371
+  ],
   "rms_norm_eps": 1e-05,
   "rope_scaling": null,
   "rope_theta": 250000,
   "span_class_weights": [
-    0.09086648603440675,
-    2.1256438250409166,
-    1.4697529981521529,
-    0.9365069591289833,
-    0.37722973164353996
+    0.09106958441686581,
+    2.139054502968615,
+    1.4801632619082092,
+    0.9552814362568587,
+    0.3344312144494511
   ],
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
   "transformers_version": "4.53.1",
   "use_cache": false,
-  "vocab_size": 128256,
-  "vocab_size_with_special_tokens": 128256
+  "vocab_size": 128256
 }
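With `model_type` switched from `causalbert_multitask` to `eurobert` and a `configuration_eurobert.py` bundled in the repo (next file below), the config is expected to resolve through the remote-code path. A minimal sketch of loading it, assuming a placeholder repo id, that `trust_remote_code=True` is needed to pick up the bundled configuration module, and that the extra `relation_class_weights` / `span_class_weights` keys are simply carried onto the config object as attributes:

```python
# Minimal sketch: load the updated config from the Hub. The repo id is a
# placeholder, and trust_remote_code=True is assumed to be required so that
# transformers resolves the bundled configuration_eurobert.py.
from transformers import AutoConfig

config = AutoConfig.from_pretrained(
    "pdjohn/eurobert-multitask",  # hypothetical repo id
    trust_remote_code=True,
)

print(config.model_type)              # "eurobert"
print(config.relation_class_weights)  # 4 per-class weights added in this commit
print(config.span_class_weights)      # 5 per-class weights updated in this commit
```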
configuration_eurobert.py ADDED
@@ -0,0 +1,216 @@
+# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+# This file was automatically generated from src/transformers/models/eurobert/modular_eurobert.py.
+# Do NOT edit this file manually as any edits will be overwritten by the generation of
+# the file from the modular. If any change should be done, please apply the change to the
+# modular_eurobert.py file directly. One of our CI enforces this.
+# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+# coding=utf-8
+# Copyright 2025 Nicolas Boizard, Duarte M. Alves, Hippolyte Gisserot-Boukhlef and the EuroBert team. All rights reserved.
+#
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from transformers.utils import logging
+from transformers.models.llama import LlamaConfig
+
+
+logger = logging.get_logger(__name__)
+
+
+class EuroBertConfig(LlamaConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`EuroBertModel`]. It is used to instantiate an EuroBert
+    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+    defaults will yield a similar configuration to that of the EuroBERT-210m.
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+
+    Args:
+        vocab_size (`int`, *optional*, defaults to 128256):
+            Vocabulary size of the EuroBert model. Defines the number of different tokens that can be represented by the
+            `inputs_ids` passed when calling [`EuroBertModel`]
+        hidden_size (`int`, *optional*, defaults to 768):
+            Dimensionality of the encoder layers and the pooler layer.
+        intermediate_size (`int`, *optional*, defaults to 3072):
+            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
+        num_hidden_layers (`int`, *optional*, defaults to 12):
+            Number of hidden layers in the Transformer encoder.
+        num_attention_heads (`int`, *optional*, defaults to 12):
+            Number of attention heads for each attention layer in the Transformer encoder.
+        num_key_value_heads (`int`, *optional*):
+            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
+            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
+            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+            by meanpooling all the original heads within that group. For more details checkout [this
+            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
+            `num_attention_heads`.
+        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+            The non-linear activation function (function or string) in the encoder and pooler.
+        max_position_embeddings (`int`, *optional*, defaults to 8192):
+            The maximum sequence length that this model might ever be used with. EuroBert supports up to 8192 tokens,
+            EuroBert-pretrained up to 2048.
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
+            The epsilon used by the rms normalization layers.
+        bos_token_id (`int`, *optional*, defaults to 128000):
+            Beginning of stream token id.
+        eos_token_id (`int`, *optional*, defaults to 128001):
+            End of stream token id.
+        pad_token_id (`int`, *optional*, defaults to 128001):
+            Padding token id.
+        mask_token_id (`int`, *optional*, defaults to 128002):
+            Mask token id.
+        pretraining_tp (`int`, *optional*, defaults to 1):
+            Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
+            document](https://huggingface.co/docs/transformers/main/perf_train_gpu_many#tensor-parallelism) to
+            understand more about it. This value is necessary to ensure exact reproducibility of the pretraining
+            results. Please refer to [this issue](https://github.com/pytorch/pytorch/issues/76232).
+        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+            Whether to tie weight embeddings
+        rope_theta (`float`, *optional*, defaults to 250000.0):
+            The base period of the RoPE embeddings. EuroBert used base period of 250000.0,
+            EuroBert-pretrained 10000.0.
+        rope_scaling (`Dict`, *optional*):
+            Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
+            and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
+            accordingly.
+            Expected contents:
+            `rope_type` (`str`):
+                The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
+                'eurobert3'], with 'default' being the original RoPE implementation.
+            `factor` (`float`, *optional*):
+                Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
+                most scaling types, a `factor` of x will enable the model to handle sequences of length x *
+                original maximum pre-trained length.
+            `original_max_position_embeddings` (`int`, *optional*):
+                Used with 'dynamic', 'longrope' and 'eurobert3'. The original max position embeddings used during
+                pretraining.
+            `attention_factor` (`float`, *optional*):
+                Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
+                computation. If unspecified, it defaults to value recommended by the implementation, using the
+                `factor` field to infer the suggested value.
+            `beta_fast` (`float`, *optional*):
+                Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
+                ramp function. If unspecified, it defaults to 32.
+            `beta_slow` (`float`, *optional*):
+                Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
+                ramp function. If unspecified, it defaults to 1.
+            `short_factor` (`List[float]`, *optional*):
+                Only used with 'longrope'. The scaling factor to be applied to short contexts (<
+                `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
+                size divided by the number of attention heads divided by 2
+            `long_factor` (`List[float]`, *optional*):
+                Only used with 'longrope'. The scaling factor to be applied to long contexts (<
+                `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
+                size divided by the number of attention heads divided by 2
+            `low_freq_factor` (`float`, *optional*):
+                Only used with 'eurobert3'. Scaling factor applied to low frequency components of the RoPE
+            `high_freq_factor` (`float`, *optional*):
+                Only used with 'eurobert3'. Scaling factor applied to high frequency components of the RoPE
+        attention_bias (`bool`, *optional*, defaults to `False`):
+            Whether to use a bias in the query, key, value and output projection layers during self-attention.
+        attention_dropout (`float`, *optional*, defaults to 0.0):
+            The dropout ratio for the attention probabilities.
+        mlp_bias (`bool`, *optional*, defaults to `False`):
+            Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
+        head_dim (`int`, *optional*):
+            The attention head dimension. If None, it will default to hidden_size // num_attention_heads
+        classifier_pooling (`str`, *optional*, defaults to `"late"`):
+            The pooling strategy to use for the classifier. Can be one of ['bos', 'mean', 'late'].
+
+    ```python
+    >>> from transformers import EuroBertModel, EuroBertConfig
+
+    >>> # Initializing a EuroBert eurobert-base style configuration
+    >>> configuration = EuroBertConfig()
+
+    >>> # Initializing a model from the eurobert-base style configuration
+    >>> model = EuroBertModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+    model_type = "eurobert"
+
+    def __init__(
+        self,
+        vocab_size=128256,
+        hidden_size=768,
+        intermediate_size=3072,
+        num_hidden_layers=12,
+        num_attention_heads=12,
+        num_key_value_heads=None,
+        hidden_act="silu",
+        max_position_embeddings=8192,
+        initializer_range=0.02,
+        rms_norm_eps=1e-05,
+        bos_token_id=128000,
+        eos_token_id=128001,
+        pad_token_id=128001,
+        mask_token_id=128002,
+        pretraining_tp=1,
+        tie_word_embeddings=False,
+        rope_theta=250000.0,
+        rope_scaling=None,
+        attention_bias=False,
+        attention_dropout=0.0,
+        mlp_bias=False,
+        head_dim=None,
+        classifier_pooling="late",
+        **kwargs,
+    ):
+        # use_cache is specific to decoder models and should be set to False for encoder models
+        use_cache = kwargs.pop("use_cache", None)
+        if use_cache:
+            logger.warning_once(
+                "The `use_cache` argument to EuroBertConfig is set to `False`, as caching is never used for encoder models."
+            )
+
+        if num_key_value_heads is None:
+            num_key_value_heads = num_attention_heads
+
+        super().__init__(
+            vocab_size=vocab_size,
+            hidden_size=hidden_size,
+            intermediate_size=intermediate_size,
+            num_hidden_layers=num_hidden_layers,
+            num_attention_heads=num_attention_heads,
+            num_key_value_heads=num_key_value_heads,
+            hidden_act=hidden_act,
+            max_position_embeddings=max_position_embeddings,
+            initializer_range=initializer_range,
+            rms_norm_eps=rms_norm_eps,
+            use_cache=False,
+            bos_token_id=bos_token_id,
+            eos_token_id=eos_token_id,
+            pad_token_id=pad_token_id,
+            pretraining_tp=pretraining_tp,
+            tie_word_embeddings=tie_word_embeddings,
+            rope_theta=rope_theta,
+            rope_scaling=rope_scaling,
+            attention_bias=attention_bias,
+            attention_dropout=attention_dropout,
+            mlp_bias=mlp_bias,
+            head_dim=head_dim,
+            **kwargs,
+        )
+        self.mask_token_id = mask_token_id
+        self.clf_pooling = classifier_pooling
+
+
+__all__ = ["EuroBertConfig"]
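`EuroBertConfig` itself does not declare `relation_class_weights` or `span_class_weights`; those keys from `config.json` arrive via `**kwargs` and are presumably consumed by the multitask heads, which are not part of this commit. As a hedged illustration only (the logits/label shapes below are hypothetical, not the model's actual interface), per-class weights like these are typically passed to a weighted cross-entropy loss:

```python
# Sketch only: wiring per-class weights (e.g. config.span_class_weights) into
# a weighted cross-entropy loss. Shapes are illustrative.
import torch
import torch.nn as nn

span_class_weights = [
    0.09106958441686581,
    2.139054502968615,
    1.4801632619082092,
    0.9552814362568587,
    0.3344312144494511,
]  # values from the updated config.json

span_loss_fct = nn.CrossEntropyLoss(
    weight=torch.tensor(span_class_weights, dtype=torch.float32)
)

logits = torch.randn(8, len(span_class_weights))          # (tokens, num_span_classes)
labels = torch.randint(0, len(span_class_weights), (8,))  # (tokens,)
loss = span_loss_fct(logits, labels)
```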
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7ce980756462746c271fc6aa8e71b1da7efdab38434a890802ba384ca70220f6
+oid sha256:dd85103cb0c7240d129a3bf142c2b70130e269d41375579994d4101e7877d4d9
 size 423558482
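The `model.safetensors` entry is a Git LFS pointer update: the sha256 oid changes while the size stays at 423558482 bytes, i.e. the weights file was replaced in place. A small sketch for checking a downloaded copy against the new pointer's oid (the local path is a placeholder):

```python
# Verify a downloaded model.safetensors against the LFS pointer's sha256 oid.
import hashlib

expected_oid = "dd85103cb0c7240d129a3bf142c2b70130e269d41375579994d4101e7877d4d9"

sha256 = hashlib.sha256()
with open("model.safetensors", "rb") as f:  # placeholder path
    for chunk in iter(lambda: f.read(1024 * 1024), b""):
        sha256.update(chunk)

assert sha256.hexdigest() == expected_oid, "checksum mismatch"
```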