dongyh committed on
Commit 3dedebd · verified · 1 Parent(s): 2ad113a

first commit

config.json ADDED
@@ -0,0 +1,71 @@
+ {
+   "_name_or_path": "/nfs100/dongyh/FANformer-1B",
+   "activation_type": "swiglu",
+   "alibi": false,
+   "alibi_bias_max": 8.0,
+   "architectures": [
+     "OLMoForCausalLM"
+   ],
+   "att_nolinear": false,
+   "attention_activation": null,
+   "attention_dropout": 0.0,
+   "attention_layer_norm": false,
+   "attention_layer_norm_with_affine": false,
+   "auto_map": {
+     "AutoConfig": "configuration_olmo.OLMoConfig",
+     "AutoModelForCausalLM": "modeling_fan.OLMoForCausalLM"
+   },
+   "bias_for_layer_norm": false,
+   "block_group_size": 1,
+   "block_type": "sequential",
+   "clip_qkv": null,
+   "d_model": 2048,
+   "emb_init_std": null,
+   "embedding_dropout": 0.0,
+   "embedding_layer_norm": false,
+   "embedding_size": 50304,
+   "eos_token_id": 50279,
+   "ffn_activation": null,
+   "flash_attention": true,
+   "include_bias": false,
+   "init_cutoff_factor": null,
+   "init_device": "cuda",
+   "init_fn": "mitchell",
+   "init_std": 0.02,
+   "layer_norm_eps": 1e-06,
+   "layer_norm_type": "rms",
+   "layer_norm_with_affine": true,
+   "max_sequence_length": 2048,
+   "mlp_hidden_size": null,
+   "mlp_ratio": 8,
+   "model_type": "hf_olmo",
+   "multi_query_attention": false,
+   "n_heads": 16,
+   "n_kv_heads": null,
+   "n_layers": 16,
+   "nofanbias": false,
+   "nonorm1": false,
+   "norm_after": false,
+   "p_ratio": 0.25,
+   "pad_token_id": 1,
+   "precision": "amp_bf16",
+   "residual_dropout": 0.0,
+   "rope": true,
+   "rope_full_precision": true,
+   "rope_theta": 10000,
+   "scale_emb_init": false,
+   "scale_logits": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.49.0",
+   "use_A": false,
+   "use_ATF": true,
+   "use_cache": true,
+   "use_fpn": false,
+   "use_fpneq": false,
+   "use_fpnnow": false,
+   "use_fpnpn": false,
+   "use_mod": false,
+   "use_mod_ffn": 0,
+   "vocab_size": 50280,
+   "weight_tying": true
+ }
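The "auto_map" entries above point transformers at the classes shipped in this repo (configuration_olmo.OLMoConfig and modeling_fan.OLMoForCausalLM) instead of the built-in OLMo classes, so loading requires trust_remote_code=True. Below is a minimal loading sketch; the repo id "dongyh/FANformer-1B" is an assumption inferred from "_name_or_path" and can be replaced with the actual Hub repo id or a local checkout path.

# Minimal loading sketch. The repo id is an assumption (see note above);
# a local directory containing these files works the same way.
from transformers import AutoConfig, AutoModelForCausalLM

repo_id = "dongyh/FANformer-1B"  # assumed repo id

# trust_remote_code=True makes transformers follow "auto_map" and import
# configuration_olmo.OLMoConfig / modeling_fan.OLMoForCausalLM from the repo.
config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(repo_id, trust_remote_code=True)

# Key shape parameters from config.json.
print(config.d_model, config.n_layers, config.n_heads)  # 2048 16 16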
configuration_olmo.py ADDED
@@ -0,0 +1,65 @@
+ """
+ OLMo configuration
+ """
+
+ from transformers import AutoConfig, PretrainedConfig
+ from transformers.utils import logging
+
+ from olmo.config import ModelConfig
+ from olmo.exceptions import OLMoConfigurationError
+
+ logger = logging.get_logger(__name__)
+
+
+ class OLMoConfig(PretrainedConfig):
+     model_type = "hf_olmo"
+     keys_to_ignore_at_inference = ["past_key_values"]  # TODO: confirm
+
+     def __init__(self, use_cache: bool = False, **kwargs):
+         model_config = ModelConfig()
+         all_kwargs = model_config.asdict()
+         all_kwargs.update(kwargs)
+         all_kwargs.update({"use_cache": use_cache})
+         all_kwargs.update(
+             {"architectures": all_kwargs.get("architectures", ["OLMoForCausalLM"]) or ["OLMoForCausalLM"]}
+         )
+         super().__init__(**all_kwargs)
+
+     @property
+     def num_attention_heads(self):
+         return self.n_heads
+
+     @property
+     def num_hidden_layers(self):
+         return self.n_layers
+
+     @property
+     def hidden_size(self):
+         return self.d_model
+
+     @property
+     def effective_n_kv_heads(self) -> int:
+         if self.n_kv_heads is None:
+             if self.multi_query_attention is True:
+                 return 1
+             else:
+                 return self.n_heads
+         else:
+             if self.multi_query_attention is None:
+                 return self.n_kv_heads
+             if self.multi_query_attention:
+                 n_kv_heads_should_be = 1
+             else:
+                 n_kv_heads_should_be = self.n_heads
+             if self.n_kv_heads == n_kv_heads_should_be:
+                 return n_kv_heads_should_be
+             else:
+                 raise OLMoConfigurationError(
+                     "You can't set `multi_query_attention` and `n_kv_heads` at the same time."
+                 )
+
+
+ # Register the config class so that it is available for transformer pipelines, auto-loading etc.
+ # OLMo is integrated directly in transformers from v4.40.0 onwards, but the version in transformers
+ # may not support the newest architectures we create.
+ AutoConfig.register("hf_olmo", OLMoConfig)
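For clarity, here is a standalone sketch that mirrors the branching in effective_n_kv_heads above: with no explicit n_kv_heads it falls back to 1 KV head for multi-query attention or n_heads otherwise, and when both knobs are set it only accepts consistent values. The helper function below is illustrative only and is not part of the repo; it avoids importing OLMoConfig so it runs without the olmo package installed.

# Illustrative mirror of OLMoConfig.effective_n_kv_heads (not part of the repo).
from typing import Optional

def effective_n_kv_heads(n_heads: int,
                         n_kv_heads: Optional[int],
                         multi_query_attention: Optional[bool]) -> int:
    if n_kv_heads is None:
        # No explicit KV head count: MQA means 1 KV head, otherwise full MHA.
        return 1 if multi_query_attention is True else n_heads
    if multi_query_attention is None:
        # Explicit KV head count, MQA flag unset: trust n_kv_heads (covers GQA).
        return n_kv_heads
    expected = 1 if multi_query_attention else n_heads
    if n_kv_heads != expected:
        raise ValueError("You can't set `multi_query_attention` and `n_kv_heads` at the same time.")
    return expected

# This checkpoint: n_heads=16, n_kv_heads=null, multi_query_attention=false -> 16 KV heads.
assert effective_n_kv_heads(16, None, False) == 16
assert effective_n_kv_heads(16, None, True) == 1
assert effective_n_kv_heads(16, 8, None) == 8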
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "eos_token_id": 50279,
+   "pad_token_id": 1,
+   "transformers_version": "4.49.0"
+ }
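These generation defaults (eos_token_id 50279, pad_token_id 1) match config.json and can be read back through transformers' GenerationConfig. A small sketch follows; as before, the repo id "dongyh/FANformer-1B" is an assumption and a local directory path works equally well.

# Sketch: inspect the generation defaults shipped with the checkpoint.
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("dongyh/FANformer-1B")  # assumed repo id
print(gen_cfg.eos_token_id, gen_cfg.pad_token_id)  # 50279 1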
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:abfcfa6274cec50b52ef98b4c541d8c7738c2d19bc146526114d71e55316312b
+ size 4908768976
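This is a Git LFS pointer, so the actual weights (about 4.9 GB) are stored out of band; per the LFS spec, the oid is the SHA-256 of the file contents. A sketch for checking a downloaded copy against the pointer, assuming model.safetensors sits in the current working directory (the local path is an assumption):

# Sketch: verify a downloaded model.safetensors against the Git LFS pointer above.
import hashlib

EXPECTED_OID = "abfcfa6274cec50b52ef98b4c541d8c7738c2d19bc146526114d71e55316312b"
EXPECTED_SIZE = 4908768976

path = "model.safetensors"  # assumed local path to the downloaded weights
sha = hashlib.sha256()
size = 0
with open(path, "rb") as f:
    # Hash in 1 MiB chunks to avoid loading ~4.9 GB into memory.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)
        size += len(chunk)

assert size == EXPECTED_SIZE, f"size mismatch: {size}"
assert sha.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("model.safetensors matches the LFS pointer")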