lxr2003 锅中冰 committed on
Commit 2e79461 · verified · 1 Parent(s): 8393537

Add file to support AutoConfig (#2)


- Upload configuration_videoxlpro_llavaqwen.py and modeling_videoxlpro_llavaqwen.py to support loading via AutoModel. (be892512452c30641890ea9bec81612ba605e441)


Co-authored-by: 锅中冰 <[email protected]>
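
With the `auto_map` entries added to config.json below, the checkpoint can be loaded through the generic Auto classes. A minimal sketch of the intended usage, assuming `path/to/videoxlpro-checkpoint` stands in for this repository (local path or Hub id) and that executing the repository's custom code is acceptable:

```python
from transformers import AutoConfig, AutoModelForCausalLM

ckpt = "path/to/videoxlpro-checkpoint"  # placeholder for this repository

# trust_remote_code=True lets transformers import the two uploaded modules
# named in "auto_map" instead of its built-in Qwen2 classes.
config = AutoConfig.from_pretrained(ckpt, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(ckpt, trust_remote_code=True)

print(type(config).__name__)  # LlavaQwenConfig
print(config.model_type)      # "videoxlpro_llavaqwen"
```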

config.json CHANGED
@@ -3,6 +3,10 @@
   "architectures": [
     "LlavaQwenForCausalLM"
   ],
+  "auto_map": {
+    "AutoConfig": "configuration_videoxlpro_llavaqwen.LlavaQwenConfig",
+    "AutoModelForCausalLM": "modeling_videoxlpro_llavaqwen.LlavaQwenForCausalLM"
+  },
   "attention_dropout": 0.0,
   "beacon_accum": true,
   "beacon_attend_prev": true,
@@ -854,7 +858,7 @@
   "mm_vision_select_layer": -1,
   "mm_vision_tower": "/share/LXRlxr0_0/code/videoxlturbo2.0/videoxl_adaptfps/google/siglip-so400m-patch14-384",
   "mm_vision_tower_lr": null,
-  "model_type": "qwen2",
+  "model_type": "videoxlpro_llavaqwen",
   "num_attention_heads": 16,
   "num_hidden_layers": 36,
   "num_key_value_heads": 2,
configuration_videoxlpro_llavaqwen.py ADDED
@@ -0,0 +1,157 @@
+from transformers.configuration_utils import PretrainedConfig
+from transformers.utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+QWEN2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+    "Qwen/Qwen2-7B-beta": "https://huggingface.co/Qwen/Qwen2-7B-beta/resolve/main/config.json",
+}
+
+
+class Qwen2Config(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`Qwen2Model`]. It is used to instantiate a
+    Qwen2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
+    with the defaults will yield a similar configuration to that of
+    Qwen2-7B-beta [Qwen/Qwen2-7B-beta](https://huggingface.co/Qwen/Qwen2-7B-beta).
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+    Args:
+        vocab_size (`int`, *optional*, defaults to 151936):
+            Vocabulary size of the Qwen2 model. Defines the number of different tokens that can be represented by the
+            `inputs_ids` passed when calling [`Qwen2Model`].
+        hidden_size (`int`, *optional*, defaults to 4096):
+            Dimension of the hidden representations.
+        intermediate_size (`int`, *optional*, defaults to 22016):
+            Dimension of the MLP representations.
+        num_hidden_layers (`int`, *optional*, defaults to 32):
+            Number of hidden layers in the Transformer encoder.
+        num_attention_heads (`int`, *optional*, defaults to 32):
+            Number of attention heads for each attention layer in the Transformer encoder.
+        num_key_value_heads (`int`, *optional*, defaults to 32):
+            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
+            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+            by meanpooling all the original heads within that group. For more details checkout [this
+            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `32`.
+        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+            The non-linear activation function (function or string) in the decoder.
+        max_position_embeddings (`int`, *optional*, defaults to 32768):
+            The maximum sequence length that this model might ever be used with.
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+            The epsilon used by the rms normalization layers.
+        use_cache (`bool`, *optional*, defaults to `True`):
+            Whether or not the model should return the last key/values attentions (not used by all models). Only
+            relevant if `config.is_decoder=True`.
+        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+            Whether the model's input and output word embeddings should be tied.
+        rope_theta (`float`, *optional*, defaults to 10000.0):
+            The base period of the RoPE embeddings.
+        use_sliding_window (`bool`, *optional*, defaults to `False`):
+            Whether to use sliding window attention.
+        sliding_window (`int`, *optional*, defaults to 4096):
+            Sliding window attention (SWA) window size. If not specified, will default to `4096`.
+        max_window_layers (`int`, *optional*, defaults to 28):
+            The number of layers that use SWA (Sliding Window Attention). The bottom layers use SWA while the top use full attention.
+        attention_dropout (`float`, *optional*, defaults to 0.0):
+            The dropout ratio for the attention probabilities.
+
+    ```python
+    >>> from transformers import Qwen2Model, Qwen2Config
+
+    >>> # Initializing a Qwen2 style configuration
+    >>> configuration = Qwen2Config()
+
+    >>> # Initializing a model from the Qwen2-7B style configuration
+    >>> model = Qwen2Model(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+    model_type = "qwen2"
+    keys_to_ignore_at_inference = ["past_key_values"]
+
+    def __init__(
+        self,
+        vocab_size=151936,
+        hidden_size=4096,
+        intermediate_size=22016,
+        num_hidden_layers=32,
+        num_attention_heads=32,
+        num_key_value_heads=32,
+        hidden_act="silu",
+        max_position_embeddings=32768,
+        initializer_range=0.02,
+        rms_norm_eps=1e-6,
+        use_cache=True,
+        tie_word_embeddings=False,
+        rope_theta=10000.0,
+        use_sliding_window=False,
+        sliding_window=4096,
+        rope_scaling=None,
+        max_window_layers=28,
+        attention_dropout=0.0,
+        beacon_window=1024,
+        beacon_stride=1024,
+        beacon_attn="full-coverage",
+        beacon_ratio=[2, 4, 8, 16, 32],
+        beacon_ratio_mix="step-random",
+        beacon_param=[],
+        beacon_embed_init="eos",
+        beacon_sink_size=0,
+        beacon_attend_prev=True,
+        beacon_pos="interleave",
+        beacon_parallel_window=1,
+        **kwargs,
+    ):
+        self.vocab_size = vocab_size
+        self.max_position_embeddings = max_position_embeddings
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.use_sliding_window = use_sliding_window
+        self.sliding_window = sliding_window
+        self.max_window_layers = max_window_layers
+        self.rope_scaling = rope_scaling
+
+        # for backward compatibility
+        if num_key_value_heads is None:
+            num_key_value_heads = num_attention_heads
+
+        self.num_key_value_heads = num_key_value_heads
+        self.hidden_act = hidden_act
+        self.initializer_range = initializer_range
+        self.rms_norm_eps = rms_norm_eps
+        self.use_cache = use_cache
+        self.rope_theta = rope_theta
+        self.attention_dropout = attention_dropout
+
+        self.beacon_window = beacon_window
+        self.beacon_stride = beacon_stride
+        self.beacon_attn = beacon_attn
+        self.beacon_ratio = beacon_ratio
+        self.beacon_ratio_mix = beacon_ratio_mix
+        self.beacon_param = beacon_param
+        self.beacon_embed_init = beacon_embed_init
+        self.beacon_sink_size = beacon_sink_size
+        self.beacon_attend_prev = beacon_attend_prev
+        self.beacon_pos = beacon_pos
+        self.beacon_parallel_window = beacon_parallel_window
+
+        super().__init__(
+            tie_word_embeddings=tie_word_embeddings,
+            **kwargs,
+        )
+
+
+class LlavaQwenConfig(Qwen2Config):
+    model_type = "videoxlpro_llavaqwen"
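
As a quick sanity check (not part of the commit), the uploaded configuration class can also be instantiated directly; a minimal sketch, assuming configuration_videoxlpro_llavaqwen.py is importable from the working directory:

```python
from configuration_videoxlpro_llavaqwen import LlavaQwenConfig

# Defaults mirror Qwen2 plus the beacon-specific fields defined in __init__.
config = LlavaQwenConfig()
print(config.model_type)    # "videoxlpro_llavaqwen"
print(config.beacon_ratio)  # [2, 4, 8, 16, 32]
print(config.beacon_pos)    # "interleave"

# Loading from the serialized config.json overrides these defaults with the
# stored values (e.g. num_hidden_layers=36 for this checkpoint).
config = LlavaQwenConfig.from_pretrained("path/to/videoxlpro-checkpoint")  # placeholder path
```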
modeling_videoxlpro_llavaqwen.py ADDED
The diff for this file is too large to render. See raw diff
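
If executing remote code is not desired, the same two modules could instead be registered with the Auto classes by hand; a sketch under that assumption, using the class names listed in `auto_map`:

```python
from transformers import AutoConfig, AutoModelForCausalLM

from configuration_videoxlpro_llavaqwen import LlavaQwenConfig
from modeling_videoxlpro_llavaqwen import LlavaQwenForCausalLM

# Map the custom model_type to the custom classes so that a plain
# from_pretrained() call resolves them without trust_remote_code.
AutoConfig.register("videoxlpro_llavaqwen", LlavaQwenConfig)
AutoModelForCausalLM.register(LlavaQwenConfig, LlavaQwenForCausalLM)

model = AutoModelForCausalLM.from_pretrained("path/to/videoxlpro-checkpoint")  # placeholder path
```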