lxr2003 committed
Commit 9f4d160 · verified · 1 Parent(s): 2d45a50

Upload folder using huggingface_hub

.ipynb_checkpoints/config-checkpoint.json CHANGED
@@ -4,28 +4,7 @@
     "LlavaQwenForCausalLM"
   ],
   "attention_dropout": 0.0,
-  "beacon_accum": true,
-  "beacon_attend_prev": true,
-  "beacon_attn": "full-coverage",
-  "beacon_embed_init": "eos",
-  "beacon_parallel_window": 1,
-  "beacon_param": [
-    "q",
-    "k",
-    "v"
-  ],
-  "beacon_pos": "interleave",
-  "beacon_ratio": [
-    2,
-    4,
-    8
-  ],
-  "beacon_ratio_mix": "step-random",
-  "beacon_sink_size": 0,
-  "beacon_stride": 1440,
-  "beacon_window": 1440,
   "bos_token_id": 151643,
-  "enable_beacon": true,
   "eos_token_id": 151645,
   "freeze_mm_mlp_adapter": false,
   "freeze_mm_vision_resampler": false,
.ipynb_checkpoints/configuration_videoxlpro_llavaqwen-checkpoint.py ADDED
@@ -0,0 +1,157 @@
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+ QWEN2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+     "Qwen/Qwen2-7B-beta": "https://huggingface.co/Qwen/Qwen2-7B-beta/resolve/main/config.json",
+ }
+
+
+ class Qwen2Config(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`Qwen2Model`]. It is used to instantiate a
+     Qwen2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
+     with the defaults will yield a configuration similar to that of
+     Qwen2-7B-beta [Qwen/Qwen2-7B-beta](https://huggingface.co/Qwen/Qwen2-7B-beta).
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 151936):
+             Vocabulary size of the Qwen2 model. Defines the number of different tokens that can be represented by the
+             `inputs_ids` passed when calling [`Qwen2Model`].
+         hidden_size (`int`, *optional*, defaults to 4096):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 22016):
+             Dimension of the MLP representations.
+         num_hidden_layers (`int`, *optional*, defaults to 32):
+             Number of hidden layers in the Transformer encoder.
+         num_attention_heads (`int`, *optional*, defaults to 32):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         num_key_value_heads (`int`, *optional*, defaults to 32):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
+             `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+             by mean-pooling all the original heads within that group. For more details, check out [this
+             paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `32`.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         max_position_embeddings (`int`, *optional*, defaults to 32768):
+             The maximum sequence length that this model might ever be used with.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+             The epsilon used by the rms normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+             Whether the model's input and output word embeddings should be tied.
+         rope_theta (`float`, *optional*, defaults to 10000.0):
+             The base period of the RoPE embeddings.
+         use_sliding_window (`bool`, *optional*, defaults to `False`):
+             Whether to use sliding window attention.
+         sliding_window (`int`, *optional*, defaults to 4096):
+             Sliding window attention (SWA) window size. If not specified, will default to `4096`.
+         max_window_layers (`int`, *optional*, defaults to 28):
+             The number of layers that use SWA (Sliding Window Attention). The bottom layers use SWA while the top use full attention.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+
+     ```python
+     >>> from transformers import Qwen2Model, Qwen2Config
+
+     >>> # Initializing a Qwen2 style configuration
+     >>> configuration = Qwen2Config()
+
+     >>> # Initializing a model from the Qwen2-7B style configuration
+     >>> model = Qwen2Model(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "qwen2"
+     keys_to_ignore_at_inference = ["past_key_values"]
+
+     def __init__(
+         self,
+         vocab_size=151936,
+         hidden_size=4096,
+         intermediate_size=22016,
+         num_hidden_layers=32,
+         num_attention_heads=32,
+         num_key_value_heads=32,
+         hidden_act="silu",
+         max_position_embeddings=32768,
+         initializer_range=0.02,
+         rms_norm_eps=1e-6,
+         use_cache=True,
+         tie_word_embeddings=False,
+         rope_theta=10000.0,
+         use_sliding_window=False,
+         sliding_window=4096,
+         rope_scaling=None,
+         max_window_layers=28,
+         attention_dropout=0.0,
+         beacon_window=1024,
+         beacon_stride=1024,
+         beacon_attn="full-coverage",
+         beacon_ratio=[2, 4, 8, 16, 32],
+         beacon_ratio_mix="step-random",
+         beacon_param=[],
+         beacon_embed_init="eos",
+         beacon_sink_size=0,
+         beacon_attend_prev=True,
+         beacon_pos="interleave",
+         beacon_parallel_window=1,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.use_sliding_window = use_sliding_window
+         self.sliding_window = sliding_window
+         self.max_window_layers = max_window_layers
+         self.rope_scaling = rope_scaling
+
+         # for backward compatibility
+         if num_key_value_heads is None:
+             num_key_value_heads = num_attention_heads
+
+         self.num_key_value_heads = num_key_value_heads
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.use_cache = use_cache
+         self.rope_theta = rope_theta
+         self.attention_dropout = attention_dropout
+
+         self.beacon_window = beacon_window
+         self.beacon_stride = beacon_stride
+         self.beacon_attn = beacon_attn
+         self.beacon_ratio = beacon_ratio
+         self.beacon_ratio_mix = beacon_ratio_mix
+         self.beacon_param = beacon_param
+         self.beacon_embed_init = beacon_embed_init
+         self.beacon_sink_size = beacon_sink_size
+         self.beacon_attend_prev = beacon_attend_prev
+         self.beacon_pos = beacon_pos
+         self.beacon_parallel_window = beacon_parallel_window
+
+         super().__init__(
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
+
+
+ class LlavaQwenConfig(Qwen2Config):
+     model_type = "videoxlpro_llavaqwen"
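For reference, a minimal sketch of instantiating this configuration class with the beacon values that appear in the config-checkpoint.json diff above (the values come from that diff, not from the constructor defaults; the import assumes configuration_videoxlpro_llavaqwen.py is on the import path):

```python
# Minimal sketch, not part of the upload: rebuild the beacon settings shown in the
# config-checkpoint.json diff above. Assumes the configuration file is importable.
from configuration_videoxlpro_llavaqwen import LlavaQwenConfig

config = LlavaQwenConfig(
    beacon_window=1440,
    beacon_stride=1440,
    beacon_attn="full-coverage",
    beacon_ratio=[2, 4, 8],
    beacon_ratio_mix="step-random",
    beacon_param=["q", "k", "v"],
    beacon_embed_init="eos",
    beacon_sink_size=0,
    beacon_attend_prev=True,
    beacon_pos="interleave",
    beacon_parallel_window=1,
)
print(config.model_type)  # "videoxlpro_llavaqwen"
```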
.ipynb_checkpoints/modeling_videoxlpro_llavaqwen-checkpoint.py ADDED
@@ -0,0 +1,156 @@
+ # Copyright 2024 Hao Zhang
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ from typing import List, Optional, Tuple, Union, Dict
+ import torch
+ import torch.nn as nn
+ from torch.nn import CrossEntropyLoss
+
+ import transformers
+ from transformers import AutoConfig, AutoModelForCausalLM, LlamaConfig, LlamaModel, LlamaForCausalLM
+
+ from transformers.modeling_outputs import CausalLMOutputWithPast
+ from transformers.generation.utils import GenerateOutput
+
+ # from ...constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
+ from videoxlpro.videoxlpro.model.llava_arch import LlavaMetaModel, LlavaMetaForCausalLM
+ from transformers import Qwen2Config, Qwen2Model, Qwen2ForCausalLM
+
+ # from .qwen.modeling_qwen import QWenLMHeadModel, QWenModel
+ # from .qwen.configuration_qwen import QWenConfig
+
+
+ class LlavaQwenConfig(Qwen2Config):
+     model_type = "llava_qwen"
+
+
+ class LlavaQwenModel(LlavaMetaModel, Qwen2Model):
+     config_class = LlavaQwenConfig
+
+     def __init__(self, config: Qwen2Config):
+         super(LlavaQwenModel, self).__init__(config)
+
+
+ class LlavaQwenForCausalLM(Qwen2ForCausalLM, LlavaMetaForCausalLM):
+     config_class = LlavaQwenConfig
+
+     def __init__(self, config):
+         # super(Qwen2ForCausalLM, self).__init__(config)
+         Qwen2ForCausalLM.__init__(self, config)
+         config.model_type = "llava_qwen"
+         config.rope_scaling = None
+
+         self.model = LlavaQwenModel(config)
+         self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_model(self):
+         return self.model
+
+     def forward(
+         self,
+         input_ids: torch.LongTensor = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[List[torch.FloatTensor]] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         labels: Optional[torch.LongTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         images: Optional[torch.FloatTensor] = None,
+         image_sizes: Optional[List[List[int]]] = None,
+         return_dict: Optional[bool] = None,
+         modalities: Optional[List[str]] = ["image"],
+         dpo_forward: Optional[bool] = False,
+         cache_position=None,
+         time_embedding=None
+     ) -> Union[Tuple, CausalLMOutputWithPast]:
+
+         if inputs_embeds is None:
+             (input_ids, position_ids, attention_mask, past_key_values, inputs_embeds, labels) = self.prepare_inputs_labels_for_multimodal(input_ids, position_ids, attention_mask, past_key_values, labels, images, modalities, image_sizes, time_embedding)
+
+         # print("input_ids", input_ids)
+         # print("input_embeds", inputs_embeds.shape)
+         # print("labels", labels)
+         # print("mask", attention_mask)
+
+         if dpo_forward:
+             outputs = self.model(
+                 input_ids=input_ids,
+                 attention_mask=attention_mask,
+                 position_ids=position_ids,
+                 past_key_values=past_key_values,
+                 inputs_embeds=inputs_embeds,
+                 use_cache=use_cache,
+                 output_attentions=output_attentions,
+                 output_hidden_states=output_hidden_states,
+                 return_dict=return_dict,
+             )
+
+             hidden_states = outputs[0]
+             logits = self.lm_head(hidden_states)
+             return logits, labels
+
+         else:
+             return super().forward(
+                 input_ids=input_ids,
+                 attention_mask=attention_mask,
+                 position_ids=position_ids,
+                 past_key_values=past_key_values,
+                 inputs_embeds=inputs_embeds,
+                 labels=labels,
+                 use_cache=use_cache,
+                 output_attentions=output_attentions,
+                 output_hidden_states=output_hidden_states,
+                 return_dict=return_dict,
+             )
+
+     @torch.no_grad()
+     def generate(
+         self,
+         inputs: Optional[torch.Tensor] = None,
+         images: Optional[torch.Tensor] = None,
+         image_sizes: Optional[torch.Tensor] = None,
+         modalities: Optional[List[str]] = ["image"],
+         time_embedding=None,
+         **kwargs,
+     ) -> Union[GenerateOutput, torch.LongTensor]:
+         position_ids = kwargs.pop("position_ids", None)
+         attention_mask = kwargs.pop("attention_mask", None)
+         if "inputs_embeds" in kwargs:
+             raise NotImplementedError("`inputs_embeds` is not supported")
+
+         if images is not None:
+             (inputs, position_ids, attention_mask, _, inputs_embeds, _) = self.prepare_inputs_labels_for_multimodal(inputs, position_ids, attention_mask, None, None, images, modalities, image_sizes=image_sizes, time_embedding=time_embedding)
+
+         else:
+             inputs_embeds = self.get_model().embed_tokens(inputs)
+
+         return super().generate(position_ids=position_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, **kwargs)
+
+     def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
+         images = kwargs.pop("images", None)
+         image_sizes = kwargs.pop("image_sizes", None)
+         inputs = super().prepare_inputs_for_generation(input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, **kwargs)
+         if images is not None:
+             inputs["images"] = images
+         if image_sizes is not None:
+             inputs["image_sizes"] = image_sizes
+         return inputs
+
+ AutoConfig.register("llava_qwen", LlavaQwenConfig)
+ AutoModelForCausalLM.register(LlavaQwenConfig, LlavaQwenForCausalLM)
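A hedged usage sketch for the class above: the checkpoint path, prompt handling, and frame tensor below are placeholders (the real preprocessing is presumably handled by the videoxlpro package imported in this file), but the `generate` call follows the signature defined here:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "path/to/this/checkpoint"  # placeholder path, not a real repo id
tokenizer = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForCausalLM.from_pretrained(
    ckpt, trust_remote_code=True, torch_dtype=torch.float16
).cuda().eval()

input_ids = tokenizer("Describe the video.", return_tensors="pt").input_ids.cuda()
# Dummy frames standing in for the image-processor output; the shape is illustrative only.
frames = torch.randn(16, 3, 384, 384, dtype=torch.float16).cuda()

with torch.inference_mode():
    out = model.generate(
        input_ids,
        images=[frames],        # per-sample list consumed by prepare_inputs_labels_for_multimodal
        modalities=["video"],   # assumption: accepted values are defined in llava_arch
        max_new_tokens=64,
    )
print(tokenizer.decode(out[0], skip_special_tokens=True))
```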
config.json CHANGED
@@ -3,6 +3,10 @@
   "architectures": [
     "LlavaQwenForCausalLM"
   ],
+  "auto_map": {
+    "AutoConfig": "configuration_videoxlpro_llavaqwen.LlavaQwenConfig",
+    "AutoModelForCausalLM": "modeling_videoxlpro_llavaqwen.LlavaQwenForCausalLM"
+  },
   "attention_dropout": 0.0,
   "bos_token_id": 151643,
   "eos_token_id": 151645,
@@ -833,7 +837,7 @@
   "mm_vision_select_layer": -1,
   "mm_vision_tower": "/share/LXRlxr0_0/code/videoxlturbo2.0/videoxl/google/siglip-so400m-patch14-384",
   "mm_vision_tower_lr": null,
-  "model_type": "qwen2",
+  "model_type": "videoxlpro_llavaqwen",
   "num_attention_heads": 16,
   "num_hidden_layers": 36,
   "num_key_value_heads": 2,
modeling_videoxlpro_llavaqwen.py CHANGED
The diff for this file is too large to render. See raw diff