Upload folder using huggingface_hub
Browse files

- .gitattributes +1 -0
- README.md +986 -0
- added_tokens.json +33 -0
- chat_template.jinja +54 -0
- config.json +330 -0
- configuration_internvl_chat.py +97 -0
- generation_config.json +4 -0
- merges.txt +0 -0
- model-00001-of-00010.safetensors +3 -0
- model-00002-of-00010.safetensors +3 -0
- model-00003-of-00010.safetensors +3 -0
- model-00004-of-00010.safetensors +3 -0
- model-00005-of-00010.safetensors +3 -0
- model-00006-of-00010.safetensors +3 -0
- model-00007-of-00010.safetensors +3 -0
- model-00008-of-00010.safetensors +3 -0
- model-00009-of-00010.safetensors +3 -0
- model-00010-of-00010.safetensors +3 -0
- model.safetensors.index.json +0 -0
- modeling_internvl_chat.py +359 -0
- recipe.yaml +7 -0
- special_tokens_map.json +31 -0
- tokenizer.json +3 -0
- tokenizer_config.json +280 -0
- vocab.json +0 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,986 @@
---
language:
- en
- zh
tags:
- fp8
- quantization
- dynamic
- vision-language
- multimodal
- vllm
- llm-compressor
- internvl3
pipeline_tag: image-text-to-text
inference: false
license: mit
---

# 🔥 InternVL3-38B-FP8-Dynamic: Optimized Vision-Language Model 🔥

This is an **FP8 dynamic quantized** version of [OpenGVLab/InternVL3-38B](https://huggingface.co/OpenGVLab/InternVL3-38B), optimized for high-performance inference with vLLM.

The model uses **dynamic FP8 quantization** (FP8 weights with activation scales computed at runtime), achieving roughly 2x speedup with minimal accuracy degradation on vision-language tasks.

## 🚀 Key Features

- **FP8 Dynamic Quantization**: No calibration required; activation scales are computed per token at inference time
- **Vision-Language Optimized**: Specialized quantization recipe that preserves visual understanding
- **vLLM Ready**: Seamless integration with vLLM for production deployment
- **Memory Efficient**: ~50% memory reduction compared to the FP16 original
- **Performance Boost**: Up to 2x faster inference on H100/L40S GPUs

## 📊 Model Details

- **Original Model**: [OpenGVLab/InternVL3-38B](https://huggingface.co/OpenGVLab/InternVL3-38B)
- **Quantized Model**: InternVL3-38B-FP8-Dynamic
- **Quantization Method**: FP8 Dynamic (W8A8)
- **Quantization Library**: [LLM Compressor](https://github.com/vllm-project/llm-compressor) v0.5.1
- **Calibration Dataset**: N/A (dynamic quantization requires no calibration)
- **Attention Implementation**: Eager (standard attention, maximum compatibility)
- **Quantized by**: [JustJaro](https://huggingface.co/JustJaro)

## 🔧 Usage

### With vLLM (Recommended)

```python
from vllm import LLM, SamplingParams

# Load the quantized model
model = LLM(
    model="JustJaro/InternVL3-38B-FP8-Dynamic",
    trust_remote_code=True,
    max_model_len=8192,
    tensor_parallel_size=1,  # Adjust based on your GPU setup
)

# Generate response
sampling_params = SamplingParams(temperature=0.7, max_tokens=512)
response = model.generate("Describe this image: <image>", sampling_params)
print(response[0].outputs[0].text)
```
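
The example above leaves the `<image>` placeholder unfilled. A minimal sketch of attaching actual pixels through vLLM's multi-modal prompt dict (assuming a local `example.jpg`; the exact placeholder format can vary across vLLM versions):

```python
from PIL import Image
from vllm import LLM, SamplingParams

llm = LLM(model="JustJaro/InternVL3-38B-FP8-Dynamic", trust_remote_code=True, max_model_len=8192)
image = Image.open("example.jpg")  # hypothetical input image

outputs = llm.generate(
    {
        "prompt": "<image>\nDescribe this image in detail.",
        "multi_modal_data": {"image": image},  # pass the PIL image alongside the prompt
    },
    sampling_params=SamplingParams(temperature=0.7, max_tokens=512),
)
print(outputs[0].outputs[0].text)
```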

### With Transformers

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, AutoProcessor

model_id = "JustJaro/InternVL3-38B-FP8-Dynamic"
# Transformers loads the compressed-tensors FP8 checkpoint directly
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype="auto", device_map="cuda", trust_remote_code=True
)
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)

# Process image and text (image: a PIL.Image loaded elsewhere)
inputs = processor(text="What's in this image?", images=image, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=200)
response = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(response)
```

## 🏗️ Technical Specifications

### Hardware Requirements

- **Inference**: 40-50GB VRAM (single H100/A100 recommended)
- **Supported GPUs**: H100, L40S, A100 (80GB), RTX 4090 (2x for tensor parallelism)
- **GPU Architecture**: Ada Lovelace or Hopper for native FP8 performance

### Quantization Details

- **Weights**: FP8 E4M3 with static per-channel scales
- **Activations**: FP8 E4M3 with dynamic per-token scales (computed at runtime)
- **Preserved Components**: Vision tower, embeddings, normalization layers
- **Calibration**: None required (dynamic quantization)
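
These details can be checked against the checkpoint's own `quantization_config`. A small sketch using `huggingface_hub` (the keys below match the `config.json` shipped in this repo):

```python
import json
from huggingface_hub import hf_hub_download

cfg_path = hf_hub_download("JustJaro/InternVL3-38B-FP8-Dynamic", "config.json")
with open(cfg_path) as f:
    group = json.load(f)["quantization_config"]["config_groups"]["group_0"]

print(group["weights"]["strategy"], group["weights"]["dynamic"])                      # channel False
print(group["input_activations"]["strategy"], group["input_activations"]["dynamic"])  # token True
```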

## 📈 Performance Benchmarks

Expected performance improvements over the FP16 baseline:

- **Throughput**: ~2x improvement on H100 GPUs
- **Memory**: ~50% reduction (76GB → 38GB)
- **Latency**: ~2x faster time-to-first-token
- **Accuracy**: >99% retention on vision-language benchmarks
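
The memory figure is simple bytes-per-parameter arithmetic (weights only; KV cache and activations add overhead on top):

```python
params = 38e9  # approximate parameter count of InternVL3-38B
print(f"FP16 weights: {params * 2 / 1e9:.0f} GB")  # ~76 GB
print(f"FP8 weights:  {params * 1 / 1e9:.0f} GB")  # ~38 GB
```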

## 🔬 Package Versions

This model was created using:

```
llmcompressor==0.5.1
transformers==4.52.4
torch==2.7.0+cu126
vllm==0.9.0.1
```

## 📋 Quantization Script

<details>
<summary>Click to view the complete quantization script</summary>

```python
#!/usr/bin/env python3
"""
InternVL3-38B FP8 Static Quantization Script using LLM Compressor

This script quantizes the OpenGVLab/InternVL3-38B vision-language model to FP8 static
quantization for optimal performance with vLLM inference. It uses the latest llm-compressor
library (v0.5.1+) with multimodal support.

## Setup

1. **Create a .env file** in the same directory as this script:
   ```bash
   echo "HF_TOKEN=your_huggingface_token_here" > .env
   ```

2. **Get your HuggingFace token** from https://huggingface.co/settings/tokens
   - You need write access to push models
   - The token will be used to upload the quantized model

3. **Install dependencies**:
   ```bash
   pip install llmcompressor>=0.5.1 transformers torch loguru typer python-dotenv datasets
   ```

## Usage

    # Using HF_TOKEN from .env file (recommended)
    python quantize_internvl3_fp8.py

    # Or pass token directly (not recommended for security)
    python quantize_internvl3_fp8.py --hf-token <YOUR_HF_TOKEN>

    # Skip upload and save locally only
    python quantize_internvl3_fp8.py --no-upload

    # Disable flash attention (use SDPA attention instead)
    python quantize_internvl3_fp8.py --no-flash-attn

    # Use eager (standard) attention for maximum compatibility
    python quantize_internvl3_fp8.py --no-flash-attn --attn-eager

    # Use FP8-Dynamic quantization (no calibration needed)
    python quantize_internvl3_fp8.py --dynamic

## Quantization Types

### FP8-Static (default)
- **Best for**: Production deployments, maximum inference performance
- **Pros**: Best inference speed, pre-computed scales, optimal for vLLM
- **Cons**: Requires calibration dataset, longer quantization process
- **Use when**: You want maximum performance and have time for calibration

### FP8-Dynamic
- **Best for**: Quick quantization, when calibration data is unavailable
- **Pros**: No calibration needed, faster quantization process, simpler setup
- **Cons**: Slightly lower inference performance than static
- **Use when**: You need quick results or lack calibration data (use `--dynamic`)

## Attention Mechanisms

### Flash Attention 2 (default)
- **Best for**: Modern GPUs (Ampere/Ada Lovelace), production deployments, long sequences
- **Pros**: Lowest memory usage (up to 10x reduction), fastest inference, best for large models
- **Cons**: Requires compatible GPU, may have issues with some model architectures
- **Use when**: You have a modern GPU and want maximum performance

### SDPA (Scaled Dot-Product Attention)
- **Best for**: Older GPUs, debugging, when flash attention fails
- **Pros**: Good performance, wide compatibility, native PyTorch implementation
- **Cons**: Higher memory usage than flash attention, slightly slower
- **Use when**: Flash attention isn't supported or causes issues (use `--no-flash-attn`)

### Eager (Standard) Attention
- **Best for**: Maximum compatibility, debugging attention-related issues
- **Pros**: Works everywhere, simplest implementation, easiest to debug
- **Cons**: Highest memory usage, slowest performance
- **Use when**: Both flash attention and SDPA cause issues (use `--no-flash-attn --attn-eager`)

## Important Notes

- The script will automatically upload the tokenizer files and README.md to HuggingFace
- All critical files (tokenizer_config.json, tokenizer.json/model, README.md) are verified before upload
- The upload process will list all uploaded files with their sizes for verification
- If upload fails, the quantized model is still saved locally and can be uploaded manually later
- For optimal vLLM performance, use the default flash attention unless you encounter compatibility issues
- **trust_remote_code_model=True** is set by default as required for InternVL3 and most VLM models
- For better memory management on multi-GPU setups, set: `export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True`
"""

import os
import shutil
import subprocess
import sys
from pathlib import Path
from typing import Optional

import torch
import typer
from loguru import logger
from dotenv import load_dotenv, find_dotenv
from huggingface_hub import HfApi, whoami

# Import llm-compressor modules
try:
    from llmcompressor.modifiers.quantization import QuantizationModifier
    from llmcompressor import oneshot
    from transformers import AutoModelForCausalLM, AutoTokenizer, AutoProcessor
    from datasets import load_dataset, Dataset
except ImportError as e:
    logger.error(f"Required packages not installed: {e}")
    logger.error("Please install: pip install llmcompressor>=0.5.1 transformers torch loguru typer python-dotenv datasets")
    sys.exit(1)

# Load environment variables
load_dotenv(find_dotenv())

app = typer.Typer(rich_markup_mode="rich")

# Configure loguru
logger.remove()
logger.add(sys.stderr, format="<green>{time:YYYY-MM-DD HH:mm:ss}</green> | <level>{level: <8}</level> | <cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> - <level>{message}</level>")
logger.add("quantization.log", format="{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {name}:{function}:{line} - {message}")

# Constants
SOURCE_MODEL = "OpenGVLab/InternVL3-38B"
DEFAULT_HF_USERNAME = "JustJaro"
DEFAULT_CALIBRATION_DATASET = "neural-bridge/MS-COCO-2017-for-vlm-training"
DEFAULT_SAMPLES = 256
DEFAULT_SEQ_LEN = 2048

def get_quantized_model_name(dynamic: bool) -> str:
    return f"InternVL3-38B-FP8-{'Dynamic' if dynamic else 'Static'}"

def check_gpu_memory():
    """Check available GPU memory and configure for multi-GPU setup."""
    if not torch.cuda.is_available():
        logger.warning("No GPU detected - quantization will be very slow")
        return

    gpu_count = torch.cuda.device_count()
    logger.info(f"Found {gpu_count} GPU(s)")

    total_memory = 0
    for i in range(gpu_count):
        props = torch.cuda.get_device_properties(i)
        memory_gb = props.total_memory / (1024**3)
        total_memory += memory_gb
        logger.info(f"  GPU {i}: {props.name} ({memory_gb:.1f} GB)")

    logger.info(f"Total GPU memory: {total_memory:.1f} GB")

    # Check if we have enough memory for the model
    if total_memory < 150:  # InternVL3-38B needs ~134GB peak
        logger.warning("⚠️  Total GPU memory may be insufficient for quantization")
        logger.warning("   Consider using PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True")
    else:
        logger.success(f"✅ Sufficient GPU memory available ({total_memory:.1f} GB >= 150 GB recommended)")

def get_package_versions() -> dict:
    """Get installed package versions for reproducibility."""
    try:
        import pkg_resources
        packages = ['llmcompressor', 'transformers', 'torch', 'vllm']
        versions = {}
        for pkg in packages:
            try:
                version = pkg_resources.get_distribution(pkg).version
                versions[pkg] = version
            except pkg_resources.DistributionNotFound:
                versions[pkg] = "not installed"
        return versions
    except Exception as e:
        logger.warning(f"Could not get package versions: {e}")
        return {}

def get_hf_username(hf_token: str) -> str:
    """Get Hugging Face username from token."""
    try:
        api = HfApi(token=hf_token)
        user_info = whoami(token=hf_token)
        username = user_info.get("name") or user_info.get("fullname") or DEFAULT_HF_USERNAME
        logger.info(f"Hugging Face username: {username}")
        return username
    except Exception as e:
        logger.warning(f"Could not get HF username: {e}, using default: {DEFAULT_HF_USERNAME}")
        return DEFAULT_HF_USERNAME

def create_quantization_recipe(dynamic: bool = False) -> list:
    """Create FP8 quantization recipe for VLM."""
    scheme = "FP8_DYNAMIC" if dynamic else "FP8"

    logger.info(f"Creating {scheme} quantization recipe for vision-language model")

    if dynamic:
        logger.info("Using FP8 Dynamic quantization:")
        logger.info("  • No calibration data required")
        logger.info("  • Activation scales computed during inference")
        logger.info("  • Simpler quantization process")
        logger.info("  • Slightly lower performance than static")
    else:
        logger.info("Using FP8 Static quantization:")
        logger.info("  • Requires calibration data")
        logger.info("  • Pre-computed activation scales")
        logger.info("  • Best inference performance")
        logger.info("  • More complex quantization process")

    recipe = [
        QuantizationModifier(
            targets=["Linear"],
            scheme=scheme,
            ignore=[
                "re:.*lm_head",
                "re:.*vision.*",
                "re:.*visual.*",
                "re:.*image.*",
                "re:.*patch_embed.*",
                "re:.*pos_embed.*",
                "re:.*norm.*",
                "re:.*layernorm.*",
            ]
        )
    ]

    logger.info(f"Quantization recipe created with {scheme} scheme")
    logger.info("Ignoring vision components for optimal compatibility")

    return recipe

def validate_model_compatibility(model_id: str):
    """Validate that the model is compatible with quantization."""
    logger.info(f"Validating model compatibility: {model_id}")

    try:
        # Try to load model config to check architecture
        from transformers import AutoConfig
        config = AutoConfig.from_pretrained(model_id, trust_remote_code=True)
        logger.info(f"Model architecture: {config.model_type if hasattr(config, 'model_type') else 'Unknown'}")
        logger.success("Model configuration loaded successfully")
    except Exception as e:
        logger.error(f"Could not load model configuration: {e}")
        raise typer.Exit(1)

def estimate_memory_requirements(model_id: str) -> dict:
    """Estimate memory requirements for quantization process."""
    # Rough estimates for InternVL3-38B
    estimates = {
        "original_model": 76,  # GB (38B * 2 bytes for FP16)
        "quantized_output": 38,  # GB (38B * 1 byte for FP8)
        "calibration_overhead": 20,  # GB (estimated)
        "total_peak": 134  # GB (original + output + overhead)
    }

    logger.info("Memory requirement estimates:")
    for key, value in estimates.items():
        logger.info(f"  {key.replace('_', ' ').title()}: {value} GB")

    return estimates

def generate_model_card(
    source_model: str,
    quantized_model_name: str,
    hf_username: str,
    calibration_dataset: str,
    num_samples: int,
    seq_length: int,
    package_versions: dict,
    script_content: str,
    flash_attn_used: bool,
    attention_implementation: str,
    dynamic: bool = False
) -> str:
    """Generate comprehensive model card for the quantized VLM."""

    # Determine attention description for model card
    if attention_implementation == "flash_attention_2":
        attention_desc = "Flash Attention 2 (memory efficient, fastest)"
    elif attention_implementation == "sdpa":
        attention_desc = "SDPA (PyTorch native, good compatibility)"
    else:  # eager
        attention_desc = "Eager (standard attention, maximum compatibility)"

    model_card = f"""---
language:
- en
- zh
tags:
- fp8
- quantization
- static
- vision-language
- multimodal
- vllm
- llm-compressor
- internvl3
pipeline_tag: image-text-to-text
inference: false
license: mit
---

# 🔥 InternVL3-38B-FP8-Static: Optimized Vision-Language Model 🔥

This is a **FP8 static quantized** version of [{source_model}](https://huggingface.co/{source_model}), optimized for high-performance inference with vLLM.

The model utilizes **static FP8 quantization** for optimal inference performance, achieving ~2x speedup with minimal accuracy degradation on vision-language tasks.

## 🚀 Key Features

- **FP8 Static Quantization**: Maximum inference performance with pre-computed activation scales
- **Vision-Language Optimized**: Specialized quantization recipe that preserves visual understanding
- **vLLM Ready**: Seamless integration with vLLM for production deployment
- **Memory Efficient**: ~50% memory reduction compared to FP16 original
- **Performance Boost**: Up to 2x faster inference on H100/L40S GPUs

## 📊 Model Details

- **Original Model**: [{source_model}](https://huggingface.co/{source_model})
- **Source Model**: {source_model}
- **Quantized Model**: {quantized_model_name}
- **Quantization Method**: FP8 {'Dynamic' if dynamic else 'Static'} (W8A8)
- **Quantization Library**: [LLM Compressor](https://github.com/vllm-project/llm-compressor) v{package_versions.get('llmcompressor', 'latest')}
- **Calibration Dataset**: {calibration_dataset}{f' ({num_samples} samples, seq_len={seq_length})' if not dynamic else ''}
- **Attention Implementation**: {attention_desc}
- **Quantized by**: [{hf_username}](https://huggingface.co/{hf_username})

## 🔧 Usage

### With vLLM (Recommended)

```python
from vllm import LLM, SamplingParams

# Load the quantized model
model = LLM(
    model="{hf_username}/{quantized_model_name}",
    trust_remote_code=True,
    max_model_len=8192,
    tensor_parallel_size=1,  # Adjust based on your GPU setup
)

# Generate response
sampling_params = SamplingParams(temperature=0.7, max_tokens=512)
response = model.generate("Describe this image: <image>", sampling_params)
print(response[0].outputs[0].text)
```

### With Transformers + LLM Compressor

```python
from transformers import AutoTokenizer, AutoProcessor
from llmcompressor import LLM

model_id = "{hf_username}/{quantized_model_name}"
model = LLM.load(model_id, device="cuda")
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)

# Process image and text
inputs = processor("What's in this image?", image, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=200)
response = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(response)
```

## 🏗️ Technical Specifications

### Hardware Requirements

- **Inference**: 40-50GB VRAM (single H100/A100 recommended)
- **Supported GPUs**: H100, L40S, A100 (80GB), RTX 4090 (2x for tensor parallelism)
- **GPU Architecture**: Ada Lovelace, Hopper (for optimal FP8 performance)

### Quantization Details

- **Weights**: FP8 E4M3 with static per-tensor scales
- **Activations**: FP8 E4M3 with static per-tensor scales
- **Preserved Components**: Vision tower, embeddings, normalization layers
- **Calibration**: {num_samples} samples from multimodal dataset

## 📈 Performance Benchmarks

Expected performance improvements over FP16 baseline:

- **Throughput**: ~2x improvement on H100 GPUs
- **Memory**: ~50% reduction (76GB → 38GB)
- **Latency**: ~2x faster time-to-first-token
- **Accuracy**: >99% retention on vision-language benchmarks

## 🔬 Package Versions

This model was created using:

```
llmcompressor=={package_versions.get('llmcompressor', 'latest')}
transformers=={package_versions.get('transformers', 'latest')}
torch=={package_versions.get('torch', 'latest')}
vllm=={package_versions.get('vllm', 'latest')}
```

## 📋 Quantization Script

<details>
<summary>Click to view the complete quantization script</summary>

```python
{script_content}
```

</details>

## 🎯 Use Cases

This optimized model is ideal for:

- **Production VLM serving** with high throughput requirements
- **Real-time image analysis** and visual question answering
- **Document AI** and OCR applications
- **Multimodal chatbots** and virtual assistants
- **Edge deployment** on high-end GPUs

## ⚠️ Important Notes

- Requires GPU with FP8 support (H100, L40S) for optimal performance
- Falls back to FP8-Marlin on Ampere GPUs (A100) with reduced benefits
- Vision components preserved in FP16 for maximum compatibility
- Calibrated with diverse multimodal data for robust performance

## 🚫 Limitations

- **Specialized hardware**: Best performance requires H100-class GPUs
- **Model size**: Still requires significant VRAM despite quantization
- **Research use**: Inherits license and usage restrictions from base model

## 📄 License

This quantized model inherits the license from the original model.
Original model: [{source_model}](https://huggingface.co/{source_model})

## 🙏 Acknowledgments

- **Original Model**: OpenGVLab team for InternVL3-38B
- **Quantization**: LLM Compressor and Neural Magic team
- **Inference**: vLLM project for optimized serving

## 📞 Contact

For questions about this quantized model:
- **Issues**: [Create an issue](https://huggingface.co/{hf_username}/{quantized_model_name}/discussions)
- **Original Model**: Refer to [{source_model}](https://huggingface.co/{source_model})

---

*Quantized with ❤️ using LLM Compressor for the open-source community*
"""

    return model_card

def read_script_content() -> str:
    """Read the current script content for inclusion in model card."""
    try:
        script_path = Path(__file__).resolve()
        with open(script_path, 'r', encoding='utf-8') as f:
            return f.read()
    except Exception as e:
        logger.warning(f"Could not read script content: {e}")
        return "Script content unavailable"

@app.command()
def main(
    source_model: str = typer.Option(
        SOURCE_MODEL,
        help="Source model to quantize (HuggingFace model ID)"
    ),
    hf_token: Optional[str] = typer.Option(
        None,
        help="Hugging Face token for uploading (can be set via HF_TOKEN env var in .env file)",
        envvar="HF_TOKEN"
    ),
    calibration_dataset: str = typer.Option(
        DEFAULT_CALIBRATION_DATASET,
        help="Calibration dataset for static quantization"
    ),
    num_samples: int = typer.Option(
        DEFAULT_SAMPLES,
        help="Number of calibration samples"
    ),
    seq_length: int = typer.Option(
        DEFAULT_SEQ_LEN,
        help="Maximum sequence length for calibration"
    ),
    output_dir: Optional[Path] = typer.Option(
        None,
        help="Output directory (default: ~/models/quantized/{model_name})"
    ),
    upload: bool = typer.Option(
        True,
        help="Upload to Hugging Face Hub"
    ),
    force: bool = typer.Option(
        False,
        help="Overwrite existing output directory"
    ),
    dry_run: bool = typer.Option(
        False,
        help="Validate setup without actually quantizing"
    ),
    no_flash_attn: bool = typer.Option(
        False,
        help="Disable flash attention and use SDPA (Scaled Dot-Product Attention) instead - good for compatibility"
    ),
    attn_eager: bool = typer.Option(
        False,
        help="Use eager (standard) attention instead of SDPA - maximum compatibility but slower"
    ),
    dynamic: bool = typer.Option(
        False,
        "--dynamic",
        help="Use FP8-Dynamic quantization instead of FP8-Static (no calibration needed)"
    )
):
    """
    Quantize InternVL3-38B to FP8 static format for optimal vLLM inference.

    This script performs FP8 static quantization which provides the best performance
    for production serving compared to dynamic quantization.
    """

    logger.info("🚀 Starting InternVL3-38B FP8 Static Quantization")
    logger.info(f"Source model: {source_model}")

    # Check for memory management environment variable
    cuda_alloc_conf = os.environ.get('PYTORCH_CUDA_ALLOC_CONF', 'Not set')
    if 'expandable_segments:True' not in cuda_alloc_conf:
        logger.warning("💡 For better memory management, consider setting:")
        logger.warning("   export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True")
    else:
        logger.info("✅ PYTORCH_CUDA_ALLOC_CONF is configured for optimal memory management")

    # Validate HF token
    if upload and not hf_token:
        logger.error("HF_TOKEN required for upload. Set via --hf-token or HF_TOKEN env var")
        raise typer.Exit(1)

    # Setup paths
    quantized_model_name = get_quantized_model_name(dynamic)
    if not output_dir:
        output_dir = Path.home() / "models" / "quantized" / quantized_model_name

    output_dir = Path(output_dir).resolve()
    logger.info(f"Output directory: {output_dir}")

    if output_dir.exists() and not force:
        logger.error(f"Output directory exists: {output_dir}")
        logger.error("Use --force to overwrite or choose different path")
        raise typer.Exit(1)

    # Pre-flight checks
    logger.info("🔍 Running pre-flight checks...")
    check_gpu_memory()
    validate_model_compatibility(source_model)
    estimate_memory_requirements(source_model)

    # Get package versions and user info
    package_versions = get_package_versions()
    hf_username = get_hf_username(hf_token) if hf_token else DEFAULT_HF_USERNAME

    logger.info(f"Using packages: {package_versions}")

    if dry_run:
        logger.info("✅ Dry run completed successfully")
        logger.info("All checks passed - ready for quantization")
        return

    # Create output directory
    output_dir.mkdir(parents=True, exist_ok=True)

    try:
        logger.info("📥 Loading model and tokenizer...")
        logger.warning("This will require significant GPU memory - monitor your VRAM usage")

        # Validate attention configuration
        if attn_eager and not no_flash_attn:
            logger.warning("⚠️ --attn-eager requires --no-flash-attn, automatically disabling flash attention")
            no_flash_attn = True

        # Determine attention implementation
        if not torch.cuda.is_available():
            if attn_eager:
                logger.warning("⚠️ CUDA not available - using eager (standard) attention")
                attn_implementation = "eager"
            else:
                logger.warning("⚠️ CUDA not available - using SDPA (scaled dot-product attention)")
                attn_implementation = "sdpa"
        elif no_flash_attn:
            if attn_eager:
                logger.info("🐌 Using eager (standard) attention as requested")
                logger.info("   Eager attention characteristics:")
                logger.info("   • Maximum compatibility with all hardware")
                logger.info("   • Simplest implementation (easiest to debug)")
                logger.info("   • Higher memory usage than SDPA or flash attention")
                logger.info("   • Slower than optimized implementations")
                logger.info("   • Use only when other implementations cause issues")
                attn_implementation = "eager"
            else:
                logger.info("📌 Flash attention disabled by user - using SDPA (Scaled Dot-Product Attention)")
                logger.info("   SDPA provides:")
                logger.info("   • Better compatibility across different GPU architectures")
                logger.info("   • Good performance (faster than standard attention)")
                logger.info("   • Native PyTorch implementation (no extra dependencies)")
                logger.info("   • Slightly higher memory usage than flash attention")
                attn_implementation = "sdpa"
        else:
            logger.info("⚡ Flash Attention 2 enabled")
            logger.info("   Benefits:")
            logger.info("   • Lowest memory usage (up to 10x reduction)")
            logger.info("   • Fastest inference speed")
            logger.info("   • Best for large models and long sequences")
            logger.info("   • Requires compatible GPU (Ampere or newer)")
            attn_implementation = "flash_attention_2"

        # Load model with multimodal support across all GPUs
        model = AutoModelForCausalLM.from_pretrained(
            source_model,
            torch_dtype=torch.bfloat16,  # Use bfloat16 for stability
            device_map="balanced",  # Distribute more evenly across all 4 GPUs
            trust_remote_code=True,  # Required for InternVL3
            attn_implementation=attn_implementation,
            max_memory={i: "40GB" for i in range(torch.cuda.device_count())},  # Reserve some memory per GPU
        )

        # Load processor (handles both text and images)
        processor = AutoProcessor.from_pretrained(
            source_model,
            trust_remote_code=True
        )

        logger.success("✅ Model and processor loaded successfully")

        # Log GPU memory usage after loading
        for i in range(torch.cuda.device_count()):
            allocated = torch.cuda.memory_allocated(i) / (1024**3)
            cached = torch.cuda.memory_reserved(i) / (1024**3)
            logger.info(f"  GPU {i}: {allocated:.1f}GB allocated, {cached:.1f}GB cached")

        # Create quantization recipe
        recipe = create_quantization_recipe(dynamic=dynamic)

        # Handle output directory cleanup if force is enabled
        if force and output_dir.exists():
            logger.info(f"🗑️ Removing existing output directory: {output_dir}")
            import shutil
            shutil.rmtree(output_dir)

        # Ensure output directory exists
        output_dir.mkdir(parents=True, exist_ok=True)

        if dynamic:
            logger.info("🚀 Using FP8-Dynamic quantization - no calibration needed!")
            logger.info("Note: trust_remote_code_model=True is set by default for VLM compatibility")

            # For dynamic quantization, we can use the model directly without a dataset
            oneshot(
                model=model,  # Use the already loaded model
                recipe=recipe,
                output_dir=str(output_dir),
                trust_remote_code_model=True,
            )
        else:
            logger.info("🔄 Starting FP8 static quantization...")
            logger.info("This process will take 30-60 minutes depending on hardware")
            logger.warning("Monitor GPU memory usage - process may require 120GB+ peak VRAM")

            # Load calibration dataset
            logger.info(f"📊 Using calibration dataset: {calibration_dataset}")
            logger.info(f"   Samples: {num_samples}, Max sequence length: {seq_length}")

            # Clear GPU cache before quantization to ensure maximum available memory
            import gc
            gc.collect()
            torch.cuda.empty_cache()
            logger.info("🧹 Cleared GPU cache before quantization")

            # Apply quantization with calibration dataset
            oneshot(
                model=model,  # Use the already loaded model object to avoid double loading
                dataset=calibration_dataset,
                recipe=recipe,
                output_dir=str(output_dir),
                max_seq_length=seq_length,
                num_calibration_samples=num_samples,
                trust_remote_code_model=True,
            )

        logger.success("🎉 Quantization completed successfully!")

        # Save processor and tokenizer alongside quantized model
        logger.info("💾 Saving processor and tokenizer configuration...")
        processor.save_pretrained(output_dir)

        # Also save tokenizer explicitly to ensure all tokenizer files are saved
        tokenizer = AutoTokenizer.from_pretrained(source_model, trust_remote_code=True)
        tokenizer.save_pretrained(output_dir)
        logger.success("✅ Tokenizer and processor saved successfully")

        # Generate and save model card
        logger.info("📝 Generating model card...")
        script_content = read_script_content()
        model_card = generate_model_card(
            source_model=source_model,
            quantized_model_name=quantized_model_name,
            hf_username=hf_username,
            calibration_dataset=calibration_dataset if not dynamic else "N/A",
            num_samples=num_samples if not dynamic else 0,
            seq_length=seq_length if not dynamic else 0,
            package_versions=package_versions,
            script_content=script_content,
            flash_attn_used=not no_flash_attn and torch.cuda.is_available(),
            attention_implementation=attn_implementation,
            dynamic=dynamic
        )

        model_card_path = output_dir / "README.md"
        with open(model_card_path, 'w', encoding='utf-8') as f:
            f.write(model_card)

        logger.success(f"📄 Model card saved: {model_card_path}")

        # Upload to Hugging Face Hub
        if upload and hf_token:
            logger.info("⬆️ Uploading to Hugging Face Hub...")

            # Verify critical files exist before upload
            critical_files = ["README.md", "tokenizer_config.json", "tokenizer.json"]
            missing_files = []

            for file in critical_files:
                file_path = output_dir / file
                if file_path.exists():
                    logger.info(f"✅ Found {file}")
                else:
                    # Some models might use different tokenizer files
                    if file == "tokenizer.json":
                        # Check for alternative tokenizer files
                        alt_files = ["tokenizer.model", "vocab.json", "merges.txt"]
                        found_alt = any((output_dir / alt).exists() for alt in alt_files)
                        if found_alt:
                            logger.info(f"✅ Found alternative tokenizer files")
                        else:
                            missing_files.append(file)
                    else:
                        missing_files.append(file)

            if missing_files:
                logger.warning(f"⚠️ Missing files: {', '.join(missing_files)}")

            try:
                from huggingface_hub import HfApi

                api = HfApi(token=hf_token)

                # Create repository if it doesn't exist
                repo_id = f"{hf_username}/{quantized_model_name}"
                logger.info(f"Creating/updating repository: {repo_id}")

                try:
                    api.create_repo(repo_id=repo_id, private=False, exist_ok=True)
                    logger.info("✅ Repository created/verified")
                except Exception as repo_e:
                    logger.warning(f"Repository creation warning: {repo_e}")

                # Upload folder contents
                logger.info("📤 Uploading model files...")
                api.upload_folder(
                    folder_path=str(output_dir),
                    repo_id=repo_id,
                    repo_type="model"
                )

                logger.success("🎉 Model uploaded successfully!")
                logger.success(f"🔗 View at: https://huggingface.co/{hf_username}/{quantized_model_name}")

                # List uploaded files
                logger.info("Uploaded files include:")
                for file in output_dir.iterdir():
                    if file.is_file():
                        size_mb = file.stat().st_size / (1024 * 1024)
                        logger.info(f"  - {file.name} ({size_mb:.1f} MB)")

            except Exception as e:
                logger.error(f"Upload failed: {e}")
                logger.info("Model saved locally - you can upload manually later")

        # Final summary
        logger.info("✨ Quantization Summary:")
        logger.info(f"  📁 Model saved to: {output_dir}")
        logger.info(f"  🔢 Quantization type: FP8-{'Dynamic' if dynamic else 'Static'}")
        logger.info("  🔢 Original size: ~76GB (FP16)")
        logger.info("  📉 Quantized size: ~38GB (FP8)")
        logger.info("  🚀 Expected speedup: ~2x on H100/L40S")
        logger.info("  💾 Memory savings: ~50%")

        if upload and hf_token:
            logger.info(f"  🌐 HuggingFace: https://huggingface.co/{hf_username}/{quantized_model_name}")

        logger.success("🎊 Quantization pipeline completed successfully!")

    except Exception as e:
        logger.error(f"❌ Quantization failed: {type(e).__name__}: {str(e)}")
        logger.error("Check logs above for detailed error information")
        import traceback
        logger.error("Full traceback:")
        logger.error(traceback.format_exc())
        raise typer.Exit(1)

if __name__ == "__main__":
    app()

```

</details>

## 🎯 Use Cases

This optimized model is ideal for:

- **Production VLM serving** with high throughput requirements
- **Real-time image analysis** and visual question answering
- **Document AI** and OCR applications
- **Multimodal chatbots** and virtual assistants
- **Edge deployment** on high-end GPUs

## ⚠️ Important Notes

- Requires a GPU with FP8 support (H100, L40S) for optimal performance
- Falls back to FP8-Marlin kernels on Ampere GPUs (A100) with reduced benefits
- Vision components are preserved at higher precision (BF16) for maximum compatibility
- No calibration pass was needed; activation scales are computed dynamically at inference time

## 🚫 Limitations

- **Specialized hardware**: Best performance requires H100-class GPUs
- **Model size**: Still requires significant VRAM despite quantization
- **Research use**: Inherits license and usage restrictions from the base model

## 📄 License

This quantized model inherits the license from the original model.
Original model: [OpenGVLab/InternVL3-38B](https://huggingface.co/OpenGVLab/InternVL3-38B)

## 🙏 Acknowledgments

- **Original Model**: OpenGVLab team for InternVL3-38B
- **Quantization**: LLM Compressor and Neural Magic team
- **Inference**: vLLM project for optimized serving

## 📞 Contact

For questions about this quantized model:
- **Issues**: [Create an issue](https://huggingface.co/JustJaro/InternVL3-38B-FP8-Dynamic/discussions)
- **Original Model**: Refer to [OpenGVLab/InternVL3-38B](https://huggingface.co/OpenGVLab/InternVL3-38B)

---

*Quantized with ❤️ using LLM Compressor for the open-source community*
added_tokens.json
ADDED
@@ -0,0 +1,33 @@
{
  "</box>": 151673,
  "</img>": 151666,
  "</quad>": 151669,
  "</ref>": 151671,
  "</tool_call>": 151658,
  "<IMG_CONTEXT>": 151667,
  "<box>": 151672,
  "<img>": 151665,
  "<quad>": 151668,
  "<ref>": 151670,
  "<tool_call>": 151657,
  "<|box_end|>": 151649,
  "<|box_start|>": 151648,
  "<|endoftext|>": 151643,
  "<|file_sep|>": 151664,
  "<|fim_middle|>": 151660,
  "<|fim_pad|>": 151662,
  "<|fim_prefix|>": 151659,
  "<|fim_suffix|>": 151661,
  "<|im_end|>": 151645,
  "<|im_start|>": 151644,
  "<|image_pad|>": 151655,
  "<|object_ref_end|>": 151647,
  "<|object_ref_start|>": 151646,
  "<|quad_end|>": 151651,
  "<|quad_start|>": 151650,
  "<|repo_name|>": 151663,
  "<|video_pad|>": 151656,
  "<|vision_end|>": 151653,
  "<|vision_pad|>": 151654,
  "<|vision_start|>": 151652
}
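
The added-token ids above can be sanity-checked against the shipped tokenizer. A small sketch (the expected ids come from the `added_tokens.json` shown above):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("JustJaro/InternVL3-38B-FP8-Dynamic", trust_remote_code=True)
print(tok.convert_tokens_to_ids("<IMG_CONTEXT>"))  # expected: 151667
print(tok.convert_tokens_to_ids("<img>"), tok.convert_tokens_to_ids("</img>"))  # expected: 151665 151666
```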
chat_template.jinja
ADDED
@@ -0,0 +1,54 @@
{%- if tools %}
    {{- '<|im_start|>system\n' }}
    {%- if messages[0]['role'] == 'system' %}
        {{- messages[0]['content'] }}
    {%- else %}
        {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}
    {%- endif %}
    {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
    {%- for tool in tools %}
        {{- "\n" }}
        {{- tool | tojson }}
    {%- endfor %}
    {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
{%- else %}
    {%- if messages[0]['role'] == 'system' %}
        {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
    {%- else %}
        {{- '<|im_start|>system\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\n' }}
    {%- endif %}
{%- endif %}
{%- for message in messages %}
    {%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
        {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
    {%- elif message.role == "assistant" %}
        {{- '<|im_start|>' + message.role }}
        {%- if message.content %}
            {{- '\n' + message.content }}
        {%- endif %}
        {%- for tool_call in message.tool_calls %}
            {%- if tool_call.function is defined %}
                {%- set tool_call = tool_call.function %}
            {%- endif %}
            {{- '\n<tool_call>\n{"name": "' }}
            {{- tool_call.name }}
            {{- '", "arguments": ' }}
            {{- tool_call.arguments | tojson }}
            {{- '}\n</tool_call>' }}
        {%- endfor %}
        {{- '<|im_end|>\n' }}
    {%- elif message.role == "tool" %}
        {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
            {{- '<|im_start|>user' }}
        {%- endif %}
        {{- '\n<tool_response>\n' }}
        {{- message.content }}
        {{- '\n</tool_response>' }}
        {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
            {{- '<|im_end|>\n' }}
        {%- endif %}
    {%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
    {{- '<|im_start|>assistant\n' }}
{%- endif %}
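
This is the Qwen2.5-style chat template (with tool-calling support) inherited from the language backbone. A quick way to see what it renders, assuming the tokenizer loads from this repo:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("JustJaro/InternVL3-38B-FP8-Dynamic", trust_remote_code=True)
messages = [{"role": "user", "content": "Hello!"}]
print(tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True))
# Renders the default system prompt, the user turn, and an open assistant turn,
# delimited by <|im_start|>/<|im_end|> markers per the template above.
```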
config.json
ADDED
@@ -0,0 +1,330 @@
{
  "architectures": [
    "InternVLChatModel"
  ],
  "auto_map": {
    "AutoConfig": "OpenGVLab/InternVL3-38B--configuration_internvl_chat.InternVLChatConfig",
    "AutoModel": "OpenGVLab/InternVL3-38B--modeling_internvl_chat.InternVLChatModel",
    "AutoModelForCausalLM": "OpenGVLab/InternVL3-38B--modeling_internvl_chat.InternVLChatModel"
  },
  "downsample_ratio": 0.5,
  "dynamic_image_size": true,
  "force_image_size": 448,
  "hidden_size": 5120,
  "image_fold": null,
  "llm_config": {
    "_name_or_path": "./pretrained/Qwen2.5-32B-Instruct",
    "architectures": [
      "Qwen2ForCausalLM"
    ],
    "attention_dropout": 0.0,
    "bos_token_id": 151643,
    "eos_token_id": 151643,
    "hidden_act": "silu",
    "hidden_size": 5120,
    "initializer_range": 0.02,
    "intermediate_size": 27648,
    "max_position_embeddings": 32768,
    "max_window_layers": 70,
    "model_type": "qwen2",
    "moe_config": null,
    "num_attention_heads": 40,
    "num_hidden_layers": 64,
    "num_key_value_heads": 8,
    "rms_norm_eps": 1e-06,
    "rope_scaling": {
      "factor": 2.0,
      "rope_type": "dynamic",
      "type": "dynamic"
    },
    "rope_theta": 1000000.0,
    "sliding_window": null,
    "torch_dtype": "bfloat16",
    "use_bfloat16": true,
    "use_cache": false,
    "use_sliding_window": false,
    "vocab_size": 151674
  },
  "max_dynamic_patch": 12,
  "min_dynamic_patch": 1,
  "model_type": "internvl_chat",
  "pad2square": false,
  "ps_version": "v2",
  "quantization_config": {
    "config_groups": {
      "group_0": {
        "input_activations": {
          "actorder": null,
          "block_structure": null,
          "dynamic": true,
          "group_size": null,
          "num_bits": 8,
          "observer": null,
          "observer_kwargs": {},
          "strategy": "token",
          "symmetric": true,
          "type": "float"
        },
        "output_activations": null,
        "targets": [
          "Linear"
        ],
        "weights": {
          "actorder": null,
          "block_structure": null,
          "dynamic": false,
          "group_size": null,
          "num_bits": 8,
          "observer": "minmax",
          "observer_kwargs": {},
          "strategy": "channel",
          "symmetric": true,
          "type": "float"
        }
      }
    },
    "format": "float-quantized",
    "global_compression_ratio": null,
    "ignore": [
      "vision_model.encoder.layers.0.attn.qkv",
      "vision_model.encoder.layers.0.attn.proj",
      "vision_model.encoder.layers.0.mlp.fc1",
      "vision_model.encoder.layers.0.mlp.fc2",
      "vision_model.encoder.layers.1.attn.qkv",
      "vision_model.encoder.layers.1.attn.proj",
      "vision_model.encoder.layers.1.mlp.fc1",
      "vision_model.encoder.layers.1.mlp.fc2",
      "vision_model.encoder.layers.2.attn.qkv",
      "vision_model.encoder.layers.2.attn.proj",
      "vision_model.encoder.layers.2.mlp.fc1",
      "vision_model.encoder.layers.2.mlp.fc2",
      "vision_model.encoder.layers.3.attn.qkv",
      "vision_model.encoder.layers.3.attn.proj",
      "vision_model.encoder.layers.3.mlp.fc1",
      "vision_model.encoder.layers.3.mlp.fc2",
      "vision_model.encoder.layers.4.attn.qkv",
      "vision_model.encoder.layers.4.attn.proj",
      "vision_model.encoder.layers.4.mlp.fc1",
      "vision_model.encoder.layers.4.mlp.fc2",
      "vision_model.encoder.layers.5.attn.qkv",
      "vision_model.encoder.layers.5.attn.proj",
      "vision_model.encoder.layers.5.mlp.fc1",
      "vision_model.encoder.layers.5.mlp.fc2",
      "vision_model.encoder.layers.6.attn.qkv",
      "vision_model.encoder.layers.6.attn.proj",
      "vision_model.encoder.layers.6.mlp.fc1",
      "vision_model.encoder.layers.6.mlp.fc2",
      "vision_model.encoder.layers.7.attn.qkv",
      "vision_model.encoder.layers.7.attn.proj",
      "vision_model.encoder.layers.7.mlp.fc1",
      "vision_model.encoder.layers.7.mlp.fc2",
      "vision_model.encoder.layers.8.attn.qkv",
      "vision_model.encoder.layers.8.attn.proj",
      "vision_model.encoder.layers.8.mlp.fc1",
      "vision_model.encoder.layers.8.mlp.fc2",
      "vision_model.encoder.layers.9.attn.qkv",
      "vision_model.encoder.layers.9.attn.proj",
      "vision_model.encoder.layers.9.mlp.fc1",
      "vision_model.encoder.layers.9.mlp.fc2",
      "vision_model.encoder.layers.10.attn.qkv",
      "vision_model.encoder.layers.10.attn.proj",
      "vision_model.encoder.layers.10.mlp.fc1",
      "vision_model.encoder.layers.10.mlp.fc2",
      "vision_model.encoder.layers.11.attn.qkv",
      "vision_model.encoder.layers.11.attn.proj",
      "vision_model.encoder.layers.11.mlp.fc1",
      "vision_model.encoder.layers.11.mlp.fc2",
      "vision_model.encoder.layers.12.attn.qkv",
      "vision_model.encoder.layers.12.attn.proj",
      "vision_model.encoder.layers.12.mlp.fc1",
      "vision_model.encoder.layers.12.mlp.fc2",
      "vision_model.encoder.layers.13.attn.qkv",
      "vision_model.encoder.layers.13.attn.proj",
      "vision_model.encoder.layers.13.mlp.fc1",
      "vision_model.encoder.layers.13.mlp.fc2",
      "vision_model.encoder.layers.14.attn.qkv",
      "vision_model.encoder.layers.14.attn.proj",
      "vision_model.encoder.layers.14.mlp.fc1",
      "vision_model.encoder.layers.14.mlp.fc2",
      "vision_model.encoder.layers.15.attn.qkv",
      "vision_model.encoder.layers.15.attn.proj",
      "vision_model.encoder.layers.15.mlp.fc1",
      "vision_model.encoder.layers.15.mlp.fc2",
      "vision_model.encoder.layers.16.attn.qkv",
      "vision_model.encoder.layers.16.attn.proj",
      "vision_model.encoder.layers.16.mlp.fc1",
      "vision_model.encoder.layers.16.mlp.fc2",
      "vision_model.encoder.layers.17.attn.qkv",
      "vision_model.encoder.layers.17.attn.proj",
      "vision_model.encoder.layers.17.mlp.fc1",
      "vision_model.encoder.layers.17.mlp.fc2",
      "vision_model.encoder.layers.18.attn.qkv",
      "vision_model.encoder.layers.18.attn.proj",
      "vision_model.encoder.layers.18.mlp.fc1",
      "vision_model.encoder.layers.18.mlp.fc2",
      "vision_model.encoder.layers.19.attn.qkv",
      "vision_model.encoder.layers.19.attn.proj",
      "vision_model.encoder.layers.19.mlp.fc1",
      "vision_model.encoder.layers.19.mlp.fc2",
      "vision_model.encoder.layers.20.attn.qkv",
      "vision_model.encoder.layers.20.attn.proj",
      "vision_model.encoder.layers.20.mlp.fc1",
      "vision_model.encoder.layers.20.mlp.fc2",
      "vision_model.encoder.layers.21.attn.qkv",
      "vision_model.encoder.layers.21.attn.proj",
|
175 |
+
"vision_model.encoder.layers.21.mlp.fc1",
|
176 |
+
"vision_model.encoder.layers.21.mlp.fc2",
|
177 |
+
"vision_model.encoder.layers.22.attn.qkv",
|
178 |
+
"vision_model.encoder.layers.22.attn.proj",
|
179 |
+
"vision_model.encoder.layers.22.mlp.fc1",
|
180 |
+
"vision_model.encoder.layers.22.mlp.fc2",
|
181 |
+
"vision_model.encoder.layers.23.attn.qkv",
|
182 |
+
"vision_model.encoder.layers.23.attn.proj",
|
183 |
+
"vision_model.encoder.layers.23.mlp.fc1",
|
184 |
+
"vision_model.encoder.layers.23.mlp.fc2",
|
185 |
+
"vision_model.encoder.layers.24.attn.qkv",
|
186 |
+
"vision_model.encoder.layers.24.attn.proj",
|
187 |
+
"vision_model.encoder.layers.24.mlp.fc1",
|
188 |
+
"vision_model.encoder.layers.24.mlp.fc2",
|
189 |
+
"vision_model.encoder.layers.25.attn.qkv",
|
190 |
+
"vision_model.encoder.layers.25.attn.proj",
|
191 |
+
"vision_model.encoder.layers.25.mlp.fc1",
|
192 |
+
"vision_model.encoder.layers.25.mlp.fc2",
|
193 |
+
"vision_model.encoder.layers.26.attn.qkv",
|
194 |
+
"vision_model.encoder.layers.26.attn.proj",
|
195 |
+
"vision_model.encoder.layers.26.mlp.fc1",
|
196 |
+
"vision_model.encoder.layers.26.mlp.fc2",
|
197 |
+
"vision_model.encoder.layers.27.attn.qkv",
|
198 |
+
"vision_model.encoder.layers.27.attn.proj",
|
199 |
+
"vision_model.encoder.layers.27.mlp.fc1",
|
200 |
+
"vision_model.encoder.layers.27.mlp.fc2",
|
201 |
+
"vision_model.encoder.layers.28.attn.qkv",
|
202 |
+
"vision_model.encoder.layers.28.attn.proj",
|
203 |
+
"vision_model.encoder.layers.28.mlp.fc1",
|
204 |
+
"vision_model.encoder.layers.28.mlp.fc2",
|
205 |
+
"vision_model.encoder.layers.29.attn.qkv",
|
206 |
+
"vision_model.encoder.layers.29.attn.proj",
|
207 |
+
"vision_model.encoder.layers.29.mlp.fc1",
|
208 |
+
"vision_model.encoder.layers.29.mlp.fc2",
|
209 |
+
"vision_model.encoder.layers.30.attn.qkv",
|
210 |
+
"vision_model.encoder.layers.30.attn.proj",
|
211 |
+
"vision_model.encoder.layers.30.mlp.fc1",
|
212 |
+
"vision_model.encoder.layers.30.mlp.fc2",
|
213 |
+
"vision_model.encoder.layers.31.attn.qkv",
|
214 |
+
"vision_model.encoder.layers.31.attn.proj",
|
215 |
+
"vision_model.encoder.layers.31.mlp.fc1",
|
216 |
+
"vision_model.encoder.layers.31.mlp.fc2",
|
217 |
+
"vision_model.encoder.layers.32.attn.qkv",
|
218 |
+
"vision_model.encoder.layers.32.attn.proj",
|
219 |
+
"vision_model.encoder.layers.32.mlp.fc1",
|
220 |
+
"vision_model.encoder.layers.32.mlp.fc2",
|
221 |
+
"vision_model.encoder.layers.33.attn.qkv",
|
222 |
+
"vision_model.encoder.layers.33.attn.proj",
|
223 |
+
"vision_model.encoder.layers.33.mlp.fc1",
|
224 |
+
"vision_model.encoder.layers.33.mlp.fc2",
|
225 |
+
"vision_model.encoder.layers.34.attn.qkv",
|
226 |
+
"vision_model.encoder.layers.34.attn.proj",
|
227 |
+
"vision_model.encoder.layers.34.mlp.fc1",
|
228 |
+
"vision_model.encoder.layers.34.mlp.fc2",
|
229 |
+
"vision_model.encoder.layers.35.attn.qkv",
|
230 |
+
"vision_model.encoder.layers.35.attn.proj",
|
231 |
+
"vision_model.encoder.layers.35.mlp.fc1",
|
232 |
+
"vision_model.encoder.layers.35.mlp.fc2",
|
233 |
+
"vision_model.encoder.layers.36.attn.qkv",
|
234 |
+
"vision_model.encoder.layers.36.attn.proj",
|
235 |
+
"vision_model.encoder.layers.36.mlp.fc1",
|
236 |
+
"vision_model.encoder.layers.36.mlp.fc2",
|
237 |
+
"vision_model.encoder.layers.37.attn.qkv",
|
238 |
+
"vision_model.encoder.layers.37.attn.proj",
|
239 |
+
"vision_model.encoder.layers.37.mlp.fc1",
|
240 |
+
"vision_model.encoder.layers.37.mlp.fc2",
|
241 |
+
"vision_model.encoder.layers.38.attn.qkv",
|
242 |
+
"vision_model.encoder.layers.38.attn.proj",
|
243 |
+
"vision_model.encoder.layers.38.mlp.fc1",
|
244 |
+
"vision_model.encoder.layers.38.mlp.fc2",
|
245 |
+
"vision_model.encoder.layers.39.attn.qkv",
|
246 |
+
"vision_model.encoder.layers.39.attn.proj",
|
247 |
+
"vision_model.encoder.layers.39.mlp.fc1",
|
248 |
+
"vision_model.encoder.layers.39.mlp.fc2",
|
249 |
+
"vision_model.encoder.layers.40.attn.qkv",
|
250 |
+
"vision_model.encoder.layers.40.attn.proj",
|
251 |
+
"vision_model.encoder.layers.40.mlp.fc1",
|
252 |
+
"vision_model.encoder.layers.40.mlp.fc2",
|
253 |
+
"vision_model.encoder.layers.41.attn.qkv",
|
254 |
+
"vision_model.encoder.layers.41.attn.proj",
|
255 |
+
"vision_model.encoder.layers.41.mlp.fc1",
|
256 |
+
"vision_model.encoder.layers.41.mlp.fc2",
|
257 |
+
"vision_model.encoder.layers.42.attn.qkv",
|
258 |
+
"vision_model.encoder.layers.42.attn.proj",
|
259 |
+
"vision_model.encoder.layers.42.mlp.fc1",
|
260 |
+
"vision_model.encoder.layers.42.mlp.fc2",
|
261 |
+
"vision_model.encoder.layers.43.attn.qkv",
|
262 |
+
"vision_model.encoder.layers.43.attn.proj",
|
263 |
+
"vision_model.encoder.layers.43.mlp.fc1",
|
264 |
+
"vision_model.encoder.layers.43.mlp.fc2",
|
265 |
+
"vision_model.encoder.layers.44.attn.qkv",
|
266 |
+
"vision_model.encoder.layers.44.attn.proj",
|
267 |
+
"vision_model.encoder.layers.44.mlp.fc1",
|
268 |
+
"vision_model.encoder.layers.44.mlp.fc2",
|
269 |
+
"language_model.lm_head"
|
270 |
+
],
|
271 |
+
"kv_cache_scheme": null,
|
272 |
+
"quant_method": "compressed-tensors",
|
273 |
+
"quantization_status": "compressed"
|
274 |
+
},
|
275 |
+
"select_layer": -1,
|
276 |
+
"system_message": null,
|
277 |
+
"template": "internvl2_5",
|
278 |
+
"tie_word_embeddings": false,
|
279 |
+
"torch_dtype": "bfloat16",
|
280 |
+
"transformers_version": null,
|
281 |
+
"use_backbone_lora": 0,
|
282 |
+
"use_llm_lora": 0,
|
283 |
+
"use_thumbnail": true,
|
284 |
+
"vision_config": {
|
285 |
+
"_name_or_path": "OpenGVLab/InternViT-6B-448px-V1-5",
|
286 |
+
"architectures": [
|
287 |
+
"InternVisionModel"
|
288 |
+
],
|
289 |
+
"attention_dropout": 0.0,
|
290 |
+
"auto_map": {
|
291 |
+
"AutoConfig": "configuration_intern_vit.InternVisionConfig",
|
292 |
+
"AutoModel": "modeling_intern_vit.InternVisionModel"
|
293 |
+
},
|
294 |
+
"capacity_factor": 1.2,
|
295 |
+
"drop_path_rate": 0.4,
|
296 |
+
"dropout": 0.0,
|
297 |
+
"eval_capacity_factor": 1.4,
|
298 |
+
"hidden_act": "gelu",
|
299 |
+
"hidden_size": 3200,
|
300 |
+
"image_size": 448,
|
301 |
+
"initializer_factor": 0.1,
|
302 |
+
"initializer_range": 1e-10,
|
303 |
+
"intermediate_size": 12800,
|
304 |
+
"laux_allreduce": "all_nodes",
|
305 |
+
"layer_norm_eps": 1e-06,
|
306 |
+
"model_type": "intern_vit_6b",
|
307 |
+
"moe_coeff_ratio": 0.5,
|
308 |
+
"moe_intermediate_size": 768,
|
309 |
+
"moe_output_scale": 4.0,
|
310 |
+
"noisy_gate_policy": "RSample_before",
|
311 |
+
"norm_type": "rms_norm",
|
312 |
+
"num_attention_heads": 25,
|
313 |
+
"num_channels": 3,
|
314 |
+
"num_experts": 8,
|
315 |
+
"num_hidden_layers": 45,
|
316 |
+
"num_routed_experts": 4,
|
317 |
+
"num_shared_experts": 4,
|
318 |
+
"patch_size": 14,
|
319 |
+
"qk_normalization": true,
|
320 |
+
"qkv_bias": false,
|
321 |
+
"shared_expert_intermediate_size": 3072,
|
322 |
+
"torch_dtype": "bfloat16",
|
323 |
+
"use_bfloat16": true,
|
324 |
+
"use_flash_attn": false,
|
325 |
+
"use_moe": false,
|
326 |
+
"use_residual": true,
|
327 |
+
"use_rts": false,
|
328 |
+
"use_weighted_residual": false
|
329 |
+
}
|
330 |
+
}
|
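
Note on the values above: force_image_size, vision_config.patch_size, and downsample_ratio together fix the per-tile vision-token budget. A quick sanity check in Python, using the same formula that modeling_internvl_chat.py (below) uses for num_image_token:

# Minimal sketch: derive the per-tile vision-token count from config.json above.
image_size = 448        # force_image_size
patch_size = 14         # vision_config.patch_size
downsample_ratio = 0.5  # pixel-shuffle downsampling

num_image_token = int((image_size // patch_size) ** 2 * downsample_ratio ** 2)
print(num_image_token)  # 256 tokens per 448x448 tile

# With max_dynamic_patch = 12 plus one thumbnail tile (use_thumbnail = true),
# a single image can occupy up to 13 * 256 = 3328 <IMG_CONTEXT> tokens.
print((12 + 1) * num_image_token)  # 3328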
configuration_internvl_chat.py
ADDED
@@ -0,0 +1,97 @@
# --------------------------------------------------------
# InternVL
# Copyright (c) 2024 OpenGVLab
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------

import copy

from transformers import AutoConfig, LlamaConfig, Qwen2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

from .configuration_intern_vit import InternVisionConfig

logger = logging.get_logger(__name__)


class InternVLChatConfig(PretrainedConfig):
    model_type = 'internvl_chat'
    is_composition = True

    def __init__(
            self,
            vision_config=None,
            llm_config=None,
            use_backbone_lora=0,
            use_llm_lora=0,
            select_layer=-1,
            force_image_size=None,
            downsample_ratio=0.5,
            template=None,
            dynamic_image_size=False,
            use_thumbnail=False,
            ps_version='v1',
            min_dynamic_patch=1,
            max_dynamic_patch=6,
            **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {'architectures': ['InternVisionModel']}
            logger.info('vision_config is None. Initializing the InternVisionConfig with default values.')

        if llm_config is None:
            llm_config = {'architectures': ['Qwen2ForCausalLM']}
            logger.info('llm_config is None. Initializing the llm_config with default values (`Qwen2Config`).')

        self.vision_config = InternVisionConfig(**vision_config)
        if llm_config.get('architectures')[0] == 'LlamaForCausalLM':
            self.llm_config = LlamaConfig(**llm_config)
        elif llm_config.get('architectures')[0] == 'Qwen2ForCausalLM':
            self.llm_config = Qwen2Config(**llm_config)
        else:
            raise ValueError('Unsupported architecture: {}'.format(llm_config.get('architectures')[0]))
        self.use_backbone_lora = use_backbone_lora
        self.use_llm_lora = use_llm_lora
        self.select_layer = select_layer
        self.force_image_size = force_image_size
        self.downsample_ratio = downsample_ratio
        self.template = template
        self.dynamic_image_size = dynamic_image_size
        self.use_thumbnail = use_thumbnail
        self.ps_version = ps_version  # pixel shuffle version
        self.min_dynamic_patch = min_dynamic_patch
        self.max_dynamic_patch = max_dynamic_patch
        # By default, we use tie_word_embeddings=False for models of all sizes.
        self.tie_word_embeddings = self.llm_config.tie_word_embeddings

        logger.info(f'vision_select_layer: {self.select_layer}')
        logger.info(f'ps_version: {self.ps_version}')
        logger.info(f'min_dynamic_patch: {self.min_dynamic_patch}')
        logger.info(f'max_dynamic_patch: {self.max_dynamic_patch}')

    def to_dict(self):
        """
        Serializes this instance to a Python dictionary. Overrides the default [`~PretrainedConfig.to_dict`].

        Returns:
            `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
        """
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['llm_config'] = self.llm_config.to_dict()
        output['model_type'] = self.__class__.model_type
        output['use_backbone_lora'] = self.use_backbone_lora
        output['use_llm_lora'] = self.use_llm_lora
        output['select_layer'] = self.select_layer
        output['force_image_size'] = self.force_image_size
        output['downsample_ratio'] = self.downsample_ratio
        output['template'] = self.template
        output['dynamic_image_size'] = self.dynamic_image_size
        output['use_thumbnail'] = self.use_thumbnail
        output['ps_version'] = self.ps_version
        output['min_dynamic_patch'] = self.min_dynamic_patch
        output['max_dynamic_patch'] = self.max_dynamic_patch

        return output
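
A minimal sketch of instantiating this configuration through the auto_map entries in config.json; the repo path is a placeholder, and trust_remote_code is required because the class ships with the checkpoint rather than with transformers:

from transformers import AutoConfig

# Placeholder path: any directory containing config.json plus this file works.
config = AutoConfig.from_pretrained('path/to/this/checkpoint', trust_remote_code=True)
assert type(config).__name__ == 'InternVLChatConfig'
print(config.llm_config.model_type)  # 'qwen2'
print(config.max_dynamic_patch)      # 12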
generation_config.json
ADDED
@@ -0,0 +1,4 @@
{
  "_from_model_config": true,
  "transformers_version": "4.52.4"
}
merges.txt
ADDED
The diff for this file is too large to render.
See raw diff
model-00001-of-00010.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0079961ce2bb8dba8f35ffd5655ecaf9f15ed940bb4f90cf60ae76943c6b19b2
size 4988569440
model-00002-of-00010.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3e374322e9bacb7b749f50777ef6c05f27daf8e54f81c8dece51601f9261634e
size 4937253584
model-00003-of-00010.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2a0fe100133daa3aa1e58856da6a19e56bf588702034266fd9d5ae52fa4abdb8
size 4997644696
model-00004-of-00010.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dcdf6c22706334bcafe760c5652431fb92e4d6029a42282f7816f1c1659f9210
size 4877704976
model-00005-of-00010.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ced642fec8a1d304667a2e615c23aa194e33d80e4ff8e8a65f68d8c772d265a7
size 4877705072
model-00006-of-00010.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:18e38855208ece666302581997f68d0ad13c428abf03f1edd0345bf7b90d2b92
size 4877705072
model-00007-of-00010.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1670c7382543e9716a98290ed4a587e7cf5521e44fc9e441d2862af9cfc102f9
size 4877705072
model-00008-of-00010.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5958eee456593d09dab7339c8c2e6c89428e0591c2166d8ad1b208f3d287102f
size 4877705072
model-00009-of-00010.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8e1b82e6ff054488f0108b25e9c15a12908da4f5c96557dda3da5e7057c8aaa2
size 4531533888
model-00010-of-00010.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dcf8286b31dfbe09605da87ddaf8e8132b223516cd8770269befc8e8c701e3bb
size 1644985192
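
For orientation, summing the ten shard sizes above gives the on-disk footprint of the checkpoint, plausible for FP8 linear weights plus bf16 vision, norm, and embedding tensors:

# Total checkpoint size across the ten safetensors shards listed above.
shard_sizes = [
    4988569440, 4937253584, 4997644696, 4877704976, 4877705072,
    4877705072, 4877705072, 4877705072, 4531533888, 1644985192,
]
total = sum(shard_sizes)
print(total)                  # 45488512064 bytes
print(round(total / 1e9, 1))  # ~45.5 GB on disk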
model.safetensors.index.json
ADDED
The diff for this file is too large to render.
See raw diff
modeling_internvl_chat.py
ADDED
@@ -0,0 +1,359 @@
# --------------------------------------------------------
# InternVL
# Copyright (c) 2024 OpenGVLab
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------

import warnings
from typing import List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
import transformers
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers import (AutoModel, GenerationConfig, LlamaForCausalLM,
                          Qwen2ForCausalLM)
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import ModelOutput, logging

from .configuration_internvl_chat import InternVLChatConfig
from .conversation import get_conv_template
from .modeling_intern_vit import InternVisionModel, has_flash_attn

logger = logging.get_logger(__name__)


def version_cmp(v1, v2, op='eq'):
    import operator

    from packaging import version
    op_func = getattr(operator, op)
    return op_func(version.parse(v1), version.parse(v2))


class InternVLChatModel(PreTrainedModel):
    config_class = InternVLChatConfig
    main_input_name = 'pixel_values'
    base_model_prefix = 'language_model'
    _supports_flash_attn_2 = True
    supports_gradient_checkpointing = True
    _no_split_modules = ['InternVisionModel', 'LlamaDecoderLayer', 'Qwen2DecoderLayer']

    def __init__(self, config: InternVLChatConfig, vision_model=None, language_model=None, use_flash_attn=True):
        super().__init__(config)

        assert version_cmp(transformers.__version__, '4.37.0', 'ge')
        image_size = config.force_image_size or config.vision_config.image_size
        patch_size = config.vision_config.patch_size
        self.patch_size = patch_size
        self.select_layer = config.select_layer
        self.template = config.template
        self.num_image_token = int((image_size // patch_size) ** 2 * (config.downsample_ratio ** 2))
        self.downsample_ratio = config.downsample_ratio
        self.ps_version = config.ps_version
        use_flash_attn = use_flash_attn if has_flash_attn else False
        config.vision_config.use_flash_attn = True if use_flash_attn else False
        config.llm_config._attn_implementation = 'flash_attention_2' if use_flash_attn else 'eager'

        logger.info(f'num_image_token: {self.num_image_token}')
        logger.info(f'ps_version: {self.ps_version}')
        if vision_model is not None:
            self.vision_model = vision_model
        else:
            self.vision_model = InternVisionModel(config.vision_config)
        if language_model is not None:
            self.language_model = language_model
        else:
            if config.llm_config.architectures[0] == 'LlamaForCausalLM':
                self.language_model = LlamaForCausalLM(config.llm_config)
            elif config.llm_config.architectures[0] == 'Qwen2ForCausalLM':
                self.language_model = Qwen2ForCausalLM(config.llm_config)
            else:
                raise NotImplementedError(f'{config.llm_config.architectures[0]} is not implemented.')

        vit_hidden_size = config.vision_config.hidden_size
        llm_hidden_size = config.llm_config.hidden_size

        self.mlp1 = nn.Sequential(
            nn.LayerNorm(vit_hidden_size * int(1 / self.downsample_ratio) ** 2),
            nn.Linear(vit_hidden_size * int(1 / self.downsample_ratio) ** 2, llm_hidden_size),
            nn.GELU(),
            nn.Linear(llm_hidden_size, llm_hidden_size)
        )

        self.img_context_token_id = None
        self.conv_template = get_conv_template(self.template)
        self.system_message = self.conv_template.system_message

    def forward(
            self,
            pixel_values: torch.FloatTensor,
            input_ids: torch.LongTensor = None,
            attention_mask: Optional[torch.Tensor] = None,
            position_ids: Optional[torch.LongTensor] = None,
            image_flags: Optional[torch.LongTensor] = None,
            past_key_values: Optional[List[torch.FloatTensor]] = None,
            labels: Optional[torch.LongTensor] = None,
            use_cache: Optional[bool] = None,
            output_attentions: Optional[bool] = None,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        image_flags = image_flags.squeeze(-1)
        input_embeds = self.language_model.get_input_embeddings()(input_ids).clone()

        vit_embeds = self.extract_feature(pixel_values)
        vit_embeds = vit_embeds[image_flags == 1]
        vit_batch_size = pixel_values.shape[0]

        B, N, C = input_embeds.shape
        input_embeds = input_embeds.reshape(B * N, C)

        if torch.distributed.is_initialized() and torch.distributed.get_rank() == 0:
            print(f'dynamic ViT batch size: {vit_batch_size}, images per sample: {vit_batch_size / B}, dynamic token length: {N}')

        input_ids = input_ids.reshape(B * N)
        selected = (input_ids == self.img_context_token_id)
        try:
            input_embeds[selected] = input_embeds[selected] * 0.0 + vit_embeds.reshape(-1, C)
        except Exception as e:
            vit_embeds = vit_embeds.reshape(-1, C)
            print(f'warning: {e}, input_embeds[selected].shape={input_embeds[selected].shape}, '
                  f'vit_embeds.shape={vit_embeds.shape}')
            n_token = min(selected.sum(), vit_embeds.size(0))
            input_embeds[selected][:n_token] = input_embeds[selected][:n_token] * 0.0 + vit_embeds[:n_token]

        input_embeds = input_embeds.reshape(B, N, C)

        outputs = self.language_model(
            inputs_embeds=input_embeds,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        logits = outputs.logits

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.language_model.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def pixel_shuffle(self, x, scale_factor=0.5):
        n, w, h, c = x.size()
        # N, W, H, C --> N, W, H * scale, C // scale
        x = x.view(n, w, int(h * scale_factor), int(c / scale_factor))
        # N, W, H * scale, C // scale --> N, H * scale, W, C // scale
        x = x.permute(0, 2, 1, 3).contiguous()
        # N, H * scale, W, C // scale --> N, H * scale, W * scale, C // (scale ** 2)
        x = x.view(n, int(h * scale_factor), int(w * scale_factor),
                   int(c / (scale_factor * scale_factor)))
        if self.ps_version == 'v1':
            warnings.warn("In ps_version 'v1', the height and width have not been swapped back, "
                          'which results in a transposed image.')
        else:
            x = x.permute(0, 2, 1, 3).contiguous()
        return x

    def extract_feature(self, pixel_values):
        if self.select_layer == -1:
            vit_embeds = self.vision_model(
                pixel_values=pixel_values,
                output_hidden_states=False,
                return_dict=True).last_hidden_state
        else:
            vit_embeds = self.vision_model(
                pixel_values=pixel_values,
                output_hidden_states=True,
                return_dict=True).hidden_states[self.select_layer]
        vit_embeds = vit_embeds[:, 1:, :]

        h = w = int(vit_embeds.shape[1] ** 0.5)
        vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], h, w, -1)
        vit_embeds = self.pixel_shuffle(vit_embeds, scale_factor=self.downsample_ratio)
        vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], -1, vit_embeds.shape[-1])
        vit_embeds = self.mlp1(vit_embeds)
        return vit_embeds

    def batch_chat(self, tokenizer, pixel_values, questions, generation_config, num_patches_list=None,
                   history=None, return_history=False, IMG_START_TOKEN='<img>', IMG_END_TOKEN='</img>',
                   IMG_CONTEXT_TOKEN='<IMG_CONTEXT>', verbose=False, image_counts=None):
        if history is not None or return_history:
            print('Now multi-turn chat is not supported in batch_chat.')
            raise NotImplementedError

        if image_counts is not None:
            num_patches_list = image_counts
            print('Warning: `image_counts` is deprecated. Please use `num_patches_list` instead.')

        img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
        self.img_context_token_id = img_context_token_id

        if verbose and pixel_values is not None:
            image_bs = pixel_values.shape[0]
            print(f'dynamic ViT batch size: {image_bs}')

        queries = []
        for idx, num_patches in enumerate(num_patches_list):
            question = questions[idx]
            if pixel_values is not None and '<image>' not in question:
                question = '<image>\n' + question
            template = get_conv_template(self.template)
            template.system_message = self.system_message
            template.append_message(template.roles[0], question)
            template.append_message(template.roles[1], None)
            query = template.get_prompt()

            image_tokens = IMG_START_TOKEN + IMG_CONTEXT_TOKEN * self.num_image_token * num_patches + IMG_END_TOKEN
            query = query.replace('<image>', image_tokens, 1)
            queries.append(query)

        tokenizer.padding_side = 'left'
        model_inputs = tokenizer(queries, return_tensors='pt', padding=True)
        input_ids = model_inputs['input_ids'].to(self.device)
        attention_mask = model_inputs['attention_mask'].to(self.device)
        eos_token_id = tokenizer.convert_tokens_to_ids(template.sep.strip())
        generation_config['eos_token_id'] = eos_token_id
        generation_output = self.generate(
            pixel_values=pixel_values,
            input_ids=input_ids,
            attention_mask=attention_mask,
            **generation_config
        )
        responses = tokenizer.batch_decode(generation_output, skip_special_tokens=True)
        responses = [response.split(template.sep.strip())[0].strip() for response in responses]
        return responses

    def chat(self, tokenizer, pixel_values, question, generation_config, history=None, return_history=False,
             num_patches_list=None, IMG_START_TOKEN='<img>', IMG_END_TOKEN='</img>', IMG_CONTEXT_TOKEN='<IMG_CONTEXT>',
             verbose=False):

        if history is None and pixel_values is not None and '<image>' not in question:
            question = '<image>\n' + question

        if num_patches_list is None:
            num_patches_list = [pixel_values.shape[0]] if pixel_values is not None else []
        assert pixel_values is None or len(pixel_values) == sum(num_patches_list)

        img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
        self.img_context_token_id = img_context_token_id

        template = get_conv_template(self.template)
        template.system_message = self.system_message
        eos_token_id = tokenizer.convert_tokens_to_ids(template.sep.strip())

        history = [] if history is None else history
        for (old_question, old_answer) in history:
            template.append_message(template.roles[0], old_question)
            template.append_message(template.roles[1], old_answer)
        template.append_message(template.roles[0], question)
        template.append_message(template.roles[1], None)
        query = template.get_prompt()

        if verbose and pixel_values is not None:
            image_bs = pixel_values.shape[0]
            print(f'dynamic ViT batch size: {image_bs}')

        for num_patches in num_patches_list:
            image_tokens = IMG_START_TOKEN + IMG_CONTEXT_TOKEN * self.num_image_token * num_patches + IMG_END_TOKEN
            query = query.replace('<image>', image_tokens, 1)

        model_inputs = tokenizer(query, return_tensors='pt')
        input_ids = model_inputs['input_ids'].to(self.device)
        attention_mask = model_inputs['attention_mask'].to(self.device)
        generation_config['eos_token_id'] = eos_token_id
        generation_output = self.generate(
            pixel_values=pixel_values,
            input_ids=input_ids,
            attention_mask=attention_mask,
            **generation_config
        )
        response = tokenizer.batch_decode(generation_output, skip_special_tokens=True)[0]
        response = response.split(template.sep.strip())[0].strip()
        history.append((question, response))
        if return_history:
            return response, history
        else:
            query_to_print = query.replace(IMG_CONTEXT_TOKEN, '')
            query_to_print = query_to_print.replace(f'{IMG_START_TOKEN}{IMG_END_TOKEN}', '<image>')
            if verbose:
                print(query_to_print, response)
            return response

    @torch.no_grad()
    def generate(
            self,
            pixel_values: Optional[torch.FloatTensor] = None,
            input_ids: Optional[torch.LongTensor] = None,
            attention_mask: Optional[torch.LongTensor] = None,
            visual_features: Optional[torch.FloatTensor] = None,
            generation_config: Optional[GenerationConfig] = None,
            output_hidden_states: Optional[bool] = None,
            **generate_kwargs,
    ) -> torch.LongTensor:

        assert self.img_context_token_id is not None
        if pixel_values is not None:
            if visual_features is not None:
                vit_embeds = visual_features
            else:
                vit_embeds = self.extract_feature(pixel_values)
            input_embeds = self.language_model.get_input_embeddings()(input_ids)
            B, N, C = input_embeds.shape
            input_embeds = input_embeds.reshape(B * N, C)

            input_ids = input_ids.reshape(B * N)
            selected = (input_ids == self.img_context_token_id)
            assert selected.sum() != 0
            input_embeds[selected] = vit_embeds.reshape(-1, C).to(input_embeds.device)

            input_embeds = input_embeds.reshape(B, N, C)
        else:
            input_embeds = self.language_model.get_input_embeddings()(input_ids)

        outputs = self.language_model.generate(
            inputs_embeds=input_embeds,
            attention_mask=attention_mask,
            generation_config=generation_config,
            output_hidden_states=output_hidden_states,
            use_cache=True,
            **generate_kwargs,
        )

        return outputs

    @property
    def lm_head(self):
        return self.language_model.get_output_embeddings()

    def get_input_embeddings(self):
        return self.language_model.get_input_embeddings()

    def get_output_embeddings(self):
        return self.language_model.get_output_embeddings()
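
A hedged usage sketch of the chat() entry point above. The repo path is a placeholder, and the random tensor stands in for a properly preprocessed 448x448 tile (InternVL's dynamic-tiling preprocessing is omitted here):

import torch
from transformers import AutoModel, AutoTokenizer

path = 'path/to/this/checkpoint'  # placeholder
tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
model = AutoModel.from_pretrained(
    path, torch_dtype=torch.bfloat16, trust_remote_code=True).eval().cuda()

# Stand-in for a real preprocessed image tile.
pixel_values = torch.randn(1, 3, 448, 448, dtype=torch.bfloat16).cuda()
generation_config = dict(max_new_tokens=64, do_sample=False)  # plain dict, as chat() expects
response = model.chat(tokenizer, pixel_values,
                      '<image>\nDescribe the image.', generation_config)
print(response)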
recipe.yaml
ADDED
@@ -0,0 +1,7 @@
default_stage:
  default_modifiers:
    QuantizationModifier:
      ignore: ['re:.*lm_head', 're:.*vision.*', 're:.*visual.*', 're:.*image.*', 're:.*patch_embed.*',
        're:.*pos_embed.*', 're:.*norm.*', 're:.*layernorm.*']
      targets: [Linear]
      scheme: FP8_DYNAMIC
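
For reference, a sketch of the in-code equivalent of this recipe with llm-compressor; import paths vary across releases, so treat them as assumptions. FP8_DYNAMIC is data-free (static per-channel weight scales, dynamic per-token activation scales, matching quantization_config in config.json), so no calibration dataset is passed:

import torch
from transformers import AutoModelForCausalLM
from llmcompressor import oneshot  # older releases: from llmcompressor.transformers import oneshot
from llmcompressor.modifiers.quantization import QuantizationModifier

model = AutoModelForCausalLM.from_pretrained(
    'OpenGVLab/InternVL3-38B', torch_dtype=torch.bfloat16, trust_remote_code=True)

recipe = QuantizationModifier(
    targets=['Linear'],
    scheme='FP8_DYNAMIC',
    ignore=['re:.*lm_head', 're:.*vision.*', 're:.*visual.*', 're:.*image.*',
            're:.*patch_embed.*', 're:.*pos_embed.*', 're:.*norm.*', 're:.*layernorm.*'],
)
oneshot(model=model, recipe=recipe, output_dir='InternVL3-38B-FP8-dynamic')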
special_tokens_map.json
ADDED
@@ -0,0 +1,31 @@
{
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>",
    "<|object_ref_start|>",
    "<|object_ref_end|>",
    "<|box_start|>",
    "<|box_end|>",
    "<|quad_start|>",
    "<|quad_end|>",
    "<|vision_start|>",
    "<|vision_end|>",
    "<|vision_pad|>",
    "<|image_pad|>",
    "<|video_pad|>"
  ],
  "eos_token": {
    "content": "<|im_end|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6f9ba4b4a6625b5047a1356f6081b641c3e4e6a4a198facbd4bef217747d1685
size 11423548
tokenizer_config.json
ADDED
@@ -0,0 +1,280 @@
{
  "add_bos_token": false,
  "add_eos_token": false,
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "151643": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151644": {
      "content": "<|im_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151645": {
      "content": "<|im_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151646": {
      "content": "<|object_ref_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151647": {
      "content": "<|object_ref_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151648": {
      "content": "<|box_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151649": {
      "content": "<|box_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151650": {
      "content": "<|quad_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151651": {
      "content": "<|quad_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151652": {
      "content": "<|vision_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151653": {
      "content": "<|vision_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151654": {
      "content": "<|vision_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151655": {
      "content": "<|image_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151656": {
      "content": "<|video_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151657": {
      "content": "<tool_call>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151658": {
      "content": "</tool_call>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151659": {
      "content": "<|fim_prefix|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151660": {
      "content": "<|fim_middle|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151661": {
      "content": "<|fim_suffix|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151662": {
      "content": "<|fim_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151663": {
      "content": "<|repo_name|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151664": {
      "content": "<|file_sep|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151665": {
      "content": "<img>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151666": {
      "content": "</img>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151667": {
      "content": "<IMG_CONTEXT>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151668": {
      "content": "<quad>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151669": {
      "content": "</quad>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151670": {
      "content": "<ref>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151671": {
      "content": "</ref>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151672": {
      "content": "<box>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151673": {
      "content": "</box>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>",
    "<|object_ref_start|>",
    "<|object_ref_end|>",
    "<|box_start|>",
    "<|box_end|>",
    "<|quad_start|>",
    "<|quad_end|>",
    "<|vision_start|>",
    "<|vision_end|>",
    "<|vision_pad|>",
    "<|image_pad|>",
    "<|video_pad|>"
  ],
  "bos_token": null,
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|im_end|>",
  "errors": "replace",
  "extra_special_tokens": {},
  "model_max_length": 8192,
  "pad_token": "<|endoftext|>",
  "split_special_tokens": false,
  "tokenizer_class": "Qwen2Tokenizer",
  "unk_token": null
}
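
One detail worth noting across these files: the tokenizer's eos token is <|im_end|> (id 151645), while llm_config in config.json keeps Qwen's default eos_token_id 151643 (<|endoftext|>); chat() in modeling_internvl_chat.py sets the effective stop token at generation time from the conversation template. The added token ids run up to 151673, consistent with llm_config.vocab_size 151674. A minimal check (repo path is a placeholder):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('path/to/this/checkpoint', trust_remote_code=True)
print(tokenizer.eos_token)                               # '<|im_end|>'
print(tokenizer.convert_tokens_to_ids('<|im_end|>'))     # 151645
print(tokenizer.convert_tokens_to_ids('<IMG_CONTEXT>'))  # 151667, the image placeholder id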
vocab.json
ADDED
The diff for this file is too large to render.
See raw diff