raksa-the-wildcats committed on
Commit 383af88 · 1 Parent(s): dbc4f1d

first commit

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .DS_Store +0 -0
  2. LICENSE +9 -0
  3. README_CN.md +198 -0
  4. README_PDF_APP.md +117 -0
  5. app.py +27 -0
  6. assets/demo.gif +3 -0
  7. assets/dolphin.png +3 -0
  8. assets/framework.png +3 -0
  9. chat.py +198 -0
  10. config/Dolphin.yaml +17 -0
  11. demo/.DS_Store +0 -0
  12. demo/element_imgs/.DS_Store +0 -0
  13. demo/element_imgs/block_formula.jpeg +3 -0
  14. demo/element_imgs/line_formula.jpeg +3 -0
  15. demo/element_imgs/markdown/.DS_Store +0 -0
  16. demo/element_imgs/markdown/table_1.md +2 -0
  17. demo/element_imgs/para_1.jpg +3 -0
  18. demo/element_imgs/para_2.jpg +3 -0
  19. demo/element_imgs/para_3.jpeg +3 -0
  20. demo/element_imgs/recognition_json/table_1.json +6 -0
  21. demo/element_imgs/table_1.jpeg +3 -0
  22. demo/element_imgs/table_2.jpeg +3 -0
  23. demo/page_imgs/.DS_Store +0 -0
  24. demo/page_imgs/markdown/.DS_Store +0 -0
  25. demo/page_imgs/markdown/figures/.DS_Store +0 -0
  26. demo/page_imgs/markdown/figures/test_page3_figure_000.png +3 -0
  27. demo/page_imgs/markdown/test_page3.md +22 -0
  28. demo/page_imgs/page_1.jpeg +3 -0
  29. demo/page_imgs/page_2.jpeg +3 -0
  30. demo/page_imgs/page_3.jpeg +3 -0
  31. demo/page_imgs/page_4.png +3 -0
  32. demo/page_imgs/page_5.jpg +3 -0
  33. demo/page_imgs/page_6.pdf +0 -0
  34. demo/page_imgs/page_7.jpeg +3 -0
  35. demo/page_imgs/recognition_json/page_1.json +178 -0
  36. demo/page_imgs/recognition_json/test_page.json +47 -0
  37. demo/page_imgs/recognition_json/test_page2.json +102 -0
  38. demo/page_imgs/recognition_json/test_page3.json +124 -0
  39. demo/page_imgs/test_page2.jpeg +3 -0
  40. demo/page_imgs/test_page3.jpeg +3 -0
  41. demo_element.py +129 -0
  42. demo_element_hf.py +195 -0
  43. demo_page.py +247 -0
  44. demo_page_hf.py +365 -0
  45. deployment/ReadMe.md +12 -0
  46. deployment/tensorrt_llm/ReadMe.md +89 -0
  47. deployment/tensorrt_llm/api_client.py +100 -0
  48. deployment/tensorrt_llm/api_server.py +112 -0
  49. deployment/tensorrt_llm/convert/__init__.py +0 -0
  50. deployment/tensorrt_llm/convert/build_visual_engine.py +14 -0
.DS_Store ADDED
Binary file (8.2 kB).
 
LICENSE ADDED
@@ -0,0 +1,9 @@
+ MIT License
+
+ Copyright 2025 ByteDance Ltd. and/or its affiliates
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
README_CN.md ADDED
@@ -0,0 +1,198 @@
+ <div align="center">
+ <img src="./assets/dolphin.png" width="300">
+ </div>
+
+ <div align="center">
+ <a href="https://arxiv.org/abs/2505.14059">
+ <img src="https://img.shields.io/badge/论文-arXiv-red">
+ </a>
+ <a href="https://huggingface.co/ByteDance/Dolphin">
+ <img src="https://img.shields.io/badge/HuggingFace-Dolphin-yellow">
+ </a>
+ <a href="https://modelscope.cn/models/ByteDance/Dolphin">
+ <img src="https://img.shields.io/badge/ModelScope-Dolphin-purple">
+ </a>
+ <a href="https://huggingface.co/spaces/ByteDance/Dolphin">
+ <img src="https://img.shields.io/badge/演示-Dolphin-blue">
+ </a>
+ <a href="https://github.com/bytedance/Dolphin">
+ <img src="https://img.shields.io/badge/代码-Github-green">
+ </a>
+ <a href="https://opensource.org/licenses/MIT">
+ <img src="https://img.shields.io/badge/许可证-MIT-lightgray">
+ </a>
+ <br>
+ </div>
+
+ <br>
+
+ <div align="center">
+ <img src="./assets/demo.gif" width="800">
+ </div>
+
+ # Dolphin: Document Image Parsing via Heterogeneous Anchor Prompting
+
+ Dolphin (**Do**cument Image **P**arsing via **H**eterogeneous Anchor Prompt**in**g) is a novel multimodal document image parsing model that follows an analyze-then-parse two-stage paradigm. This repository contains Dolphin's demo code and pre-trained models.
+
+ ## 📑 Overview
+
+ Document image parsing is challenging due to the complex interleaving of elements such as text paragraphs, figures, formulas, and tables within a page. Dolphin addresses these challenges with a two-stage approach:
+
+ 1. **🔍 Stage 1**: Comprehensive page-level layout analysis that generates a sequence of elements in natural reading order
+ 2. **🧩 Stage 2**: Efficient parallel parsing of document elements using heterogeneous anchors and task-specific prompts (a code sketch of this flow follows the framework figure below)
+
+ <div align="center">
+ <img src="./assets/framework.png" width="680">
+ </div>
+
+ Dolphin achieves strong performance on diverse page-level and element-level parsing tasks, while its lightweight architecture and parallel parsing mechanism deliver excellent efficiency.
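+
+ The snippet below is a minimal sketch of this two-stage flow using the `DOLPHIN` class from `chat.py`. The Stage 1 layout prompt shown here is only an illustrative placeholder; see `demo_page.py` / `demo_page_hf.py` for the actual page-level pipeline.
+
+ ```python
+ # Minimal sketch of the analyze-then-parse flow (illustrative, not the full demo_page.py pipeline).
+ from PIL import Image
+ from omegaconf import OmegaConf
+
+ from chat import DOLPHIN
+
+ config = OmegaConf.load("./config/Dolphin.yaml")
+ model = DOLPHIN(config)
+
+ # Stage 1: page-level layout analysis in natural reading order
+ # (the prompt string below is a placeholder for illustration)
+ page = Image.open("./demo/page_imgs/page_1.jpeg").convert("RGB")
+ layout_sequence = model.chat("Parse the reading order of this document.", page)
+
+ # Stage 2: parallel parsing of individual elements with task-specific prompts.
+ # chat() accepts lists of prompts/images and decodes them in batches (max_batch_size).
+ element_images = [
+     Image.open("./demo/element_imgs/table_1.jpeg").convert("RGB"),
+     Image.open("./demo/element_imgs/para_1.jpg").convert("RGB"),
+ ]
+ element_prompts = ["Parse the table in the image.", "Read text in the image."]
+ element_results = model.chat(element_prompts, element_images, max_batch_size=8)
+
+ print(layout_sequence)
+ print(element_results)
+ ```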
+
+ ## 🚀 Demo
+ Try our demo at [Demo-Dolphin](http://115.190.42.15:8888/dolphin/).
+
+ ## 📅 Changelog
+ - 🔥 **2025.06.30** Added [TensorRT-LLM](https://github.com/bytedance/Dolphin/blob/master/deployment/tensorrt_llm/ReadMe.md) support for faster inference!
+ - 🔥 **2025.06.27** Added [vLLM](https://github.com/bytedance/Dolphin/blob/master/deployment/vllm/ReadMe.md) support for faster inference!
+ - 🔥 **2025.06.13** Added multi-page PDF document parsing.
+ - 🔥 **2025.05.21** Our demo has been released at [this link](http://115.190.42.15:8888/dolphin/). Come and try it!
+ - 🔥 **2025.05.20** The pre-trained model and inference code of Dolphin are released.
+ - 🔥 **2025.05.16** Our paper has been accepted by ACL 2025. Paper link: [arXiv](https://arxiv.org/abs/2505.14059).
+
+ ## 🛠️ Installation
+
+ 1. Clone the repository:
+ ```bash
+ git clone https://github.com/ByteDance/Dolphin.git
+ cd Dolphin
+ ```
+
+ 2. Install dependencies:
+ ```bash
+ pip install -r requirements.txt
+ ```
+
+ 3. Download the pre-trained model using one of the following options:
+
+ **Option A: Original model format (config-based)**
+
+ Download from [Baidu Netdisk](https://pan.baidu.com/s/15zcARoX0CTOHKbW8bFZovQ?pwd=9rpx) or [Google Drive](https://drive.google.com/drive/folders/1PQJ3UutepXvunizZEw-uGaQ0BCzf-mie?usp=sharing), and put it in the `./checkpoints` folder.
+
+ **Option B: Hugging Face model format**
+
+ Visit our Hugging Face [model card](https://huggingface.co/ByteDance/Dolphin), or download the model via:
+
+ ```bash
+ # Download the model from Hugging Face Hub
+ git lfs install
+ git clone https://huggingface.co/ByteDance/Dolphin ./hf_model
+ # Or use the Hugging Face CLI
+ pip install huggingface_hub
+ huggingface-cli download ByteDance/Dolphin --local-dir ./hf_model
+ ```
+
+ ## ⚡ Inference
+
+ Dolphin provides two inference frameworks and supports two parsing granularities:
+ - **Page-level parsing**: Parse an entire document page into structured JSON and Markdown formats
+ - **Element-level parsing**: Parse individual document elements (text, tables, formulas)
+
+ ### 📄 Page-level Parsing
+
+ #### Using the original framework (config-based)
+
+ ```bash
+ # Process a single document image
+ python demo_page.py --config ./config/Dolphin.yaml --input_path ./demo/page_imgs/page_1.jpeg --save_dir ./results
+
+ # Process a single document PDF
+ python demo_page.py --config ./config/Dolphin.yaml --input_path ./demo/page_imgs/page_6.pdf --save_dir ./results
+
+ # Process all documents in a directory
+ python demo_page.py --config ./config/Dolphin.yaml --input_path ./demo/page_imgs --save_dir ./results
+
+ # Use a custom batch size for parallel element decoding
+ python demo_page.py --config ./config/Dolphin.yaml --input_path ./demo/page_imgs --save_dir ./results --max_batch_size 8
+ ```
+
+ #### Using the Hugging Face framework
+
+ ```bash
+ # Process a single document image
+ python demo_page_hf.py --model_path ./hf_model --input_path ./demo/page_imgs/page_1.jpeg --save_dir ./results
+
+ # Process a single document PDF
+ python demo_page_hf.py --model_path ./hf_model --input_path ./demo/page_imgs/page_6.pdf --save_dir ./results
+
+ # Process all documents in a directory
+ python demo_page_hf.py --model_path ./hf_model --input_path ./demo/page_imgs --save_dir ./results
+
+ # Use a custom batch size for parallel element decoding
+ python demo_page_hf.py --model_path ./hf_model --input_path ./demo/page_imgs --save_dir ./results --max_batch_size 16
+ ```
+
+ ### 🧩 Element-level Parsing
+
+ #### Using the original framework (config-based)
+
+ ```bash
+ # Process a single table image
+ python demo_element.py --config ./config/Dolphin.yaml --input_path ./demo/element_imgs/table_1.jpeg --element_type table
+
+ # Process a single formula image
+ python demo_element.py --config ./config/Dolphin.yaml --input_path ./demo/element_imgs/line_formula.jpeg --element_type formula
+
+ # Process a single text paragraph image
+ python demo_element.py --config ./config/Dolphin.yaml --input_path ./demo/element_imgs/para_1.jpg --element_type text
+ ```
+
+ #### Using the Hugging Face framework
+
+ ```bash
+ # Process a single table image
+ python demo_element_hf.py --model_path ./hf_model --input_path ./demo/element_imgs/table_1.jpeg --element_type table
+
+ # Process a single formula image
+ python demo_element_hf.py --model_path ./hf_model --input_path ./demo/element_imgs/line_formula.jpeg --element_type formula
+
+ # Process a single text paragraph image
+ python demo_element_hf.py --model_path ./hf_model --input_path ./demo/element_imgs/para_1.jpg --element_type text
+ ```
+
+ ## 🌟 Key Features
+
+ - 🔄 Two-stage analyze-then-parse approach based on a single VLM
+ - 📊 Strong performance on document parsing tasks
+ - 🔍 Element sequence generation in natural reading order
+ - 🧩 Heterogeneous anchor prompting for different document elements
+ - ⏱️ Efficient parallel parsing mechanism
+ - 🤗 Hugging Face Transformers support for easy integration
+
+ ## 📮 Notice
+ **Call for bad cases:** If you encounter cases where the model performs poorly, you are very welcome to share them in an issue. We are continuously working to optimize and improve the model.
+
+ ## 💖 Acknowledgements
+
+ We would like to thank the following open-source projects for providing inspiration and references for this work:
+ - [Donut](https://github.com/clovaai/donut/)
+ - [Nougat](https://github.com/facebookresearch/nougat)
+ - [GOT](https://github.com/Ucas-HaoranWei/GOT-OCR2.0)
+ - [MinerU](https://github.com/opendatalab/MinerU/tree/master)
+ - [Swin](https://github.com/microsoft/Swin-Transformer)
+ - [Hugging Face Transformers](https://github.com/huggingface/transformers)
+
+ ## 📝 Citation
+
+ If you find this code useful in your research, please cite it with the following BibTeX entry.
+
+ ```bibtex
+ @article{feng2025dolphin,
+   title={Dolphin: Document Image Parsing via Heterogeneous Anchor Prompting},
+   author={Feng, Hao and Wei, Shu and Fei, Xiang and Shi, Wei and Han, Yingdong and Liao, Lei and Lu, Jinghui and Wu, Binghong and Liu, Qi and Lin, Chunhui and others},
+   journal={arXiv preprint arXiv:2505.14059},
+   year={2025}
+ }
+ ```
+
+ ## Star History
+
+ [![Star History Chart](https://api.star-history.com/svg?repos=bytedance/Dolphin&type=Date)](https://www.star-history.com/#bytedance/Dolphin&Date)
README_PDF_APP.md ADDED
@@ -0,0 +1,117 @@
+ # DOLPHIN PDF Document AI - HuggingFace Spaces App
+
+ A Gradio-based web application for processing PDF documents using the DOLPHIN vision-language model. This app converts PDF files to images and processes them page by page to extract text, tables, and figures.
+
+ ## Features
+
+ - **PDF Upload**: Upload PDF documents directly through the web interface
+ - **Page-by-Page Processing**: Converts PDF pages to high-quality images and processes each individually
+ - **Document Parsing**: Extracts text, tables, and figures using the DOLPHIN model
+ - **Markdown Output**: Generates clean markdown with embedded images and tables
+ - **Memory Optimized**: Designed for NVIDIA T4 GPU deployment on HuggingFace Spaces
+ - **Progress Tracking**: Real-time progress updates during processing
+
+ ## Files
+
+ - `gradio_pdf_app.py` - Main Gradio application with PDF processing functionality
+ - `app.py` - HuggingFace Spaces entry point
+ - `requirements_hf_spaces.txt` - Dependencies optimized for HF Spaces deployment
+
+ ## Usage
+
+ ### Local Development
+
+ ```bash
+ # Install dependencies
+ pip install -r requirements_hf_spaces.txt
+
+ # Run the app
+ python gradio_pdf_app.py
+ ```
+
+ ### HuggingFace Spaces Deployment
+
+ 1. Create a new HuggingFace Space with Gradio SDK
+ 2. Upload the following files:
+    - `app.py`
+    - `gradio_pdf_app.py`
+    - `utils/` (directory with utility functions)
+    - `requirements_hf_spaces.txt` (rename to `requirements.txt`)
+
+ 3. Configure the Space:
+    - **SDK**: Gradio
+    - **Hardware**: NVIDIA T4 Small (recommended)
+    - **Python Version**: 3.9+
+
+ ## Technical Details
+
+ ### Memory Optimizations
+
+ - Uses `torch.float16` for GPU inference
+ - Smaller batch sizes (4) for element processing
+ - Memory cleanup with `torch.cuda.empty_cache()`
+ - Reduced max sequence length (2048) for generation
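+
+ A rough sketch of how these optimizations fit together, using the `DOLPHIN` wrapper from `demo_element_hf.py` (the Spaces app applies the same ideas inside `gradio_pdf_app.py` with its own wrapper, smaller batches, and a reduced max length):
+
+ ```python
+ # Sketch only: assumes the Hugging Face model has been downloaded to ./hf_model.
+ import torch
+ from PIL import Image
+
+ from demo_element_hf import DOLPHIN  # loads the model in float16 and moves it to GPU if available
+
+ model = DOLPHIN("./hf_model")
+ image = Image.open("./demo/element_imgs/para_1.jpg").convert("RGB")
+ text = model.chat("Read text in the image.", image)
+
+ # Release cached GPU blocks between pages/documents to keep T4 memory usage in check
+ if torch.cuda.is_available():
+     torch.cuda.empty_cache()
+ ```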
+
+ ### PDF Processing Pipeline
+
+ 1. **PDF to Images**: Uses PyMuPDF with 2x zoom for quality
+ 2. **Layout Analysis**: DOLPHIN model parses document structure
+ 3. **Element Extraction**: Processes text, tables, and figures separately
+ 4. **Markdown Generation**: Converts results to formatted markdown
+ 5. **Gallery View**: Creates overview of all processed pages
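+
+ Step 1 can be sketched with PyMuPDF roughly as follows (a simplified version, not the app's exact code):
+
+ ```python
+ import fitz  # PyMuPDF
+ from PIL import Image
+
+ def pdf_to_images(pdf_path):
+     """Render each PDF page at 2x zoom and return a list of PIL images."""
+     images = []
+     with fitz.open(pdf_path) as doc:
+         for page in doc:
+             pix = page.get_pixmap(matrix=fitz.Matrix(2, 2))  # 2x zoom for quality
+             images.append(Image.frombytes("RGB", (pix.width, pix.height), pix.samples))
+     return images
+ ```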
+
+ ### Model Integration
+
+ - Uses HuggingFace transformers implementation
+ - Loads model with `device_map="auto"` for GPU optimization
+ - Batch processing for improved efficiency
+ - Graceful fallback to CPU if GPU unavailable
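+
+ A hedged sketch of that loading logic (the exact arguments used by the app may differ; `device_map="auto"` requires the `accelerate` package):
+
+ ```python
+ import torch
+ from transformers import AutoProcessor, VisionEncoderDecoderModel
+
+ model_path = "./hf_model"
+ processor = AutoProcessor.from_pretrained(model_path)
+
+ if torch.cuda.is_available():
+     # Place the float16 weights automatically on the available GPU(s)
+     model = VisionEncoderDecoderModel.from_pretrained(
+         model_path, torch_dtype=torch.float16, device_map="auto"
+     )
+ else:
+     # Graceful fallback: full-precision weights on CPU
+     model = VisionEncoderDecoderModel.from_pretrained(model_path)
+
+ model.eval()
+ ```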
+
+ ## Configuration
+
+ The app automatically detects and uses the DOLPHIN model:
+ - Local path: `./hf_model`
+ - HuggingFace Hub: `ByteDance/DOLPHIN`
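+
+ A minimal sketch of that detection (the app's actual check may differ):
+
+ ```python
+ import os
+
+ # Prefer a local copy of the model; otherwise fall back to the Hub repo ID
+ MODEL_ID = "./hf_model" if os.path.isdir("./hf_model") else "ByteDance/DOLPHIN"
+ ```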
+
+ ## Dependencies
+
+ Core requirements:
+ - `torch>=2.1.0` - PyTorch for model inference
+ - `transformers>=4.47.0` - HuggingFace model loading
+ - `gradio>=5.36.0` - Web interface
+ - `pymupdf>=1.26.0` - PDF processing
+ - `pillow>=9.3.0` - Image processing
+ - `opencv-python-headless>=4.8.0` - Computer vision operations
+
+ ## Error Handling
+
+ - Graceful handling of PDF conversion failures
+ - Memory management for large documents
+ - Progress reporting for long-running operations
+ - Fallback markdown generation if converter fails
+
+ ## Performance Notes
+
+ - Optimized for NVIDIA T4 with 16GB VRAM
+ - Processing time: ~30-60 seconds per page (depends on complexity)
+ - Memory usage: ~8-12GB VRAM for typical documents
+ - CPU fallback available but significantly slower
+
+ ## Example Output
+
+ The app generates:
+ 1. **Markdown Preview**: Rendered document with LaTeX support
+ 2. **Raw Markdown**: Source text for copying/editing
+ 3. **Page Gallery**: Visual overview of all processed pages
+ 4. **JSON Details**: Technical processing information
+
+ ## Troubleshooting
+
+ - **Out of Memory**: Reduce batch size or use CPU
+ - **PDF Conversion Failed**: Check PDF format compatibility
+ - **Model Loading Error**: Verify model path and permissions
+ - **Slow Processing**: Ensure GPU is available and configured
+
+ ## Credits
+
+ Built on the DOLPHIN model by ByteDance. Optimized for HuggingFace Spaces deployment.
app.py ADDED
@@ -0,0 +1,27 @@
+ """
+ HuggingFace Spaces entry point for DOLPHIN PDF Document AI
+ """
+
+ import os
+ import sys
+
+ # Add the current directory to Python path for imports
+ sys.path.append(os.path.dirname(os.path.abspath(__file__)))
+
+ # Import and run the Gradio app
+ from gradio_pdf_app import demo
+
+ if __name__ == "__main__":
+     # Launch the app for HuggingFace Spaces
+     # Note: enable_queue/show_tips were removed in Gradio 4+, so with
+     # gradio>=5 the request queue is enabled via demo.queue() instead.
+     demo.queue().launch(
+         server_name="0.0.0.0",
+         server_port=7860,
+         share=False,
+         show_error=True,
+         max_threads=2,
+         # Additional HF Spaces specific settings
+         inbrowser=False,
+         quiet=True,
+     )
assets/demo.gif ADDED

Git LFS Details

  • SHA256: 003bcda91af8e23c007d6d1c5e23bee177c5735c7ba914b9ee33670829d59a2c
  • Pointer size: 132 Bytes
  • Size of remote file: 3.23 MB
assets/dolphin.png ADDED

Git LFS Details

  • SHA256: 3f462bb6eaf6cf9ba02caa04966ec354e1352f2cb1ac3e03ead082a0ba725170
  • Pointer size: 130 Bytes
  • Size of remote file: 83.3 kB
assets/framework.png ADDED

Git LFS Details

  • SHA256: f23f47c5ec092369a0707fa6e82ec4dd03ed10044b00ef10aff5f7c89570187e
  • Pointer size: 132 Bytes
  • Size of remote file: 2 MB
chat.py ADDED
@@ -0,0 +1,198 @@
1
+ """
2
+ Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
3
+ SPDX-License-Identifier: MIT
4
+ """
5
+
6
+ import os
7
+ import warnings
8
+ from collections import OrderedDict
9
+
10
+ from omegaconf import ListConfig
11
+
12
+ warnings.filterwarnings("ignore", category=UserWarning)
13
+ warnings.filterwarnings("ignore", category=FutureWarning)
14
+ os.environ.setdefault("PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION", "python")
15
+
16
+ import torch
17
+ from PIL import Image
18
+ from transformers import PreTrainedTokenizerFast
19
+
20
+ from utils.model import DonutConfig, DonutModel, SwinEncoder
21
+ from utils.processor import DolphinProcessor
22
+
23
+
24
+ def try_rename_lagacy_weights(ckpt, output_path=""):
25
+ if "state_dict" in ckpt.keys():
26
+ ckpt = ckpt["state_dict"]
27
+ if "module" in ckpt.keys():
28
+ ckpt = ckpt["module"]
29
+ new_ckpt = OrderedDict()
30
+ for k, v in ckpt.items():
31
+ if k.startswith("model."):
32
+ k = k[len("model.") :]
33
+ if k.startswith("encoder"):
34
+ new_ckpt["vpm" + k[len("encoder") :]] = v
35
+ elif k.startswith("decoder"):
36
+ new_ckpt["llm" + k[len("encoder") :]] = v
37
+ else:
38
+ new_ckpt[k] = v
39
+ if output_path:
40
+ torch.save(new_ckpt, output_path)
41
+ return new_ckpt
42
+
43
+
44
+ def convert_listconfig_to_list(config):
45
+ new_config = {}
46
+ for k, v in config.items():
47
+ if isinstance(v, ListConfig):
48
+ new_config[k] = list(v)
49
+ else:
50
+ new_config[k] = v
51
+ return new_config
52
+
53
+
54
+ class DOLPHIN:
55
+ def __init__(self, config, ckpt_path="") -> None:
56
+ self.model_args = config.model
57
+ self.swin_args = config.model.pop("swin_args")
58
+ self.swin_args = convert_listconfig_to_list(self.swin_args)
59
+
60
+ vision_tower = SwinEncoder(
61
+ input_size=self.swin_args["img_size"],
62
+ patch_size=self.swin_args["patch_size"],
63
+ embed_dim=self.swin_args["embed_dim"],
64
+ window_size=self.swin_args["window_size"],
65
+ encoder_layer=self.swin_args["encoder_layer"],
66
+ num_heads=self.swin_args["num_heads"],
67
+ align_long_axis=self.swin_args["align_long_axis"],
68
+ )
69
+
70
+ self.tokenizer = PreTrainedTokenizerFast(tokenizer_file=self.model_args.tokenizer_path)
71
+ self.tokenizer.pad_token = "<pad>"
72
+ self.tokenizer.bos_token = "<s>"
73
+ self.tokenizer.eos_token = "</s>"
74
+ self.tokenizer.unk_token = "<unk>"
75
+
76
+ if self.model_args.get("extra_answer_tokens", False):
77
+ # print("Allowing multitask training: adding <Answer/> to the tokenizer.")
78
+ prompt_end_token = " <Answer/>"
79
+ self.tokenizer.add_special_tokens({"additional_special_tokens": sorted(set([prompt_end_token]))})
80
+ self.tokenizer._prompt_end_token = prompt_end_token
81
+ self.tokenizer._prompt_end_token_id = self.tokenizer.convert_tokens_to_ids(prompt_end_token)
82
+
83
+ donut_config = DonutConfig(
84
+ decoder_layer=self.model_args.decoder_layer,
85
+ max_length=self.model_args.max_length,
86
+ max_position_embeddings=self.model_args.max_position_embeddings,
87
+ hidden_dimension=self.model_args.hidden_dimension,
88
+ )
89
+
90
+ self.model = DonutModel(config=donut_config, vision_tower=vision_tower, tokenizer=self.tokenizer)
91
+ if self.model_args.model_name_or_path:
92
+ ckpt = torch.load(self.model_args.model_name_or_path)
93
+ ckpt = try_rename_lagacy_weights(ckpt)
94
+ self.model.load_state_dict(ckpt, strict=True)
95
+
96
+ device = "cuda" if torch.cuda.is_available() else "cpu"
97
+ self.model.to(device)
98
+ self.model.eval()
99
+ transform_args = {
100
+ "input_size": self.swin_args["img_size"],
101
+ "max_length": self.model_args.max_length,
102
+ }
103
+ self.processor = DolphinProcessor({}, self.tokenizer, transform_args=transform_args)
104
+
105
+ def chat(
106
+ self,
107
+ question,
108
+ image,
109
+ return_raw=False,
110
+ return_score=False,
111
+ return_img_size=False,
112
+ only_return_img_size=False,
113
+ max_batch_size=16,
114
+ ):
115
+
116
+ def _preprocess_image(image):
117
+ if isinstance(image, str):
118
+ image = Image.open(image).convert("RGB")
119
+ if return_img_size or only_return_img_size:
120
+ image_tensor, ori_size = self.processor.process_image_for_inference(image, return_img_size=True)
121
+ else:
122
+ image_tensor = self.processor.process_image_for_inference(image, return_img_size=False)
123
+ ori_size = None
124
+ return image_tensor, ori_size
125
+
126
+ def _preprocess_prompt(question):
127
+ if self.model_args.get("extra_answer_tokens", False):
128
+ if self.tokenizer._prompt_end_token not in question:
129
+ question = question + self.tokenizer._prompt_end_token
130
+ prompt_ids = self.processor.process_prompt_for_inference(question)
131
+ return prompt_ids
132
+
133
+ def _preprocess_prompt_batch(question):
134
+ if self.model_args.get("extra_answer_tokens", False):
135
+ for i in range(len(question)):
136
+ if self.tokenizer._prompt_end_token not in question[i]:
137
+ question[i] = question[i] + self.tokenizer._prompt_end_token
138
+ if not question[i].startswith("<s>"):
139
+ question[i] = "<s>" + question[i]
140
+ return question
141
+
142
+ def _postprocess(output, question):
143
+ output = output.replace("<s>", "").replace(question, "").replace("</s>", "").replace("<pad>", "")
144
+ if self.model_args.get("extra_answer_tokens", False):
145
+ output = output.split(self.tokenizer._prompt_end_token)[-1]
146
+ return output
147
+
148
+ if isinstance(question, list):
149
+ image_tensor_list = []
150
+ for i in image:
151
+ image_tensor, ori_size = _preprocess_image(i)
152
+ image_tensor_list.append(image_tensor)
153
+ image_tensor = torch.cat(image_tensor_list, dim=0)
154
+
155
+ question = _preprocess_prompt_batch(question)
156
+ self.processor.tokenizer.padding_side = "left"
157
+ prompt_ids = self.processor.tokenizer(
158
+ question, add_special_tokens=False, return_tensors="pt", padding=True
159
+ ).input_ids
160
+ else:
161
+ image_tensor, ori_size = _preprocess_image(image)
162
+ prompt_ids = _preprocess_prompt(question)
163
+
164
+ if only_return_img_size:
165
+ return ori_size
166
+
167
+ model_output_batch = []
168
+ for i in range(0, image_tensor.shape[0], max_batch_size):
169
+ image_tensor_batch = image_tensor[i : i + max_batch_size]
170
+ prompt_ids_batch = prompt_ids[i : i + max_batch_size]
171
+ model_output = self.model.inference(image_tensors=image_tensor_batch, prompt_ids=prompt_ids_batch)
172
+ model_output_batch.append(model_output)
173
+ model_output = {}
174
+ for k, v in model_output_batch[0].items():
175
+ if isinstance(v, torch.Tensor):
176
+ model_output[k] = sum(
177
+ [v_batch[k].cpu().numpy().tolist() for v_batch in model_output_batch],
178
+ [],
179
+ )
180
+ else:
181
+ model_output[k] = sum([v_batch[k] for v_batch in model_output_batch], [])
182
+
183
+ if return_raw:
184
+ if return_img_size:
185
+ return model_output, ori_size
186
+ return model_output
187
+ else:
188
+ if isinstance(question, list):
189
+ output = [_postprocess(model_output["repetitions"][i], question[i]) for i in range(len(question))]
190
+ score = model_output["scores"]
191
+ else:
192
+ output = _postprocess(model_output["repetitions"][0], question)
193
+ score = model_output["scores"][0]
194
+ if return_score:
195
+ return output, score
196
+ if return_img_size:
197
+ return output, ori_size
198
+ return output
config/Dolphin.yaml ADDED
@@ -0,0 +1,17 @@
+ model:
+   model_name_or_path: "./checkpoints/dolphin_model.bin"
+   tokenizer_path: "./checkpoints/dolphin_tokenizer.json"
+   extra_answer_tokens: True  # add <Answer/> token
+   max_length: 4096
+   decoder_layer: 10
+   max_position_embeddings: 4096
+   hidden_dimension: 1024
+   swin_args:
+     name: 'swin'
+     img_size: [896, 896]
+     patch_size: 4
+     embed_dim: 128
+     align_long_axis: False
+     window_size: 7
+     encoder_layer: [2, 2, 14, 2]
+     num_heads: [4, 8, 16, 32]
demo/.DS_Store ADDED
Binary file (6.15 kB).
 
demo/element_imgs/.DS_Store ADDED
Binary file (6.15 kB).
 
demo/element_imgs/block_formula.jpeg ADDED

Git LFS Details

  • SHA256: 5dc9c328d058816ef31d878a0d42f0751606afd3b77854057910a81451dae1b4
  • Pointer size: 130 Bytes
  • Size of remote file: 92.5 kB
demo/element_imgs/line_formula.jpeg ADDED

Git LFS Details

  • SHA256: 65e2be8cc82c609364e1f921cacb822213f0ca2eafd86f5721b6f0499ceb8712
  • Pointer size: 130 Bytes
  • Size of remote file: 55.3 kB
demo/element_imgs/markdown/.DS_Store ADDED
Binary file (6.15 kB).
 
demo/element_imgs/markdown/table_1.md ADDED
@@ -0,0 +1,2 @@
1
+ <table><tr><td></td><td></td><td>100-class (top-1 acc.)</td><td>1000-class (top-1 acc.)</td></tr><tr><td colspan="2">4096-d (float)</td><td>77.1 ± 1.5</td><td>65.0</td></tr><tr><td rowspan="3">1024 bits</td><td>BP</td><td>72.9 ± 1.3</td><td>58.1</td></tr><tr><td>CBE</td><td>73.0 ± 1.3</td><td>59.2</td></tr><tr><td>SP</td><td>73.8 ± 1.3</td><td>60.1</td></tr><tr><td rowspan="4">4096 bits</td><td>threshold [1]</td><td>73.5 ± 1.4</td><td>59.1</td></tr><tr><td>BP</td><td>76.0 ± 1.5</td><td>63.2</td></tr><tr><td>CBE</td><td>75.9 ± 1.4</td><td>63.0</td></tr><tr><td>SP</td><td>76.3 ± 1.5</td><td>63.3</td></tr><tr><td>8192 bits</td><td>SP</td><td>76.8 ± 1.4</td><td>64.2</td></tr><tr><td>16384 bits</td><td>SP</td><td>77.1 ± 1.6</td><td>64.5</td></tr></table>
2
+
demo/element_imgs/para_1.jpg ADDED

Git LFS Details

  • SHA256: 68308a404e8e4c111f5cc1568e7f4b74f1f0c08ad4485e2ad9e78869f79a556b
  • Pointer size: 130 Bytes
  • Size of remote file: 18.7 kB
demo/element_imgs/para_2.jpg ADDED

Git LFS Details

  • SHA256: 8d9eda1c71490b76ac5d3ef33f436fb6e6db4ca3b625d5d74f35c3b248949c56
  • Pointer size: 130 Bytes
  • Size of remote file: 69.8 kB
demo/element_imgs/para_3.jpeg ADDED

Git LFS Details

  • SHA256: b372541d80263c5508b8b85ccf847123874efdb4c25473845fbf042f2d9cc5a9
  • Pointer size: 130 Bytes
  • Size of remote file: 84 kB
demo/element_imgs/recognition_json/table_1.json ADDED
@@ -0,0 +1,6 @@
1
+ [
2
+ {
3
+ "label": "tab",
4
+ "text": "<table><tr><td></td><td></td><td>100-class (top-1 acc.)</td><td>1000-class (top-1 acc.)</td></tr><tr><td colspan=\"2\">4096-d (float)</td><td>77.1 ± 1.5</td><td>65.0</td></tr><tr><td rowspan=\"3\">1024 bits</td><td>BP</td><td>72.9 ± 1.3</td><td>58.1</td></tr><tr><td>CBE</td><td>73.0 ± 1.3</td><td>59.2</td></tr><tr><td>SP</td><td>73.8 ± 1.3</td><td>60.1</td></tr><tr><td rowspan=\"4\">4096 bits</td><td>threshold [1]</td><td>73.5 ± 1.4</td><td>59.1</td></tr><tr><td>BP</td><td>76.0 ± 1.5</td><td>63.2</td></tr><tr><td>CBE</td><td>75.9 ± 1.4</td><td>63.0</td></tr><tr><td>SP</td><td>76.3 ± 1.5</td><td>63.3</td></tr><tr><td>8192 bits</td><td>SP</td><td>76.8 ± 1.4</td><td>64.2</td></tr><tr><td>16384 bits</td><td>SP</td><td>77.1 ± 1.6</td><td>64.5</td></tr></table>"
5
+ }
6
+ ]
demo/element_imgs/table_1.jpeg ADDED

Git LFS Details

  • SHA256: 1ccce9dab1a1b537ae502183f461ad3331a2b9eeb8574790e6ec43ca54f24e2c
  • Pointer size: 131 Bytes
  • Size of remote file: 183 kB
demo/element_imgs/table_2.jpeg ADDED

Git LFS Details

  • SHA256: 3fdc67f4bb8afee58ff4ee84412581deb771cf26f5fe9eead742108700e9650e
  • Pointer size: 131 Bytes
  • Size of remote file: 406 kB
demo/page_imgs/.DS_Store ADDED
Binary file (8.2 kB).
 
demo/page_imgs/markdown/.DS_Store ADDED
Binary file (6.15 kB).
 
demo/page_imgs/markdown/figures/.DS_Store ADDED
Binary file (6.15 kB).
 
demo/page_imgs/markdown/figures/test_page3_figure_000.png ADDED

Git LFS Details

  • SHA256: eba97bcb2eefbc653f4b5db7572799a9674b8fd39e5f14d261c33e1916a9f009
  • Pointer size: 130 Bytes
  • Size of remote file: 63.4 kB
demo/page_imgs/markdown/test_page3.md ADDED
@@ -0,0 +1,22 @@
1
+ ![Figure](figures/test_page3_figure_000.png)
2
+
3
+ Figure 2: (left) Scaled Dot-Product Attention. (right) Multi-Head Attention consists of several attention layers running in parallel.
4
+
5
+ query with all keys, divide each by $\sqrt{d_k}$ , and apply a softmax function to obtain the weights on the values.
6
+
7
+ In practice, we compute the attention function on a set of queries simultaneously, packed together into a matrix $Q$ . The keys and values are also packed together into matrices $K$ and $V$ . We compute the matrix of outputs as: $$ \\ \text{Attention}(Q, K, V) = \mathrm{softmax}(\frac{QK^T}{\sqrt{d_k}})V \\ $$
8
+
9
+ The two most commonly used attention functions are additive attention [2] , and dot-product (multiplicative) attention. Dot-product attention is identical to our algorithm, except for the scaling factor of $\frac{1}{\sqrt{d_k}}$ . Additive attention computes the compatibility function using a feed-forward network with a single hidden layer. While the two are similar in theoretical complexity, dot-product attention is much faster and more space-efficient in practice, since it can be implemented using highly optimized matrix multiplication code.
10
+
11
+ While for small values of $d_k$ the two mechanisms perform similarly, additive attention outperforms dot product attention without scaling for larger values of $d_k$ [ 3 ] . We suspect that for large values of $d_k$ , the dot products grow large in magnitude, pushing the softmax function into regions where it has extremely small gradients 4 To counteract this effect, we scale the dot products by $\frac{1}{\sqrt{d_k}}$ .
12
+
13
+ 3.2.2 Multi-Head Attention
14
+
15
+ Instead of performing a single attention function with $d_{\text{model}}$ -dimensional keys, values and queries, we found it beneficial to linearly project the queries, keys and values $h$ times with different, learned linear projections to $d_k$ , $d_k$ and $d_v$ dimensions, respectively. On each of these projected versions of queries, keys and values we then perform the attention function in parallel, yielding $d_v$ -dimensional output values. These are concatenated and once again projected, resulting in the final values, as depicted in Figure 2 .
16
+
17
+ Multi­head attention allows the model to jointly attend to information from different representation subspaces at different positions. With a single attention head, averaging inhibits this.
18
+
19
+ ${ }^{4}$ To illustrate why the dot products get large, assume that the components of $q$ and $k$ are independent random variables with mean 0 and variance 1 . Then their dot product, $q \cdot k=\sum_{i=1}^{d_{k}} q_{i} k_{i}$, has mean 0 and variance $d_{k}$.
20
+
21
+ 4
22
+
demo/page_imgs/page_1.jpeg ADDED

Git LFS Details

  • SHA256: aba4e06f5debeb14a59a193818f6787aa06f17f4cb21c0e483d8267f5397b627
  • Pointer size: 132 Bytes
  • Size of remote file: 1.52 MB
demo/page_imgs/page_2.jpeg ADDED

Git LFS Details

  • SHA256: 25e08746f10d4472d80659869eb73a477ad665d7aaaa850e70aae1bd6076d826
  • Pointer size: 132 Bytes
  • Size of remote file: 1.47 MB
demo/page_imgs/page_3.jpeg ADDED

Git LFS Details

  • SHA256: fe6e35a3c888c77ec36cf48cb762556e489e288d30a457a353ac6bba6fab9251
  • Pointer size: 131 Bytes
  • Size of remote file: 449 kB
demo/page_imgs/page_4.png ADDED

Git LFS Details

  • SHA256: 497cdabe38a4db8318284c0f8963304a876ceceebb796059903703834e4713ed
  • Pointer size: 131 Bytes
  • Size of remote file: 372 kB
demo/page_imgs/page_5.jpg ADDED

Git LFS Details

  • SHA256: 17cdc261fcd7eb8db4a0bdfb56dc2b1f77c8890956f8451f810695e115f6f894
  • Pointer size: 131 Bytes
  • Size of remote file: 641 kB
demo/page_imgs/page_6.pdf ADDED
The diff for this file is too large to render.
 
demo/page_imgs/page_7.jpeg ADDED

Git LFS Details

  • SHA256: 19bb9afdb859e905e017fc3d3bac6da0490093811820529f285a20e8d70609f2
  • Pointer size: 132 Bytes
  • Size of remote file: 1.27 MB
demo/page_imgs/recognition_json/page_1.json ADDED
@@ -0,0 +1,178 @@
1
+ [
2
+ {
3
+ "label": "title",
4
+ "bbox": [
5
+ 271,
6
+ 188,
7
+ 1194,
8
+ 221
9
+ ],
10
+ "text": "LLaMA: Open and Efficient Foundation Language Models",
11
+ "reading_order": 0
12
+ },
13
+ {
14
+ "label": "author",
15
+ "bbox": [
16
+ 313,
17
+ 272,
18
+ 1154,
19
+ 317
20
+ ],
21
+ "text": "Hugo Touvron; Thibaut Lavril*, Gautier Izacard*, Xavier Martinet",
22
+ "reading_order": 1
23
+ },
24
+ {
25
+ "label": "para",
26
+ "bbox": [
27
+ 269,
28
+ 317,
29
+ 1201,
30
+ 425
31
+ ],
32
+ "text": "Marie-Anne Lachaux, Timothee Lacroix, Baptiste Rozière, Naman Goyal\nEric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin\nEdouard Grave*Guillaume Lample*",
33
+ "reading_order": 2
34
+ },
35
+ {
36
+ "label": "para",
37
+ "bbox": [
38
+ 685,
39
+ 440,
40
+ 795,
41
+ 482
42
+ ],
43
+ "text": "Meta AI",
44
+ "reading_order": 3
45
+ },
46
+ {
47
+ "label": "sec",
48
+ "bbox": [
49
+ 376,
50
+ 524,
51
+ 502,
52
+ 565
53
+ ],
54
+ "text": "\\begin{abstract}",
55
+ "reading_order": 4
56
+ },
57
+ {
58
+ "label": "para",
59
+ "bbox": [
60
+ 209,
61
+ 586,
62
+ 675,
63
+ 946
64
+ ],
65
+ "text": "We introduce LLaMA, a collection of founda-\ntion language models ranging from 7B to 65B\nparameters. We train our models on trillions\nof tokens, and show that it is possible to train\nstate-of-the-art models using publicly avail-\nable datasets exclusively, without resorting\nto proprietary and inaccessible datasets. In\nparticular, LLaMA-13B outperforms GPT-3\n(175B) on most benchmarks, and LLaMA-\n65B is competitive with the best models,\nChinchilla-70B and PaLM-540B. We release\nall our models to the research community $^1$ .",
66
+ "reading_order": 5
67
+ },
68
+ {
69
+ "label": "sec",
70
+ "bbox": [
71
+ 167,
72
+ 964,
73
+ 376,
74
+ 1006
75
+ ],
76
+ "text": "1 Introduction",
77
+ "reading_order": 6
78
+ },
79
+ {
80
+ "label": "para",
81
+ "bbox": [
82
+ 167,
83
+ 1027,
84
+ 718,
85
+ 1498
86
+ ],
87
+ "text": "Large Languages Models (LLMs) trained on mas-\nsive corpora of texts have shown their ability to per-\nform new tasks from textual instructions or from a\nfew examples ( Brown et al. , 2020 ) . These few-shot\nproperties first appeared when scaling models to a\nsufficient size ( Kaplan et al. , 2020 ) , resulting in a\nline of work that focuses on further scaling these\nmodels ( Chowdhery et al. , 2022 ; Rae et al. , 2021 ) .\nThese efforts are based on the assumption that\nmore parameters will lead to better performance.\nHowever, recent work from Hoffmann et al. ( 2022 )\nshows that, for a given compute budget, the best\nperformances are not achieved by the largest mod-\nels, but by smaller models trained on more data.",
88
+ "reading_order": 7
89
+ },
90
+ {
91
+ "label": "para",
92
+ "bbox": [
93
+ 167,
94
+ 1506,
95
+ 717,
96
+ 1844
97
+ ],
98
+ "text": "The objective of the scaling laws from Hoff-\nmann et al. ( 2022 ) is to determine how to best\nscale the dataset and model sizes for a particular\ntraining compute budget. However, this objective\ndisregards the inference budget, which becomes\ncritical when serving a language model at scale.\nIn this context, given a target level of performance,\nthe preferred model is not the fastest to train but the\nfastest at inference, and although it may be cheaper\nto train a large model to reach a certain level of",
99
+ "reading_order": 8
100
+ },
101
+ {
102
+ "label": "para",
103
+ "bbox": [
104
+ 753,
105
+ 539,
106
+ 1304,
107
+ 734
108
+ ],
109
+ "text": "performance, a smaller one trained longer will\nultimately be cheaper at inference. For instance,\nalthough Hoffmann et al. ( 2022 ) recommends\ntraining a 10B model on 200B tokens, we find\nthat the performance of a 7B model continues to\nimprove even after 1T tokens.",
110
+ "reading_order": 9
111
+ },
112
+ {
113
+ "label": "para",
114
+ "bbox": [
115
+ 753,
116
+ 769,
117
+ 1305,
118
+ 1236
119
+ ],
120
+ "text": "The focus of this work is to train a series of\nlanguage models that achieve the best possible per-\nformance at various inference budgets, by training\non more tokens than what is typically used. The\nresulting models, called LLaMA , ranges from 7B\nto 65B parameters with competitive performance\ncompared to the best existing LLMs. For instance,\nLLaMA-13B outperforms GPT-3 on most bench-\nmarks, despite being 10 $\\times$ smaller. We believe that\nthis model will help democratize the access and\nstudy of LLMs, since it can be run on a single GPU.\nAt the higher-end of the scale, our 65B-parameter\nmodel is also competitive with the best large lan-\nguage models such as Chinchilla or PaLM-540B.",
121
+ "reading_order": 10
122
+ },
123
+ {
124
+ "label": "para",
125
+ "bbox": [
126
+ 753,
127
+ 1257,
128
+ 1305,
129
+ 1601
130
+ ],
131
+ "text": "Unlike Chinchilla, PaLM, or GPT-3, we only\nuse publicly available data, making our work com-\npatible with open-sourcing, while most existing\nmodels rely on data which is either not publicly\navailable or undocumented (e.g. “ Books – 2TB ” or\n“ Social media conversations ” ). There exist some\nexceptions, notably OPT ( Zhang et al. , 2022 ) ,\nGPT-NeoX ( Black et al. , 2022 ) , BLOOM ( Scao\net al. , 2022 ) and GLM ( Zeng et al. , 2022 ) , but none\nthat are competitive with PaLM-62B or Chinchilla.",
132
+ "reading_order": 11
133
+ },
134
+ {
135
+ "label": "para",
136
+ "bbox": [
137
+ 753,
138
+ 1634,
139
+ 1304,
140
+ 1933
141
+ ],
142
+ "text": "In the rest of this paper, we present an overview\nof the modifications we made to the transformer\narchitecture ( Vaswani et al. , 2017 ) , as well as our\ntraining method. We then report the performance of\nour models and compare with others LLMs on a set\nof standard benchmarks. Finally, we expose some\nof the biases and toxicity encoded in our models,\nusing some of the most recent benchmarks from\nthe responsible AI community.",
143
+ "reading_order": 12
144
+ },
145
+ {
146
+ "label": "fnote",
147
+ "bbox": [
148
+ 167,
149
+ 1844,
150
+ 712,
151
+ 1907
152
+ ],
153
+ "text": "* Equal contribution.\nCorrespondence:\n{htouvron\nthibautlav,gizacard,egrave,glample}@meta.com",
154
+ "reading_order": 13
155
+ },
156
+ {
157
+ "label": "fnote",
158
+ "bbox": [
159
+ 209,
160
+ 1907,
161
+ 632,
162
+ 1931
163
+ ],
164
+ "text": "https://github.com/facebookresearch/llama",
165
+ "reading_order": 14
166
+ },
167
+ {
168
+ "label": "watermark",
169
+ "bbox": [
170
+ 20,
171
+ 649,
172
+ 83,
173
+ 1530
174
+ ],
175
+ "text": "arXiv:2302.1397lvl [cs.CL] 27 Feb 2023",
176
+ "reading_order": 15
177
+ }
178
+ ]
demo/page_imgs/recognition_json/test_page.json ADDED
@@ -0,0 +1,47 @@
1
+ [
2
+ {
3
+ "label": "header",
4
+ "bbox": [
5
+ 291,
6
+ 90,
7
+ 675,
8
+ 120
9
+ ],
10
+ "text": "Scaled Dot-Product Attention",
11
+ "reading_order": 0
12
+ },
13
+ {
14
+ "label": "fig",
15
+ "text": "![Figure](figures/test_page_figure_001.png)",
16
+ "figure_path": "figures/test_page_figure_001.png",
17
+ "bbox": [
18
+ 1274,
19
+ 105,
20
+ 1536,
21
+ 627
22
+ ],
23
+ "reading_order": 1
24
+ },
25
+ {
26
+ "label": "cap",
27
+ "bbox": [
28
+ 168,
29
+ 719,
30
+ 1413,
31
+ 789
32
+ ],
33
+ "text": "Figure 2: (left) Scaled Dot-Product Attention. (right) Multi-Head Attention consists of several\nattention layers running in parallel.",
34
+ "reading_order": 2
35
+ },
36
+ {
37
+ "label": "para",
38
+ "bbox": [
39
+ 168,
40
+ 858,
41
+ 1413,
42
+ 934
43
+ ],
44
+ "text": "query with all keys, divide each by $\\sqrt{d_{k}}$, and apply a softmax function to obtain the weights on the\nvalues.",
45
+ "reading_order": 3
46
+ }
47
+ ]
demo/page_imgs/recognition_json/test_page2.json ADDED
@@ -0,0 +1,102 @@
1
+ [
2
+ {
3
+ "label": "fig",
4
+ "text": "![Figure](figures/test_page2_figure_000.png)",
5
+ "figure_path": "figures/test_page2_figure_000.png",
6
+ "bbox": [
7
+ 394,
8
+ 117,
9
+ 897,
10
+ 837
11
+ ],
12
+ "reading_order": 0
13
+ },
14
+ {
15
+ "label": "cap",
16
+ "bbox": [
17
+ 445,
18
+ 852,
19
+ 856,
20
+ 873
21
+ ],
22
+ "text": "Figure 1: The Transformer - model architecture",
23
+ "reading_order": 1
24
+ },
25
+ {
26
+ "label": "para",
27
+ "bbox": [
28
+ 218,
29
+ 920,
30
+ 1086,
31
+ 1044
32
+ ],
33
+ "text": "wise fully connected feed-forward network. We employ a residual connection [ 10 ] around each of\nthe two sub-layers, followed by layer normalization [ 1 ] . That is, the output of each sub-layer is\n$\\mathrm{LayerNorm}(x+\\mathrm{Sublayer}(x))$ , where $\\mathrm{Sublayer}(x)$ is the function implemented by the sub-layer\nitself. To facilitate these residual connections, all sub-layers in the model, as well as the embedding\nlayers, produce outputs of dimension $d_{\\text{model}}=512$ .",
34
+ "reading_order": 2
35
+ },
36
+ {
37
+ "label": "para",
38
+ "bbox": [
39
+ 218,
40
+ 1071,
41
+ 1085,
42
+ 1244
43
+ ],
44
+ "text": "The The decoder is also composed of a stack of $N=6$ identical layers. In addition to the two\nsub-layers in each encoder layer, the decoder inserts a third sub-layer, which performs multi-head\nattention over the output of the encoder stack. Similar to the encoder, we employ residual connections\naround each of the sub-layers, followed by layer normalization. We also modify the self-attention\nsub-layer in the decoder stack to prevent positions from attending to subsequent positions. This\nmasking, combined with fact that the output embeddings are offset by one position, ensures that the\npredictions for position $i$ can depend only on the known outputs at positions less than $i$ .",
45
+ "reading_order": 3
46
+ },
47
+ {
48
+ "label": "sub_sec",
49
+ "bbox": [
50
+ 226,
51
+ 1283,
52
+ 344,
53
+ 1305
54
+ ],
55
+ "text": "3.2 Attention",
56
+ "reading_order": 4
57
+ },
58
+ {
59
+ "label": "para",
60
+ "bbox": [
61
+ 218,
62
+ 1322,
63
+ 1087,
64
+ 1422
65
+ ],
66
+ "text": "An attention function can be described as mapping a query and a set of key-value pairs to an output,\nwhere the query, keys, values, and output are all vectors. The output is computed as a weighted sum\nof the values, where the weight assigned to each value is computed by a compatibility function of the\nquery with the corresponding key.",
67
+ "reading_order": 5
68
+ },
69
+ {
70
+ "label": "sub_sub_sec",
71
+ "bbox": [
72
+ 218,
73
+ 1456,
74
+ 562,
75
+ 1474
76
+ ],
77
+ "text": "3.2.1 Scaled Dot-Product Attention",
78
+ "reading_order": 6
79
+ },
80
+ {
81
+ "label": "para",
82
+ "bbox": [
83
+ 218,
84
+ 1498,
85
+ 1085,
86
+ 1546
87
+ ],
88
+ "text": "We call our particular attention \"Scaled Dot-Product Attention\" (Figure 2 ). The input consists of\nqueries and keys of dimension $d_k$ , and values of dimension $d_v$ . We compute the dot products of the",
89
+ "reading_order": 7
90
+ },
91
+ {
92
+ "label": "foot",
93
+ "bbox": [
94
+ 646,
95
+ 1590,
96
+ 662,
97
+ 1607
98
+ ],
99
+ "text": "3",
100
+ "reading_order": 8
101
+ }
102
+ ]
demo/page_imgs/recognition_json/test_page3.json ADDED
@@ -0,0 +1,124 @@
1
+ [
2
+ {
3
+ "label": "fig",
4
+ "text": "![Figure](figures/test_page3_figure_000.png)",
5
+ "figure_path": "figures/test_page3_figure_000.png",
6
+ "bbox": [
7
+ 331,
8
+ 134,
9
+ 984,
10
+ 489
11
+ ],
12
+ "reading_order": 0
13
+ },
14
+ {
15
+ "label": "cap",
16
+ "bbox": [
17
+ 198,
18
+ 554,
19
+ 1065,
20
+ 603
21
+ ],
22
+ "text": "Figure 2: (left) Scaled Dot-Product Attention. (right) Multi-Head Attention consists of several\nattention layers running in parallel.",
23
+ "reading_order": 1
24
+ },
25
+ {
26
+ "label": "para",
27
+ "bbox": [
28
+ 198,
29
+ 652,
30
+ 1065,
31
+ 701
32
+ ],
33
+ "text": "query with all keys, divide each by $\\sqrt{d_k}$ , and apply a softmax function to obtain the weights on the\nvalues.",
34
+ "reading_order": 2
35
+ },
36
+ {
37
+ "label": "para",
38
+ "bbox": [
39
+ 198,
40
+ 715,
41
+ 1065,
42
+ 881
43
+ ],
44
+ "text": "In practice, we compute the attention function on a set of queries simultaneously, packed together\ninto a matrix $Q$ . The keys and values are also packed together into matrices $K$ and $V$ . We compute\nthe matrix of outputs as:\n\\[\n \\text{Attention}(Q, K, V) = \\mathrm{softmax}(\\frac{QK^T}{\\sqrt{d_k}})V\n\\]",
45
+ "reading_order": 3
46
+ },
47
+ {
48
+ "label": "para",
49
+ "bbox": [
50
+ 198,
51
+ 913,
52
+ 1068,
53
+ 1060
54
+ ],
55
+ "text": "The two most commonly used attention functions are additive attention [2] , and dot-product (multi-\nplicative) attention. Dot-product attention is identical to our algorithm, except for the scaling factor\nof $\\frac{1}{\\sqrt{d_k}}$ . Additive attention computes the compatibility function using a feed-forward network with\na single hidden layer. While the two are similar in theoretical complexity, dot-product attention is\nmuch faster and more space-efficient in practice, since it can be implemented using highly optimized\nmatrix multiplication code.",
56
+ "reading_order": 4
57
+ },
58
+ {
59
+ "label": "para",
60
+ "bbox": [
61
+ 198,
62
+ 1074,
63
+ 1066,
64
+ 1175
65
+ ],
66
+ "text": "While for small values of $d_k$ the two mechanisms perform similarly, additive attention outperforms\ndot product attention without scaling for larger values of $d_k$ [ 3 ] . We suspect that for large values of\n$d_k$ , the dot products grow large in magnitude, pushing the softmax function into regions where it has\nextremely small gradients 4 To counteract this effect, we scale the dot products by $\\frac{1}{\\sqrt{d_k}}$ .",
67
+ "reading_order": 5
68
+ },
69
+ {
70
+ "label": "sub_sub_sec",
71
+ "bbox": [
72
+ 198,
73
+ 1207,
74
+ 467,
75
+ 1225
76
+ ],
77
+ "text": "3.2.2 Multi-Head Attention",
78
+ "reading_order": 6
79
+ },
80
+ {
81
+ "label": "para",
82
+ "bbox": [
83
+ 198,
84
+ 1253,
85
+ 1067,
86
+ 1395
87
+ ],
88
+ "text": "Instead of performing a single attention function with $d_{\\text{model}}$ -dimensional keys, values and queries,\nwe found it beneficial to linearly project the queries, keys and values $h$ times with different, learned\nlinear projections to $d_k$ , $d_k$ and $d_v$ dimensions, respectively. On each of these projected versions of\nqueries, keys and values we then perform the attention function in parallel, yielding $d_v$ -dimensional\noutput values. These are concatenated and once again projected, resulting in the final values, as\ndepicted in Figure 2 .",
89
+ "reading_order": 7
90
+ },
91
+ {
92
+ "label": "para",
93
+ "bbox": [
94
+ 198,
95
+ 1403,
96
+ 1065,
97
+ 1453
98
+ ],
99
+ "text": "Multi­head attention allows the model to jointly attend to information from different representation\nsubspaces at different positions. With a single attention head, averaging inhibits this.",
100
+ "reading_order": 8
101
+ },
102
+ {
103
+ "label": "fnote",
104
+ "bbox": [
105
+ 198,
106
+ 1485,
107
+ 1065,
108
+ 1535
109
+ ],
110
+ "text": "${ }^{4}$ To illustrate why the dot products get large, assume that the components of $q$ and $k$ are independent random\nvariables with mean 0 and variance 1 . Then their dot product, $q \\cdot k=\\sum_{i=1}^{d_{k}} q_{i} k_{i}$, has mean 0 and variance $d_{k}$.",
111
+ "reading_order": 9
112
+ },
113
+ {
114
+ "label": "foot",
115
+ "bbox": [
116
+ 625,
117
+ 1578,
118
+ 641,
119
+ 1599
120
+ ],
121
+ "text": "4",
122
+ "reading_order": 10
123
+ }
124
+ ]
demo/page_imgs/test_page2.jpeg ADDED

Git LFS Details

  • SHA256: 2bbda18d9f6ab0279f80718b15d66e1e444279b24a55a23b872f70a382060ac1
  • Pointer size: 131 Bytes
  • Size of remote file: 366 kB
demo/page_imgs/test_page3.jpeg ADDED

Git LFS Details

  • SHA256: f5a5beda63acd2046fc4c7f39e4aa63e70db723936d71488e5819ab106f90ec0
  • Pointer size: 131 Bytes
  • Size of remote file: 358 kB
demo_element.py ADDED
@@ -0,0 +1,129 @@
1
+ """
2
+ Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
3
+ SPDX-License-Identifier: MIT
4
+ """
5
+
6
+ import argparse
7
+ import glob
8
+ import os
9
+
10
+ from omegaconf import OmegaConf
11
+ from PIL import Image
12
+
13
+ from chat import DOLPHIN
14
+ from utils.utils import *
15
+
16
+
17
+ def process_element(image_path, model, element_type, save_dir=None):
18
+ """Process a single element image (text, table, formula)
19
+
20
+ Args:
21
+ image_path: Path to the element image
22
+ model: DOLPHIN model instance
23
+ element_type: Type of element ('text', 'table', 'formula')
24
+ save_dir: Directory to save results (default: same as input directory)
25
+
26
+ Returns:
27
+ Parsed content of the element and recognition results
28
+ """
29
+ # Load and prepare image
30
+ pil_image = Image.open(image_path).convert("RGB")
31
+ pil_image = crop_margin(pil_image)
32
+
33
+ # Select appropriate prompt based on element type
34
+ if element_type == "table":
35
+ prompt = "Parse the table in the image."
36
+ label = "tab"
37
+ elif element_type == "formula":
38
+ prompt = "Read text in the image."
39
+ label = "formula"
40
+ else: # Default to text
41
+ prompt = "Read text in the image."
42
+ label = "text"
43
+
44
+ # Process the element
45
+ result = model.chat(prompt, pil_image)
46
+
47
+ # Create recognition result in the same format as the document parser
48
+ recognition_result = [
49
+ {
50
+ "label": label,
51
+ "text": result.strip(),
52
+ }
53
+ ]
54
+
55
+ # Save results if save_dir is provided
56
+ if save_dir:
57
+ save_outputs(recognition_result, image_path, save_dir)
58
+ print(f"Results saved to {save_dir}")
59
+
60
+ return result, recognition_result
61
+
62
+
63
+ def main():
64
+ parser = argparse.ArgumentParser(description="Element-level processing using DOLPHIN model")
65
+ parser.add_argument("--config", default="./config/Dolphin.yaml", help="Path to configuration file")
66
+ parser.add_argument("--input_path", type=str, required=True, help="Path to input image or directory of images")
67
+ parser.add_argument(
68
+ "--element_type",
69
+ type=str,
70
+ choices=["text", "table", "formula"],
71
+ default="text",
72
+ help="Type of element to process (text, table, formula)",
73
+ )
74
+ parser.add_argument(
75
+ "--save_dir",
76
+ type=str,
77
+ default=None,
78
+ help="Directory to save parsing results (default: same as input directory)",
79
+ )
80
+ parser.add_argument("--print_results", action="store_true", help="Print recognition results to console")
81
+ args = parser.parse_args()
82
+
83
+ # Load Model
84
+ config = OmegaConf.load(args.config)
85
+ model = DOLPHIN(config)
86
+
87
+ # Set save directory
88
+ save_dir = args.save_dir or (
89
+ args.input_path if os.path.isdir(args.input_path) else os.path.dirname(args.input_path)
90
+ )
91
+ setup_output_dirs(save_dir)
92
+
93
+ # Collect Images
94
+ if os.path.isdir(args.input_path):
95
+ image_files = []
96
+ for ext in [".jpg", ".jpeg", ".png", ".JPG", ".JPEG", ".PNG"]:
97
+ image_files.extend(glob.glob(os.path.join(args.input_path, f"*{ext}")))
98
+ image_files = sorted(image_files)
99
+ else:
100
+ if not os.path.exists(args.input_path):
101
+ raise FileNotFoundError(f"Input path {args.input_path} does not exist")
102
+ image_files = [args.input_path]
103
+
104
+ total_samples = len(image_files)
105
+ print(f"\nTotal samples to process: {total_samples}")
106
+
107
+ # Process images one by one
108
+ for image_path in image_files:
109
+ print(f"\nProcessing {image_path}")
110
+ try:
111
+ result, recognition_result = process_element(
112
+ image_path=image_path,
113
+ model=model,
114
+ element_type=args.element_type,
115
+ save_dir=save_dir,
116
+ )
117
+
118
+ if args.print_results:
119
+ print("\nRecognition result:")
120
+ print(result)
121
+ print("-" * 40)
122
+
123
+ except Exception as e:
124
+ print(f"Error processing {image_path}: {str(e)}")
125
+ continue
126
+
127
+
128
+ if __name__ == "__main__":
129
+ main()
demo_element_hf.py ADDED
@@ -0,0 +1,195 @@
1
+ """
2
+ Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
3
+ SPDX-License-Identifier: MIT
4
+ """
5
+
6
+ import argparse
7
+ import glob
8
+ import os
9
+
10
+ import torch
11
+ from PIL import Image
12
+ from transformers import AutoProcessor, VisionEncoderDecoderModel
13
+
14
+ from utils.utils import *
15
+
16
+
17
+ class DOLPHIN:
18
+ def __init__(self, model_id_or_path):
19
+ """Initialize the Hugging Face model
20
+
21
+ Args:
22
+ model_id_or_path: Path to local model or Hugging Face model ID
23
+ """
24
+ # Load model from local path or Hugging Face hub
25
+ self.processor = AutoProcessor.from_pretrained(model_id_or_path)
26
+ self.model = VisionEncoderDecoderModel.from_pretrained(model_id_or_path)
27
+ self.model.eval()
28
+
29
+ # Set device and precision
30
+ self.device = "cuda" if torch.cuda.is_available() else "cpu"
31
+ self.model.to(self.device)
32
+ self.model = self.model.half() # Always use half precision by default
33
+
34
+ # set tokenizer
35
+ self.tokenizer = self.processor.tokenizer
36
+
37
+ def chat(self, prompt, image):
38
+ """Process an image with the given prompt
39
+
40
+ Args:
41
+ prompt: Text prompt to guide the model
42
+ image: PIL Image to process
43
+
44
+ Returns:
45
+ Generated text from the model
46
+ """
47
+ # Prepare image
48
+ pixel_values = self.processor(image, return_tensors="pt").pixel_values
49
+ pixel_values = pixel_values.half()
50
+
51
+ # Prepare prompt
52
+ prompt = f"<s>{prompt} <Answer/>"
53
+ prompt_ids = self.tokenizer(
54
+ prompt,
55
+ add_special_tokens=False,
56
+ return_tensors="pt"
57
+ ).input_ids.to(self.device)
58
+
59
+ decoder_attention_mask = torch.ones_like(prompt_ids)
60
+
61
+ # Generate text
62
+ outputs = self.model.generate(
63
+ pixel_values=pixel_values.to(self.device),
64
+ decoder_input_ids=prompt_ids,
65
+ decoder_attention_mask=decoder_attention_mask,
66
+ min_length=1,
67
+ max_length=4096,
68
+ pad_token_id=self.tokenizer.pad_token_id,
69
+ eos_token_id=self.tokenizer.eos_token_id,
70
+ use_cache=True,
71
+ bad_words_ids=[[self.tokenizer.unk_token_id]],
72
+ return_dict_in_generate=True,
73
+ do_sample=False,
74
+ num_beams=1,
75
+ repetition_penalty=1.1,
76
+ temperature=1.0
77
+ )
78
+
79
+ # Process the output
80
+ sequence = self.tokenizer.batch_decode(outputs.sequences, skip_special_tokens=False)[0]
81
+ sequence = sequence.replace(prompt, "").replace("<pad>", "").replace("</s>", "").strip()
82
+
83
+ return sequence
84
+
85
+ def process_element(image_path, model, element_type, save_dir=None):
86
+ """Process a single element image (text, table, formula)
87
+
88
+ Args:
89
+ image_path: Path to the element image
90
+ model: HFModel model instance
91
+ element_type: Type of element ('text', 'table', 'formula')
92
+ save_dir: Directory to save results (default: same as input directory)
93
+
94
+ Returns:
95
+ Parsed content of the element and recognition results
96
+ """
97
+ # Load and prepare image
98
+ pil_image = Image.open(image_path).convert("RGB")
99
+ pil_image = crop_margin(pil_image)
100
+
101
+ # Select appropriate prompt based on element type
102
+ if element_type == "table":
103
+ prompt = "Parse the table in the image."
104
+ label = "tab"
105
+ elif element_type == "formula":
106
+ prompt = "Read text in the image."
107
+ label = "formula"
108
+ else: # Default to text
109
+ prompt = "Read text in the image."
110
+ label = "text"
111
+
112
+ # Process the element
113
+ result = model.chat(prompt, pil_image)
114
+
115
+ # Create recognition result in the same format as the document parser
116
+ recognition_result = [
117
+ {
118
+ "label": label,
119
+ "text": result.strip(),
120
+ }
121
+ ]
122
+
123
+ # Save results if save_dir is provided
124
+ if save_dir:
125
+ save_outputs(recognition_result, image_path, save_dir)
126
+ print(f"Results saved to {save_dir}")
127
+
128
+ return result, recognition_result
129
+
130
+
131
+ def main():
132
+ parser = argparse.ArgumentParser(description="Element-level processing using DOLPHIN model")
133
+ parser.add_argument("--model_path", default="./hf_model", help="Path to Hugging Face model")
134
+ parser.add_argument("--input_path", type=str, required=True, help="Path to input image or directory of images")
135
+ parser.add_argument(
136
+ "--element_type",
137
+ type=str,
138
+ choices=["text", "table", "formula"],
139
+ default="text",
140
+ help="Type of element to process (text, table, formula)",
141
+ )
142
+ parser.add_argument(
143
+ "--save_dir",
144
+ type=str,
145
+ default=None,
146
+ help="Directory to save parsing results (default: same as input directory)",
147
+ )
148
+ parser.add_argument("--print_results", action="store_true", help="Print recognition results to console")
149
+ args = parser.parse_args()
150
+
151
+ # Load Model
152
+ model = DOLPHIN(args.model_path)
153
+
154
+ # Set save directory
155
+ save_dir = args.save_dir or (
156
+ args.input_path if os.path.isdir(args.input_path) else os.path.dirname(args.input_path)
157
+ )
158
+ setup_output_dirs(save_dir)
159
+
160
+ # Collect Images
161
+ if os.path.isdir(args.input_path):
162
+ image_files = []
163
+ for ext in [".jpg", ".jpeg", ".png", ".JPG", ".JPEG", ".PNG"]:
164
+ image_files.extend(glob.glob(os.path.join(args.input_path, f"*{ext}")))
165
+ image_files = sorted(image_files)
166
+ else:
167
+ if not os.path.exists(args.input_path):
168
+ raise FileNotFoundError(f"Input path {args.input_path} does not exist")
169
+ image_files = [args.input_path]
170
+
171
+ total_samples = len(image_files)
172
+ print(f"\nTotal samples to process: {total_samples}")
173
+
174
+ # Process images one by one
175
+ for image_path in image_files:
176
+ print(f"\nProcessing {image_path}")
177
+ try:
178
+ result, recognition_result = process_element(
179
+ image_path=image_path,
180
+ model=model,
181
+ element_type=args.element_type,
182
+ save_dir=save_dir,
183
+ )
184
+
185
+ if args.print_results:
186
+ print("\nRecognition result:")
187
+ print(result)
188
+ print("-" * 40)
189
+ except Exception as e:
190
+ print(f"Error processing {image_path}: {str(e)}")
191
+ continue
192
+
193
+
194
+ if __name__ == "__main__":
195
+ main()
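For reference, a minimal sketch of driving the `DOLPHIN` Hugging Face wrapper above directly; it assumes the public `ByteDance/Dolphin` checkpoint (a local `./hf_model` directory also works) and a CUDA device, since `__init__` casts the model to half precision.

```python
from PIL import Image

from demo_element_hf import DOLPHIN

model = DOLPHIN("ByteDance/Dolphin")  # or a local ./hf_model directory

# Single prompt + single PIL image in, decoded text out.
image = Image.open("./demo/element_imgs/para_1.jpg").convert("RGB")
print(model.chat("Read text in the image.", image))

# Tables use the dedicated table-parsing prompt.
table = Image.open("./demo/element_imgs/table_1.jpeg").convert("RGB")
print(model.chat("Parse the table in the image.", table))
```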
demo_page.py ADDED
@@ -0,0 +1,247 @@
1
+ """
2
+ Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
3
+ SPDX-License-Identifier: MIT
4
+ """
5
+
6
+ import argparse
7
+ import glob
8
+ import os
9
+
10
+ import cv2
11
+ from omegaconf import OmegaConf
12
+ from PIL import Image
13
+
14
+ from chat import DOLPHIN
15
+ from utils.utils import *
16
+
17
+
18
+ def process_document(document_path, model, save_dir, max_batch_size):
19
+ """Parse documents - Handles both images and PDFs"""
20
+ file_ext = os.path.splitext(document_path)[1].lower()
21
+
22
+ if file_ext == '.pdf':
23
+ # Process PDF file
24
+ # Convert PDF to images
25
+ images = convert_pdf_to_images(document_path)
26
+ if not images:
27
+ raise Exception(f"Failed to convert PDF {document_path} to images")
28
+
29
+ all_results = []
30
+
31
+ # Process each page
32
+ for page_idx, pil_image in enumerate(images):
33
+ print(f"Processing page {page_idx + 1}/{len(images)}")
34
+
35
+ # Generate output name for this page
36
+ base_name = os.path.splitext(os.path.basename(document_path))[0]
37
+ page_name = f"{base_name}_page_{page_idx + 1:03d}"
38
+
39
+ # Process this page (don't save individual page results)
40
+ json_path, recognition_results = process_single_image(
41
+ pil_image, model, save_dir, page_name, max_batch_size, save_individual=False
42
+ )
43
+
44
+ # Add page information to results
45
+ page_results = {
46
+ "page_number": page_idx + 1,
47
+ "elements": recognition_results
48
+ }
49
+ all_results.append(page_results)
50
+
51
+ # Save combined results for multi-page PDF
52
+ combined_json_path = save_combined_pdf_results(all_results, document_path, save_dir)
53
+
54
+ return combined_json_path, all_results
55
+
56
+ else:
57
+ # Process regular image file
58
+ pil_image = Image.open(document_path).convert("RGB")
59
+ base_name = os.path.splitext(os.path.basename(document_path))[0]
60
+ return process_single_image(pil_image, model, save_dir, base_name, max_batch_size)
61
+
62
+
63
+ def process_single_image(image, model, save_dir, image_name, max_batch_size, save_individual=True):
64
+ """Process a single image (either from file or converted from PDF page)
65
+
66
+ Args:
67
+ image: PIL Image object
68
+ model: DOLPHIN model instance
69
+ save_dir: Directory to save results
70
+ image_name: Name for the output file
71
+ max_batch_size: Maximum batch size for processing
72
+ save_individual: Whether to save individual results (False for PDF pages)
73
+
74
+ Returns:
75
+ Tuple of (json_path, recognition_results)
76
+ """
77
+ # Stage 1: Page-level layout and reading order parsing
78
+ layout_output = model.chat("Parse the reading order of this document.", image)
79
+
80
+ # Stage 2: Element-level content parsing
81
+ padded_image, dims = prepare_image(image)
82
+ recognition_results = process_elements(layout_output, padded_image, dims, model, max_batch_size, save_dir, image_name)
83
+
84
+ # Save outputs only if requested (skip for PDF pages)
85
+ json_path = None
86
+ if save_individual:
87
+ # Create a dummy image path for save_outputs function
88
+ dummy_image_path = f"{image_name}.jpg" # Extension doesn't matter, only basename is used
89
+ json_path = save_outputs(recognition_results, dummy_image_path, save_dir)
90
+
91
+ return json_path, recognition_results
92
+
93
+
94
+ def process_elements(layout_results, padded_image, dims, model, max_batch_size, save_dir=None, image_name=None):
95
+ """Parse all document elements with parallel decoding"""
96
+ layout_results = parse_layout_string(layout_results)
97
+
98
+ text_table_elements = [] # Elements that need processing
99
+ figure_results = [] # Figure elements (no processing needed)
100
+ previous_box = None
101
+ reading_order = 0
102
+
103
+ # Collect elements for processing
104
+ for bbox, label in layout_results:
105
+ try:
106
+ # Adjust coordinates
107
+ x1, y1, x2, y2, orig_x1, orig_y1, orig_x2, orig_y2, previous_box = process_coordinates(
108
+ bbox, padded_image, dims, previous_box
109
+ )
110
+
111
+ # Crop and parse element
112
+ cropped = padded_image[y1:y2, x1:x2]
113
+ if cropped.size > 0 and cropped.shape[0] > 3 and cropped.shape[1] > 3:
114
+ if label == "fig":
115
+ pil_crop = Image.fromarray(cv2.cvtColor(cropped, cv2.COLOR_BGR2RGB))
116
+
117
+ figure_filename = save_figure_to_local(pil_crop, save_dir, image_name, reading_order)
118
+
119
+ # For figure regions, store relative path instead of base64
120
+ figure_results.append(
121
+ {
122
+ "label": label,
123
+ "text": f"![Figure](figures/{figure_filename})",
124
+ "figure_path": f"figures/{figure_filename}",
125
+ "bbox": [orig_x1, orig_y1, orig_x2, orig_y2],
126
+ "reading_order": reading_order,
127
+ }
128
+ )
129
+ else:
130
+ # For text or table regions, prepare for parsing
131
+ pil_crop = Image.fromarray(cv2.cvtColor(cropped, cv2.COLOR_BGR2RGB))
132
+ prompt = "Parse the table in the image." if label == "tab" else "Read text in the image."
133
+ text_table_elements.append(
134
+ {
135
+ "crop": pil_crop,
136
+ "prompt": prompt,
137
+ "label": label,
138
+ "bbox": [orig_x1, orig_y1, orig_x2, orig_y2],
139
+ "reading_order": reading_order,
140
+ }
141
+ )
142
+
143
+ reading_order += 1
144
+
145
+ except Exception as e:
146
+ print(f"Error processing bbox with label {label}: {str(e)}")
147
+ continue
148
+
149
+ # Parse text/table elements in parallel
150
+ recognition_results = figure_results
151
+ if text_table_elements:
152
+ crops_list = [elem["crop"] for elem in text_table_elements]
153
+ prompts_list = [elem["prompt"] for elem in text_table_elements]
154
+
155
+ # Inference in batch
156
+ batch_results = model.chat(prompts_list, crops_list, max_batch_size=max_batch_size)
157
+
158
+ # Add batch results to recognition_results
159
+ for i, result in enumerate(batch_results):
160
+ elem = text_table_elements[i]
161
+ recognition_results.append(
162
+ {
163
+ "label": elem["label"],
164
+ "bbox": elem["bbox"],
165
+ "text": result.strip(),
166
+ "reading_order": elem["reading_order"],
167
+ }
168
+ )
169
+
170
+ # Sort elements by reading order
171
+ recognition_results.sort(key=lambda x: x.get("reading_order", 0))
172
+
173
+ return recognition_results
174
+
175
+
176
+ def main():
177
+ parser = argparse.ArgumentParser(description="Document parsing based on DOLPHIN")
178
+ parser.add_argument("--config", default="./config/Dolphin.yaml", help="Path to configuration file")
179
+ parser.add_argument("--input_path", type=str, default="./demo", help="Path to input image/PDF or directory of files")
180
+ parser.add_argument(
181
+ "--save_dir",
182
+ type=str,
183
+ default=None,
184
+ help="Directory to save parsing results (default: same as input directory)",
185
+ )
186
+ parser.add_argument(
187
+ "--max_batch_size",
188
+ type=int,
189
+ default=4,
190
+ help="Maximum number of document elements to parse in a single batch (default: 4)",
191
+ )
192
+ args = parser.parse_args()
193
+
194
+ # Load Model
195
+ config = OmegaConf.load(args.config)
196
+ model = DOLPHIN(config)
197
+
198
+ # Collect Document Files (images and PDFs)
199
+ if os.path.isdir(args.input_path):
200
+ # Support both image and PDF files
201
+ file_extensions = [".jpg", ".jpeg", ".png", ".JPG", ".JPEG", ".PNG", ".pdf", ".PDF"]
202
+
203
+ document_files = []
204
+ for ext in file_extensions:
205
+ document_files.extend(glob.glob(os.path.join(args.input_path, f"*{ext}")))
206
+ document_files = sorted(document_files)
207
+ else:
208
+ if not os.path.exists(args.input_path):
209
+ raise FileNotFoundError(f"Input path {args.input_path} does not exist")
210
+
211
+ # Check if it's a supported file type
212
+ file_ext = os.path.splitext(args.input_path)[1].lower()
213
+ supported_exts = ['.jpg', '.jpeg', '.png', '.pdf']
214
+
215
+ if file_ext not in supported_exts:
216
+ raise ValueError(f"Unsupported file type: {file_ext}. Supported types: {supported_exts}")
217
+
218
+ document_files = [args.input_path]
219
+
220
+ save_dir = args.save_dir or (
221
+ args.input_path if os.path.isdir(args.input_path) else os.path.dirname(args.input_path)
222
+ )
223
+ setup_output_dirs(save_dir)
224
+
225
+ total_samples = len(document_files)
226
+ print(f"\nTotal files to process: {total_samples}")
227
+
228
+ # Process All Document Files
229
+ for file_path in document_files:
230
+ print(f"\nProcessing {file_path}")
231
+ try:
232
+ json_path, recognition_results = process_document(
233
+ document_path=file_path,
234
+ model=model,
235
+ save_dir=save_dir,
236
+ max_batch_size=args.max_batch_size,
237
+ )
238
+
239
+ print(f"Processing completed. Results saved to {save_dir}")
240
+
241
+ except Exception as e:
242
+ print(f"Error processing {file_path}: {str(e)}")
243
+ continue
244
+
245
+
246
+ if __name__ == "__main__":
247
+ main()
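A minimal sketch of the same two-stage flow driven programmatically, mirroring `main()` above: load the config-based `DOLPHIN` from chat.py, then hand an image or PDF path to `process_document`, which writes its JSON/Markdown output under `save_dir`. Paths are illustrative.

```python
from omegaconf import OmegaConf

from chat import DOLPHIN
from demo_page import process_document
from utils.utils import setup_output_dirs

config = OmegaConf.load("./config/Dolphin.yaml")
model = DOLPHIN(config)

save_dir = "./results"
setup_output_dirs(save_dir)  # create the output folders save_outputs and figures expect

json_path, recognition_results = process_document(
    document_path="./demo/page_imgs/page_1.jpeg",  # a .pdf path works the same way
    model=model,
    save_dir=save_dir,
    max_batch_size=4,
)
print(f"Results written to {json_path}")
```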
demo_page_hf.py ADDED
@@ -0,0 +1,365 @@
1
+ """
2
+ Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
3
+ SPDX-License-Identifier: MIT
4
+ """
5
+
6
+ import argparse
7
+ import glob
8
+ import os
9
+
10
+ import cv2
11
+ import torch
12
+ from PIL import Image
13
+ from transformers import AutoProcessor, VisionEncoderDecoderModel
14
+
15
+ from utils.utils import *
16
+
17
+
18
+ class DOLPHIN:
19
+ def __init__(self, model_id_or_path):
20
+ """Initialize the Hugging Face model
21
+
22
+ Args:
23
+ model_id_or_path: Path to local model or Hugging Face model ID
24
+ """
25
+ # Load model from local path or Hugging Face hub
26
+ self.processor = AutoProcessor.from_pretrained(model_id_or_path)
27
+ self.model = VisionEncoderDecoderModel.from_pretrained(model_id_or_path)
28
+ self.model.eval()
29
+
30
+ # Set device and precision
31
+ self.device = "cuda" if torch.cuda.is_available() else "cpu"
32
+ self.model.to(self.device)
33
+ self.model = self.model.half() # Always use half precision by default
34
+
35
+ # set tokenizer
36
+ self.tokenizer = self.processor.tokenizer
37
+
38
+ def chat(self, prompt, image):
39
+ """Process an image or batch of images with the given prompt(s)
40
+
41
+ Args:
42
+ prompt: Text prompt or list of prompts to guide the model
43
+ image: PIL Image or list of PIL Images to process
44
+
45
+ Returns:
46
+ Generated text or list of texts from the model
47
+ """
48
+ # Check if we're dealing with a batch
49
+ is_batch = isinstance(image, list)
50
+
51
+ if not is_batch:
52
+ # Single image, wrap it in a list for consistent processing
53
+ images = [image]
54
+ prompts = [prompt]
55
+ else:
56
+ # Batch of images
57
+ images = image
58
+ prompts = prompt if isinstance(prompt, list) else [prompt] * len(images)
59
+
60
+ # Prepare image
61
+ batch_inputs = self.processor(images, return_tensors="pt", padding=True)
62
+ batch_pixel_values = batch_inputs.pixel_values.half().to(self.device)
63
+
64
+ # Prepare prompt
65
+ prompts = [f"<s>{p} <Answer/>" for p in prompts]
66
+ batch_prompt_inputs = self.tokenizer(
67
+ prompts,
68
+ add_special_tokens=False,
69
+ return_tensors="pt"
70
+ )
71
+
72
+ batch_prompt_ids = batch_prompt_inputs.input_ids.to(self.device)
73
+ batch_attention_mask = batch_prompt_inputs.attention_mask.to(self.device)
74
+
75
+ # Generate text
76
+ outputs = self.model.generate(
77
+ pixel_values=batch_pixel_values,
78
+ decoder_input_ids=batch_prompt_ids,
79
+ decoder_attention_mask=batch_attention_mask,
80
+ min_length=1,
81
+ max_length=4096,
82
+ pad_token_id=self.tokenizer.pad_token_id,
83
+ eos_token_id=self.tokenizer.eos_token_id,
84
+ use_cache=True,
85
+ bad_words_ids=[[self.tokenizer.unk_token_id]],
86
+ return_dict_in_generate=True,
87
+ do_sample=False,
88
+ num_beams=1,
89
+ repetition_penalty=1.1,
90
+ temperature=1.0
91
+ )
92
+
93
+ # Process output
94
+ sequences = self.tokenizer.batch_decode(outputs.sequences, skip_special_tokens=False)
95
+
96
+ # Clean prompt text from output
97
+ results = []
98
+ for i, sequence in enumerate(sequences):
99
+ cleaned = sequence.replace(prompts[i], "").replace("<pad>", "").replace("</s>", "").strip()
100
+ results.append(cleaned)
101
+
102
+ # Return a single result for single image input
103
+ if not is_batch:
104
+ return results[0]
105
+ return results
106
+
107
+
108
+ def process_document(document_path, model, save_dir, max_batch_size=None):
109
+ """Parse documents with two stages - Handles both images and PDFs"""
110
+ file_ext = os.path.splitext(document_path)[1].lower()
111
+
112
+ if file_ext == '.pdf':
113
+ # Process PDF file
114
+ # Convert PDF to images
115
+ images = convert_pdf_to_images(document_path)
116
+ if not images:
117
+ raise Exception(f"Failed to convert PDF {document_path} to images")
118
+
119
+ all_results = []
120
+
121
+ # Process each page
122
+ for page_idx, pil_image in enumerate(images):
123
+ print(f"Processing page {page_idx + 1}/{len(images)}")
124
+
125
+ # Generate output name for this page
126
+ base_name = os.path.splitext(os.path.basename(document_path))[0]
127
+ page_name = f"{base_name}_page_{page_idx + 1:03d}"
128
+
129
+ # Process this page (don't save individual page results)
130
+ json_path, recognition_results = process_single_image(
131
+ pil_image, model, save_dir, page_name, max_batch_size, save_individual=False
132
+ )
133
+
134
+ # Add page information to results
135
+ page_results = {
136
+ "page_number": page_idx + 1,
137
+ "elements": recognition_results
138
+ }
139
+ all_results.append(page_results)
140
+
141
+ # Save combined results for multi-page PDF
142
+ combined_json_path = save_combined_pdf_results(all_results, document_path, save_dir)
143
+
144
+ return combined_json_path, all_results
145
+
146
+ else:
147
+ # Process regular image file
148
+ pil_image = Image.open(document_path).convert("RGB")
149
+ base_name = os.path.splitext(os.path.basename(document_path))[0]
150
+ return process_single_image(pil_image, model, save_dir, base_name, max_batch_size)
151
+
152
+
153
+ def process_single_image(image, model, save_dir, image_name, max_batch_size=None, save_individual=True):
154
+ """Process a single image (either from file or converted from PDF page)
155
+
156
+ Args:
157
+ image: PIL Image object
158
+ model: DOLPHIN model instance
159
+ save_dir: Directory to save results
160
+ image_name: Name for the output file
161
+ max_batch_size: Maximum batch size for processing
162
+ save_individual: Whether to save individual results (False for PDF pages)
163
+
164
+ Returns:
165
+ Tuple of (json_path, recognition_results)
166
+ """
167
+ # Stage 1: Page-level layout and reading order parsing
168
+ layout_output = model.chat("Parse the reading order of this document.", image)
169
+
170
+ # Stage 2: Element-level content parsing
171
+ padded_image, dims = prepare_image(image)
172
+ recognition_results = process_elements(layout_output, padded_image, dims, model, max_batch_size, save_dir, image_name)
173
+
174
+ # Save outputs only if requested (skip for PDF pages)
175
+ json_path = None
176
+ if save_individual:
177
+ # Create a dummy image path for save_outputs function
178
+ dummy_image_path = f"{image_name}.jpg" # Extension doesn't matter, only basename is used
179
+ json_path = save_outputs(recognition_results, dummy_image_path, save_dir)
180
+
181
+ return json_path, recognition_results
182
+
183
+
184
+ def process_elements(layout_results, padded_image, dims, model, max_batch_size, save_dir=None, image_name=None):
185
+ """Parse all document elements with parallel decoding"""
186
+ layout_results = parse_layout_string(layout_results)
187
+
188
+ # Store text and table elements separately
189
+ text_elements = [] # Text elements
190
+ table_elements = [] # Table elements
191
+ figure_results = [] # Image elements (no processing needed)
192
+ previous_box = None
193
+ reading_order = 0
194
+
195
+ # Collect elements to process and group by type
196
+ for bbox, label in layout_results:
197
+ try:
198
+ # Adjust coordinates
199
+ x1, y1, x2, y2, orig_x1, orig_y1, orig_x2, orig_y2, previous_box = process_coordinates(
200
+ bbox, padded_image, dims, previous_box
201
+ )
202
+
203
+ # Crop and parse element
204
+ cropped = padded_image[y1:y2, x1:x2]
205
+ if cropped.size > 0 and cropped.shape[0] > 3 and cropped.shape[1] > 3:
206
+ if label == "fig":
207
+ pil_crop = Image.fromarray(cv2.cvtColor(cropped, cv2.COLOR_BGR2RGB))
208
+
209
+ figure_filename = save_figure_to_local(pil_crop, save_dir, image_name, reading_order)
210
+
211
+ # For figure regions, store relative path instead of base64
212
+ figure_results.append(
213
+ {
214
+ "label": label,
215
+ "text": f"![Figure](figures/{figure_filename})",
216
+ "figure_path": f"figures/{figure_filename}",
217
+ "bbox": [orig_x1, orig_y1, orig_x2, orig_y2],
218
+ "reading_order": reading_order,
219
+ }
220
+ )
221
+ else:
222
+ # Prepare element for parsing
223
+ pil_crop = Image.fromarray(cv2.cvtColor(cropped, cv2.COLOR_BGR2RGB))
224
+ element_info = {
225
+ "crop": pil_crop,
226
+ "label": label,
227
+ "bbox": [orig_x1, orig_y1, orig_x2, orig_y2],
228
+ "reading_order": reading_order,
229
+ }
230
+
231
+ # Group by type
232
+ if label == "tab":
233
+ table_elements.append(element_info)
234
+ else: # Text elements
235
+ text_elements.append(element_info)
236
+
237
+ reading_order += 1
238
+
239
+ except Exception as e:
240
+ print(f"Error processing bbox with label {label}: {str(e)}")
241
+ continue
242
+
243
+ # Initialize results list
244
+ recognition_results = figure_results.copy()
245
+
246
+ # Process text elements (in batches)
247
+ if text_elements:
248
+ text_results = process_element_batch(text_elements, model, "Read text in the image.", max_batch_size)
249
+ recognition_results.extend(text_results)
250
+
251
+ # Process table elements (in batches)
252
+ if table_elements:
253
+ table_results = process_element_batch(table_elements, model, "Parse the table in the image.", max_batch_size)
254
+ recognition_results.extend(table_results)
255
+
256
+ # Sort elements by reading order
257
+ recognition_results.sort(key=lambda x: x.get("reading_order", 0))
258
+
259
+ return recognition_results
260
+
261
+
262
+ def process_element_batch(elements, model, prompt, max_batch_size=None):
263
+ """Process elements of the same type in batches"""
264
+ results = []
265
+
266
+ # Determine batch size
267
+ batch_size = len(elements)
268
+ if max_batch_size is not None and max_batch_size > 0:
269
+ batch_size = min(batch_size, max_batch_size)
270
+
271
+ # Process in batches
272
+ for i in range(0, len(elements), batch_size):
273
+ batch_elements = elements[i:i+batch_size]
274
+ crops_list = [elem["crop"] for elem in batch_elements]
275
+
276
+ # Use the same prompt for all elements in the batch
277
+ prompts_list = [prompt] * len(crops_list)
278
+
279
+ # Batch inference
280
+ batch_results = model.chat(prompts_list, crops_list)
281
+
282
+ # Add results
283
+ for j, result in enumerate(batch_results):
284
+ elem = batch_elements[j]
285
+ results.append({
286
+ "label": elem["label"],
287
+ "bbox": elem["bbox"],
288
+ "text": result.strip(),
289
+ "reading_order": elem["reading_order"],
290
+ })
291
+
292
+ return results
293
+
294
+
295
+ def main():
296
+ parser = argparse.ArgumentParser(description="Document parsing based on DOLPHIN")
297
+ parser.add_argument("--model_path", default="./hf_model", help="Path to Hugging Face model")
298
+ parser.add_argument("--input_path", type=str, default="./demo", help="Path to input image/PDF or directory of files")
299
+ parser.add_argument(
300
+ "--save_dir",
301
+ type=str,
302
+ default=None,
303
+ help="Directory to save parsing results (default: same as input directory)",
304
+ )
305
+ parser.add_argument(
306
+ "--max_batch_size",
307
+ type=int,
308
+ default=16,
309
+ help="Maximum number of document elements to parse in a single batch (default: 16)",
310
+ )
311
+ args = parser.parse_args()
312
+
313
+ # Load Model
314
+ model = DOLPHIN(args.model_path)
315
+
316
+ # Collect Document Files (images and PDFs)
317
+ if os.path.isdir(args.input_path):
318
+ # Support both image and PDF files
319
+ file_extensions = [".jpg", ".jpeg", ".png", ".JPG", ".JPEG", ".PNG", ".pdf", ".PDF"]
320
+
321
+ document_files = []
322
+ for ext in file_extensions:
323
+ document_files.extend(glob.glob(os.path.join(args.input_path, f"*{ext}")))
324
+ document_files = sorted(document_files)
325
+ else:
326
+ if not os.path.exists(args.input_path):
327
+ raise FileNotFoundError(f"Input path {args.input_path} does not exist")
328
+
329
+ # Check if it's a supported file type
330
+ file_ext = os.path.splitext(args.input_path)[1].lower()
331
+ supported_exts = ['.jpg', '.jpeg', '.png', '.pdf']
332
+
333
+ if file_ext not in supported_exts:
334
+ raise ValueError(f"Unsupported file type: {file_ext}. Supported types: {supported_exts}")
335
+
336
+ document_files = [args.input_path]
337
+
338
+ save_dir = args.save_dir or (
339
+ args.input_path if os.path.isdir(args.input_path) else os.path.dirname(args.input_path)
340
+ )
341
+ setup_output_dirs(save_dir)
342
+
343
+ total_samples = len(document_files)
344
+ print(f"\nTotal files to process: {total_samples}")
345
+
346
+ # Process All Document Files
347
+ for file_path in document_files:
348
+ print(f"\nProcessing {file_path}")
349
+ try:
350
+ json_path, recognition_results = process_document(
351
+ document_path=file_path,
352
+ model=model,
353
+ save_dir=save_dir,
354
+ max_batch_size=args.max_batch_size,
355
+ )
356
+
357
+ print(f"Processing completed. Results saved to {save_dir}")
358
+
359
+ except Exception as e:
360
+ print(f"Error processing {file_path}: {str(e)}")
361
+ continue
362
+
363
+
364
+ if __name__ == "__main__":
365
+ main()
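The batched `chat()` above is what `process_element_batch` relies on. A minimal sketch of calling it directly with paired prompt/crop lists, under the same assumptions about the checkpoint as the earlier sketch:

```python
from PIL import Image

from demo_page_hf import DOLPHIN

model = DOLPHIN("ByteDance/Dolphin")  # or a local ./hf_model directory

crops = [
    Image.open("./demo/element_imgs/para_1.jpg").convert("RGB"),
    Image.open("./demo/element_imgs/table_1.jpeg").convert("RGB"),
]
prompts = [
    "Read text in the image.",
    "Parse the table in the image.",
]

# One decoded string comes back per crop, in input order.
results = model.chat(prompts, crops)
for prompt, text in zip(prompts, results):
    print(prompt, "->", text[:80])
```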
deployment/ReadMe.md ADDED
@@ -0,0 +1,12 @@
1
+ <h1 align="center">
2
+ 🚀 Dolphin Inference/Serving
3
+ </h1>
4
+
5
+ ## vLLM
6
+ > [Doc](./vllm/ReadMe.md)
7
+
8
+ ## TensorRT-LLM
9
+ > [Doc](./tensorrt_llm/ReadMe.md)
10
+
11
+ ## Others
12
+
deployment/tensorrt_llm/ReadMe.md ADDED
@@ -0,0 +1,89 @@
1
+ <h1 align="center">
2
+ 🚀 Dolphin TensorRT-LLM Demo
3
+ </h1>
4
+
5
+ ## ✅ Introduction
6
+ The Dolphin model employs a **Swin Encoder + MBart Decoder** architecture. In the HuggingFace Transformers [Config](https://huggingface.co/ByteDance/Dolphin/blob/main/config.json),
7
+ its `architectures` field is specified as "VisionEncoderDecoderModel". **Dolphin**, **[Nougat](https://huggingface.co/docs/transformers/model_doc/nougat)**, and **[Donut](https://huggingface.co/docs/transformers/model_doc/donut)** share the same model architecture, and TensorRT-LLM already supports Nougat.
8
+ Following Nougat's conversion script, we implemented Dolphin on TensorRT-LLM as well.
9
+
10
+ **Note:** [prompt_ids](./dolphin_runner.py#L120) MUST be of **int32** type, otherwise TensorRT-LLM will produce incorrect results.
11
+
12
+ ## 🛠️ Installation
13
+ > We have only tested TensorRT-LLM 0.18.1 on Linux.
14
+
15
+ https://nvidia.github.io/TensorRT-LLM/0.18.1/installation/linux.html
16
+
17
+
18
+ ## ⚡ Offline Inference
19
+ ```
20
+ export MODEL_NAME="Dolphin"
21
+
22
+ # predict elements reading order
23
+ python run_dolphin.py \
24
+ --batch_size 1 \
25
+ --hf_model_dir tmp/hf_models/${MODEL_NAME} \
26
+ --visual_engine_dir tmp/trt_engines/${MODEL_NAME}/vision_encoder \
27
+ --llm_engine_dir tmp/trt_engines/${MODEL_NAME}/1-gpu/bfloat16 \
28
+ --max_new_tokens 4096 \
29
+ --repetition_penalty 1.0 \
30
+ --input_text "Parse the reading order of this document." \
31
+ --image_path "../../demo/page_imgs/page_1.jpeg"
32
+
33
+ # recognize text/latex
34
+ python run_dolphin.py \
35
+ --batch_size 1 \
36
+ --hf_model_dir tmp/hf_models/${MODEL_NAME} \
37
+ --visual_engine_dir tmp/trt_engines/${MODEL_NAME}/vision_encoder \
38
+ --llm_engine_dir tmp/trt_engines/${MODEL_NAME}/1-gpu/bfloat16 \
39
+ --max_new_tokens 4096 \
40
+ --repetition_penalty 1.0 \
41
+ --input_text "Read text in the image." \
42
+ --image_path "../../demo/element_imgs/block_formula.jpeg"
43
+
44
+
45
+ python run_dolphin.py \
46
+ --batch_size 1 \
47
+ --hf_model_dir tmp/hf_models/${MODEL_NAME} \
48
+ --visual_engine_dir tmp/trt_engines/${MODEL_NAME}/vision_encoder \
49
+ --llm_engine_dir tmp/trt_engines/${MODEL_NAME}/1-gpu/bfloat16 \
50
+ --max_new_tokens 4096 \
51
+ --repetition_penalty 1.0 \
52
+ --input_text "Read text in the image." \
53
+ --image_path "../../demo/element_imgs/para_1.jpg"
54
+
55
+ # recognize table
56
+ python run_dolphin.py \
57
+ --batch_size 1 \
58
+ --hf_model_dir tmp/hf_models/${MODEL_NAME} \
59
+ --visual_engine_dir tmp/trt_engines/${MODEL_NAME}/vision_encoder \
60
+ --llm_engine_dir tmp/trt_engines/${MODEL_NAME}/1-gpu/bfloat16 \
61
+ --max_new_tokens 4096 \
62
+ --repetition_penalty 1.0 \
63
+ --input_text "Parse the table in the image." \
64
+ --image_path "../../demo/element_imgs/table_1.jpeg"
65
+ ```
66
+
67
+
68
+ ## ⚡ Online Inference
69
+ ```
70
+ # 1. Start Api Server
71
+ export MODEL_NAME="Dolphin"
72
+
73
+ python api_server.py \
74
+ --hf_model_dir tmp/hf_models/${MODEL_NAME} \
75
+ --visual_engine_dir tmp/trt_engines/${MODEL_NAME}/vision_encoder \
76
+ --llm_engine_dir tmp/trt_engines/${MODEL_NAME}/1-gpu/bfloat16 \
77
+ --max_batch_size 16
78
+
79
+ # 2. Predict
80
+ # predict elements reading order
81
+ python deployment/tensorrt_llm/api_client.py --image_path ./demo/page_imgs/page_1.jpeg --prompt "Parse the reading order of this document."
82
+
83
+ # recognize text/latex
84
+ python deployment/tensorrt_llm/api_client.py --image_path ./demo/element_imgs/block_formula.jpeg --prompt "Read text in the image."
85
+ python deployment/tensorrt_llm/api_client.py --image_path ./demo/element_imgs/para_1.jpg --prompt "Read text in the image."
86
+
87
+ # recognize table
88
+ python deployment/tensorrt_llm/api_client.py --image_path ./demo/element_imgs/table_1.jpeg --prompt "Parse the table in the image."
89
+ ```
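To illustrate the int32 note above: the decoder prompt ids handed to TensorRT-LLM should be cast explicitly. The snippet below is only a sketch of that cast — the actual preparation lives in dolphin_runner.py, which is not part of this excerpt — using the prompt template the demo scripts use.

```python
import torch
from transformers import AutoProcessor

# Tokenize a Dolphin prompt the same way the demo scripts do.
processor = AutoProcessor.from_pretrained("ByteDance/Dolphin")
tokenizer = processor.tokenizer

prompt = "<s>Read text in the image. <Answer/>"
prompt_ids = tokenizer(prompt, add_special_tokens=False, return_tensors="pt").input_ids

# TensorRT-LLM expects int32 ids here; leaving them as the default int64
# produces incorrect results (see the note above).
prompt_ids = prompt_ids.to(torch.int32)
```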
deployment/tensorrt_llm/api_client.py ADDED
@@ -0,0 +1,100 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+ """Example Python client for the Dolphin TensorRT-LLM API server in this directory.
4
+ Start the demo server first (see api_server.py and the ReadMe):
5
+ python api_server.py --hf_model_dir <dir> --visual_engine_dir <dir> --llm_engine_dir <dir>
6
+
7
+ NOTE: The API server is used only for demonstration and simple performance
8
+ benchmarks. It is not intended for production use.
9
+ For production use, we recommend `vllm serve` and the OpenAI client API.
10
+ """
11
+
12
+ import argparse
13
+ import base64
14
+ import json
15
+ from argparse import Namespace
16
+ from collections.abc import Iterable
17
+
18
+ import requests
19
+
20
+
21
+ def clear_line(n: int = 1) -> None:
22
+ LINE_UP = "\033[1A"
23
+ LINE_CLEAR = "\x1b[2K"
24
+ for _ in range(n):
25
+ print(LINE_UP, end=LINE_CLEAR, flush=True)
26
+
27
+
28
+ def encode_image_base64(image_path: str) -> str:
29
+ """Encode local image to base64 format."""
30
+
31
+ with open(image_path, "rb") as f:
32
+ image_data = f.read()
33
+ result = base64.b64encode(image_data).decode("utf-8")
34
+
35
+ return result
36
+
37
+
38
+ def post_http_request(
39
+ prompt: str, image_path: str, api_url: str, stream: bool = False
40
+ ) -> requests.Response:
41
+ headers = {"User-Agent": "Test Client"}
42
+ pload = {
43
+ "prompt": prompt,
44
+ "image_base64": encode_image_base64(image_path),
45
+ }
46
+ response = requests.post(api_url, headers=headers, json=pload, stream=stream)
47
+ return response
48
+
49
+
50
+ def get_streaming_response(response: requests.Response) -> Iterable[list[str]]:
51
+ for chunk in response.iter_lines(
52
+ chunk_size=8192, decode_unicode=False, delimiter=b"\n"
53
+ ):
54
+ if chunk:
55
+ data = json.loads(chunk.decode("utf-8"))
56
+ output = data["text"]
57
+ yield output
58
+
59
+
60
+ def get_response(response: requests.Response) -> list[str]:
61
+ data = json.loads(response.content)
62
+ output = data["text"]
63
+ return output
64
+
65
+
66
+ def parse_args():
67
+ parser = argparse.ArgumentParser()
68
+ parser.add_argument("--host", type=str, default="localhost")
69
+ parser.add_argument("--port", type=int, default=8000)
70
+ parser.add_argument("--prompt", type=str, default="Parse the reading order of this document.")
71
+ parser.add_argument("--image_path", type=str, default="./demo/page_imgs/page_1.jpeg")
72
+ parser.add_argument("--stream", action="store_true")
73
+ return parser.parse_args()
74
+
75
+
76
+ def main(args: Namespace):
77
+ prompt = args.prompt
78
+ image_path = args.image_path
79
+ api_url = f"http://{args.host}:{args.port}/generate"
80
+ stream = args.stream
81
+
82
+ print(f"Prompt: {prompt!r}\n", flush=True)
83
+ response = post_http_request(prompt, image_path, api_url, stream)
84
+
85
+ if stream:
86
+ num_printed_lines = 0
87
+ for h in get_streaming_response(response):
88
+ clear_line(num_printed_lines)
89
+ num_printed_lines = 0
90
+ for i, line in enumerate(h):
91
+ num_printed_lines += 1
92
+ print(f"Response {i}: {line!r}", flush=True)
93
+ else:
94
+ output = get_response(response)
95
+ print(f"Response: {output!r}", flush=True)
96
+
97
+
98
+ if __name__ == "__main__":
99
+ args = parse_args()
100
+ main(args)
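The client above wraps a single HTTP call; the equivalent request can also be issued inline. A minimal non-streaming sketch, assuming the api_server.py from this directory is already running on localhost:8000:

```python
import base64

import requests

# Encode the page image and call the /generate endpoint exposed by api_server.py.
with open("./demo/page_imgs/page_1.jpeg", "rb") as f:
    image_base64 = base64.b64encode(f.read()).decode("utf-8")

resp = requests.post(
    "http://localhost:8000/generate",
    headers={"User-Agent": "Test Client"},
    json={
        "prompt": "Parse the reading order of this document.",
        "image_base64": image_base64,
    },
)
resp.raise_for_status()
print(resp.json()["text"])
```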
deployment/tensorrt_llm/api_server.py ADDED
@@ -0,0 +1,112 @@
1
+ # copied from: https://github.com/NVIDIA/TensorRT-LLM/blob/v0.18.1/examples/apps/fastapi_server.py
2
+
3
+ #!/usr/bin/env python
4
+ import asyncio
5
+ import base64
6
+ import io
7
+ import logging
8
+ import signal
9
+ from http import HTTPStatus
10
+ from PIL import Image
11
+ from typing import Optional
12
+
13
+ import click
14
+ import uvicorn
15
+ from fastapi import FastAPI, Request
16
+ from fastapi.responses import JSONResponse, Response
17
+
18
+ from tensorrt_llm.executor import CppExecutorError, RequestError
19
+ from dolphin_runner import DolphinRunner, InferenceConfig
20
+
21
+ TIMEOUT_KEEP_ALIVE = 5 # seconds.
22
+
23
+
24
+ async def decode_image(image_base64: str) -> Image.Image:
25
+ image_data = base64.b64decode(image_base64)
26
+ image = Image.open(io.BytesIO(image_data))
27
+ return image
28
+
29
+
30
+ class LlmServer:
31
+ def __init__(self, runner: DolphinRunner):
32
+ self.runner = runner
33
+ self.app = FastAPI()
34
+ self.register_routes()
35
+
36
+ def register_routes(self):
37
+ self.app.add_api_route("/health", self.health, methods=["GET"])
38
+ self.app.add_api_route("/generate", self.generate, methods=["POST"])
39
+
40
+ async def health(self) -> Response:
41
+ return Response(status_code=200)
42
+
43
+ async def generate(self, request: Request) -> Response:
44
+ """ Generate completion for the request.
45
+
46
+ The request should be a JSON object with the following fields:
47
+ - prompt: the prompt to use for the generation.
48
+ - image_base64: the image to use for the generation.
49
+ """
50
+ request_dict = await request.json()
51
+
52
+ prompt = request_dict.pop("prompt", "")
53
+ logging.info(f"request prompt: {prompt}")
54
+ image_base64 = request_dict.pop("image_base64", "")
55
+ image = await decode_image(image_base64)
56
+
57
+ try:
58
+ output_texts = self.runner.run([prompt], [image], 4024)
59
+ output_texts = [texts[0] for texts in output_texts]
60
+ return JSONResponse({"text": output_texts[0]})
61
+ except RequestError as e:
62
+ return JSONResponse(content=str(e),
63
+ status_code=HTTPStatus.BAD_REQUEST)
64
+ except CppExecutorError:
65
+ # If internal executor error is raised, shutdown the server
66
+ signal.raise_signal(signal.SIGINT)
67
+
68
+ async def __call__(self, host, port):
69
+ config = uvicorn.Config(self.app,
70
+ host=host,
71
+ port=port,
72
+ log_level="info",
73
+ timeout_keep_alive=TIMEOUT_KEEP_ALIVE)
74
+ await uvicorn.Server(config).serve()
75
+
76
+
77
+ @click.command()
78
+ @click.option("--hf_model_dir", type=str, required=True)
79
+ @click.option("--visual_engine_dir", type=str, required=True)
80
+ @click.option("--llm_engine_dir", type=str, required=True)
81
+ @click.option("--max_batch_size", type=int, default=16)
82
+ @click.option("--max_new_tokens", type=int, default=4024)
83
+ @click.option("--host", type=str, default=None)
84
+ @click.option("--port", type=int, default=8000)
85
+ def entrypoint(hf_model_dir: str,
86
+ visual_engine_dir: str,
87
+ llm_engine_dir: str,
88
+ max_batch_size: int,
89
+ max_new_tokens: int,
90
+ host: Optional[str] = None,
91
+ port: int = 8000):
92
+ host = host or "0.0.0.0"
93
+ port = port or 8000
94
+ logging.info(f"Starting server at {host}:{port}")
95
+
96
+ config = InferenceConfig(
97
+ max_new_tokens=max_new_tokens,
98
+ batch_size=max_batch_size,
99
+ log_level="info",
100
+ hf_model_dir=hf_model_dir,
101
+ visual_engine_dir=visual_engine_dir,
102
+ llm_engine_dir=llm_engine_dir,
103
+ )
104
+
105
+ dolphin_runner = DolphinRunner(config)
106
+ server = LlmServer(runner=dolphin_runner)
107
+
108
+ asyncio.run(server(host, port))
109
+
110
+
111
+ if __name__ == "__main__":
112
+ entrypoint()
deployment/tensorrt_llm/convert/__init__.py ADDED
File without changes
deployment/tensorrt_llm/convert/build_visual_engine.py ADDED
@@ -0,0 +1,14 @@
1
+ # copied from: https://github.com/NVIDIA/TensorRT-LLM/blob/v0.18.2/examples/multimodal/build_visual_engine.py
2
+
3
+ import argparse
4
+
5
+ from tensorrt_llm.tools.multimodal_builder import (VisionEngineBuilder,
6
+ add_multimodal_arguments)
7
+
8
+ if __name__ == '__main__':
9
+ parser = argparse.ArgumentParser()
10
+ parser = add_multimodal_arguments(parser)
11
+ args = parser.parse_args()
12
+
13
+ builder = VisionEngineBuilder(args)
14
+ builder.build()