UniquePratham committed · Commit 4a67835
1 Parent(s): 0efdb28
Delete ocr_cpu.py
ocr_cpu.py +0 -88
ocr_cpu.py DELETED
@@ -1,88 +0,0 @@
-import os
-from transformers import AutoModel, AutoTokenizer, Qwen2VLForConditionalGeneration, AutoProcessor, MllamaForConditionalGeneration
-import torch
-import re
-from PIL import Image
-
-# ---- GOT OCR Model Initialization and Extraction ----
-
-def init_got_model():
-    """Initialize GOT model and tokenizer."""
-    model_name = "srimanth-d/GOT_CPU"
-    tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True, return_tensors='pt')
-    model = AutoModel.from_pretrained(model_name, trust_remote_code=True, low_cpu_mem_usage=True, use_safetensors=True, pad_token_id=tokenizer.eos_token_id)
-    return model.eval(), tokenizer
-
-def extract_text_got(uploaded_file):
-    """Extract text from the uploaded image using GOT model."""
-    temp_file_path = 'temp_image_got.jpg'
-    try:
-        with open(temp_file_path, 'wb') as temp_file:
-            temp_file.write(uploaded_file.read())
-
-        print(f"Processing image using GOT from: {temp_file_path}")
-        model, tokenizer = init_got_model()
-        outputs = model.chat(tokenizer, temp_file_path, ocr_type='ocr')
-
-        if outputs and isinstance(outputs, list):
-            return outputs[0].strip() if outputs[0].strip() else "No text extracted."
-        return "No text extracted."
-    except Exception as e:
-        return f"Error: {str(e)}"
-    finally:
-        if os.path.exists(temp_file_path):
-            os.remove(temp_file_path)
-
-# ---- Qwen OCR Model Initialization and Extraction ----
-
-def init_qwen_model():
-    """Initialize Qwen model and processor."""
-    model = Qwen2VLForConditionalGeneration.from_pretrained("Qwen/Qwen2-VL-2B-Instruct", device_map="cpu", torch_dtype=torch.float16)
-    processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-2B-Instruct")
-    return model.eval(), processor
-
-def extract_text_qwen(uploaded_file):
-    """Extract text using Qwen model."""
-    try:
-        model, processor = init_qwen_model()
-        image = Image.open(uploaded_file).convert('RGB')
-        conversation = [{"role": "user", "content": [{"type": "image"}, {"type": "text", "text": "Extract text from this image."}]}]
-        prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
-        inputs = processor(text=[prompt], images=[image], return_tensors="pt")
-        output_ids = model.generate(**inputs)
-        output_text = processor.batch_decode(output_ids, skip_special_tokens=True)
-        return output_text[0] if output_text else "No text extracted."
-    except Exception as e:
-        return f"Error: {str(e)}"
-
-# ---- LLaMA OCR Model Initialization and Extraction ----
-
-def init_llama_model():
-    """Initialize LLaMA OCR model and processor."""
-    model = MllamaForConditionalGeneration.from_pretrained("meta-llama/Llama-3.2-11B-Vision-Instruct", torch_dtype=torch.bfloat16, device_map="cpu")
-    processor = AutoProcessor.from_pretrained("meta-llama/Llama-3.2-11B-Vision-Instruct")
-    return model.eval(), processor
-
-def extract_text_llama(uploaded_file):
-    """Extract text using LLaMA model."""
-    try:
-        model, processor = init_llama_model()
-        image = Image.open(uploaded_file).convert('RGB')
-        prompt = "You are an OCR engine. Extract text from this image."
-        inputs = processor(images=image, text=prompt, return_tensors="pt")
-        output_ids = model.generate(**inputs)
-        return processor.decode(output_ids[0], skip_special_tokens=True).strip()
-    except Exception as e:
-        return f"Error: {str(e)}"
-
-# ---- AI-based Text Cleanup ----
-
-def clean_extracted_text(text):
-    """Clean the extracted text by removing extra spaces intelligently."""
-    # Remove multiple spaces
-    cleaned_text = re.sub(r'\s+', ' ', text).strip()
-
-    # Further clean punctuations with spaces around them
-    cleaned_text = re.sub(r'\s([?.!,])', r'\1', cleaned_text)
-
-    return cleaned_text
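For reference, a minimal sketch of how the removed helpers were presumably chained together by the Space's UI code (the caller, the sample filename, and the choice of the GOT backend here are illustrative assumptions, not part of this commit):

# Hypothetical caller; the real Space wires these functions into its app code.
from ocr_cpu import extract_text_got, clean_extracted_text

with open("sample_image.jpg", "rb") as f:      # assumed sample image path
    raw_text = extract_text_got(f)             # returns extracted text or an "Error: ..." string
print(clean_extracted_text(raw_text))          # collapse whitespace and tidy punctuation spacing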