import torch
import numpy as np
from PIL import Image, ImageOps
import matplotlib.pyplot as plt
from torchvision import transforms as T
from transformers import VisionEncoderDecoderModel, AutoTokenizer

# Load the tokenizer and OCR model from the Hugging Face Hub
tokenizer = AutoTokenizer.from_pretrained('tirthadagr8/CustomOCR')
model = VisionEncoderDecoderModel.from_pretrained('tirthadagr8/CustomOCR')

def resize_with_padding(image, target_size=(224, 224)):
    # Ensure 3 channels so the 3-channel normalization below applies
    image = image.convert("RGB")
    # Shrink to fit within target_size while preserving aspect ratio (thumbnail never enlarges)
    image.thumbnail(target_size)
    # Pad the remaining space with white up to the full target size
    delta_w = target_size[0] - image.width
    delta_h = target_size[1] - image.height
    padding = (delta_w//2, delta_h//2, delta_w - (delta_w//2), delta_h - (delta_h//2))
    padded_img = ImageOps.expand(image, padding, fill="white")
    transform = T.Compose([
        T.ToTensor(),  # Convert to tensor and scale to [0, 1]
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])  # ImageNet normalization
    ])
    return transform(padded_img)

path="0106.jpg"
img=resize_with_padding(Image.open(path))
model.eval()
with torch.no_grad():
    print(tokenizer.batch_decode(model.cuda().generate(img.unsqueeze(0).cuda()),skip_special_tokens=True))

plt.imshow(img.permute(1,2,0).detach().cpu().numpy())
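
# A minimal batched-inference sketch (not part of the original card): the file
# names below are hypothetical placeholders. Because resize_with_padding always
# returns a 3x224x224 tensor, several crops can be stacked and decoded in a
# single generate() call.
paths = ["0106.jpg", "0107.jpg"]  # hypothetical example files
batch = torch.stack([resize_with_padding(Image.open(p)) for p in paths])
with torch.no_grad():
    batch_ids = model.generate(batch.to(device))
print(tokenizer.batch_decode(batch_ids, skip_special_tokens=True))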