import h5py
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from transformers import VisionEncoderDecoderModel, TrOCRProcessor
import torch


def load_dataset(file_path):
    """Read the image array and decoded text labels from an HDF5 file."""
    with h5py.File(file_path, 'r') as f:
        images = f['images'][:]
        texts = [t.decode('utf-8') if isinstance(t, bytes) else t for t in f['texts'][:]]
    return images, texts


# Load the training split and report its size
train_images, train_texts = load_dataset('train_dataset.h5')
print(f"Loaded {len(train_images)} training samples")


def display_sample(images, texts, idx=None):
    """Show one image/text pair; pick a random index if none is given."""
    if idx is None:
        idx = np.random.randint(0, len(images))

    print(f"Text: {texts[idx]}")

    plt.figure(figsize=(12, 3))
    plt.imshow(images[idx])
    plt.axis('off')
    plt.title(f"Sample {idx}")
    plt.show()

    return idx


sample_idx = display_sample(train_images, train_texts)


def test_with_trocr(image, model_name="microsoft/trocr-base-printed"):
    """Run a pretrained TrOCR checkpoint on a single image and return the predicted text."""
    processor = TrOCRProcessor.from_pretrained(model_name)
    model = VisionEncoderDecoderModel.from_pretrained(model_name)

    # Convert NumPy input to a PIL image; force RGB in case the stored crops are single-channel
    if isinstance(image, np.ndarray):
        image = Image.fromarray(image).convert("RGB")

    # Preprocess to pixel values, then autoregressively decode the predicted text
    pixel_values = processor(image, return_tensors="pt").pixel_values
    generated_ids = model.generate(pixel_values)
    generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]

    return generated_text
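

# Quick sanity check (a sketch, not part of the original pipeline): run the pretrained
# TrOCR baseline on the sample shown above and compare its prediction with the
# ground-truth label. Note that test_with_trocr reloads the processor and model on
# every call, so this is only suited to spot checks, not batch evaluation.
predicted_text = test_with_trocr(train_images[sample_idx])
print(f"Ground truth: {train_texts[sample_idx]}")
print(f"TrOCR output: {predicted_text}")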