import logging

import torch
from transformers import M2M100Tokenizer, M2M100ForConditionalGeneration


class TranslationModel:
    """Wraps facebook/m2m100_1.2B for offline, CPU-only translation."""

    def __init__(self, cache_dir: str = "models/"):
        self.device = torch.device("cpu")
        logging.info("Using CPU for translations")

        self.model_name = "facebook/m2m100_1.2B"
        self.tokenizer = M2M100Tokenizer.from_pretrained(
            self.model_name,
            cache_dir=cache_dir,
            local_files_only=True,  # Only use cached files; never hit the network
        )
        self.model = M2M100ForConditionalGeneration.from_pretrained(
            self.model_name,
            cache_dir=cache_dir,
            local_files_only=True,
            device_map="cpu",
            low_cpu_mem_usage=True,  # Stream weights in to avoid a second full copy in RAM
        )
        self.model.eval()  # Inference only: disable dropout etc.

    def translate(self, text: str, source_lang: str, target_lang: str) -> str:
        try:
            # M2M100 takes the source language via tokenizer state, not a prompt.
            self.tokenizer.src_lang = source_lang
            encoded = self.tokenizer(text, return_tensors="pt")
            with torch.no_grad():
                generated = self.model.generate(
                    **encoded,
                    # Force the first generated token to be the target-language tag.
                    forced_bos_token_id=self.tokenizer.get_lang_id(target_lang),
                    max_length=128,
                )
            return self.tokenizer.batch_decode(generated, skip_special_tokens=True)[0]
        except Exception as e:
            return f"Translation error: {e}"
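

# --- Example usage: a minimal sketch, not part of the original class ---
# Assumes the m2m100_1.2B weights are already downloaded into models/
# (local_files_only=True raises an error otherwise). Language codes follow
# the M2M100 convention, e.g. "en", "fr", "de".
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    translator = TranslationModel(cache_dir="models/")
    print(translator.translate("Hello, world!", source_lang="en", target_lang="fr"))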