from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
import torch

# Translate a Portuguese sentence to Emakhuwa with a fine-tuned M2M100 model.
# NOTE: the checkpoint reuses M2M100's "sw" (Swahili) language slot for
# Emakhuwa (vmw), since vmw is not among M2M100's native language codes.
src_lang = "pt"
tgt_lang = "sw"  # Emakhuwa was mapped to Swahili
text = "Dez histórias que falam por 60 milhões de refugiados"

# Prefer the first GPU when available; fall back to CPU.
device = "cuda:0" if torch.cuda.is_available() else "cpu"
model_name = "felerminoali/m2m_bilingual_pt-vmw_65k"
model = M2M100ForConditionalGeneration.from_pretrained(model_name).to(device)
tokenizer = M2M100Tokenizer.from_pretrained(model_name)

# Translate: set the source language before encoding, then force the decoder
# to begin with the target-language token so M2M100 generates in tgt_lang.
tokenizer.src_lang = src_lang
encoded = tokenizer(text, return_tensors="pt").to(model.device)
with torch.inference_mode():  # no autograd bookkeeping needed for generation
    generated_tokens = model.generate(
        **encoded,
        forced_bos_token_id=tokenizer.get_lang_id(tgt_lang),
    )
print(tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0])
# --- Hugging Face model-card page residue (not code), kept for reference: ---
# Downloads last month: 2
# Inference Providers: this model isn't deployed by any Inference Provider.