TiberiuCristianLeon committed on
Commit 03dd083 (verified) · Parent: 1f3055b

Update app.py

Files changed (1): app.py (+2 −2)
app.py CHANGED
@@ -207,7 +207,7 @@ class Translators:
     def bigscience(self):
         tokenizer = AutoTokenizer.from_pretrained(self.model_name)
         model = AutoModelForSeq2SeqLM.from_pretrained(self.model_name)
-        self.input_text = self.input_text if self.input_text.endswith('.') else f{self.input_text}.'
+        self.input_text = self.input_text if self.input_text.endswith('.') else f'{self.input_text}.'
         inputs = tokenizer.encode(f"Translate to {self.tl}: {self.input_text}", return_tensors="pt")
         outputs = model.generate(inputs)
         translation = tokenizer.decode(outputs[0])
@@ -217,7 +217,7 @@ class Translators:
     def bloomz(self):
         tokenizer = AutoTokenizer.from_pretrained(self.model_name)
         model = AutoModelForCausalLM.from_pretrained(self.model_name)
-        self.input_text = self.input_text if self.input_text.endswith('.') else f{self.input_text}.'
+        self.input_text = self.input_text if self.input_text.endswith('.') else f'{self.input_text}.'
         # inputs = tokenizer.encode(f"Translate from {self.sl} to {self.tl}: {self.input_text} Translation:", return_tensors="pt")
         inputs = tokenizer.encode(f"Translate to {self.tl}: {self.input_text}", return_tensors="pt")
         outputs = model.generate(inputs)
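
For reference, the line removed in both hunks is not valid Python: `f{self.input_text}.'` is missing the opening quote of the f-string, so the file would not even parse. The added line restores the quote, so the expression appends a trailing period only when the input text does not already end with one. A minimal sketch of that corrected logic, using a hypothetical ensure_period helper (not part of app.py) in place of the inline ternary:

def ensure_period(text: str) -> str:
    # Mirror the fixed expression in bigscience() and bloomz():
    # append a '.' only if the text does not already end with one.
    return text if text.endswith('.') else f'{text}.'

print(ensure_period('Hello world'))   # Hello world.
print(ensure_period('Hello world.'))  # Hello world. (unchanged)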