yuntian-deng committed on
Commit
2d0cc03
1 Parent(s): 3ffaf8e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -13
app.py CHANGED
@@ -16,19 +16,22 @@ MAX_RESULT_TOKENS = 10
16
 
17
  @spaces.GPU
18
  def predict_answer(question):
19
- input_text = ' '.join(question.split()).strip() + ' ' + tokenizer.eos_token
20
- print (input_text)
21
- inputs = tokenizer(input_text, return_tensors='pt').to('cuda' if torch.cuda.is_available() else 'cpu')
22
- implicit_cot_model.to('cuda' if torch.cuda.is_available() else 'cpu')
23
-
24
- input_ids = inputs['input_ids']
25
- print (input_ids)
26
- outputs = implicit_cot_model.generate(input_ids=input_ids,
27
- max_new_tokens=MAX_RESULT_TOKENS,
28
- do_sample=False)
29
- print (outputs)
30
-
31
- prediction = tokenizer.decode(outputs[0, input_ids.shape[-1]:], skip_special_tokens=True)
 
 
 
32
 
33
  return prediction
34
 
 
16
 
17
@spaces.GPU
def predict_answer(question):
    """Generate an answer for *question* with the implicit-CoT model.

    The question is whitespace-normalized, terminated with the tokenizer's
    EOS token, and passed to ``implicit_cot_model.generate`` using greedy
    decoding (``do_sample=False``) capped at ``MAX_RESULT_TOKENS`` new
    tokens. Only the newly generated tokens (past the prompt) are decoded.

    On any failure the exception message is returned as the prediction so
    the Gradio UI displays the error instead of crashing the request.
    """
    try:
        # Collapse runs of whitespace and append EOS as the prompt terminator.
        input_text = ' '.join(question.split()).strip() + ' ' + tokenizer.eos_token
        print(input_text)

        # Resolve the target device once instead of recomputing the
        # cuda-availability expression at each call site.
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        inputs = tokenizer(input_text, return_tensors='pt').to(device)
        implicit_cot_model.to(device)

        input_ids = inputs['input_ids']
        outputs = implicit_cot_model.generate(input_ids=input_ids,
                                              max_new_tokens=MAX_RESULT_TOKENS,
                                              do_sample=False)

        # Slice off the echoed prompt: decode only tokens generated after it.
        prediction = tokenizer.decode(outputs[0, input_ids.shape[-1]:],
                                      skip_special_tokens=True)
    except Exception as e:
        # Best-effort boundary: surface the error text in the UI.
        prediction = f'{e}'

    return prediction