5to9 committed on
Commit
c7efb5e
·
1 Parent(s): 44cc2f7

0.50 set pad_token manually

Browse files
Files changed (1) hide show
  1. app.py +3 -2
app.py CHANGED
@@ -24,7 +24,7 @@ system_prompts = {
24
  "Spanish": "Eres un chatbot servicial que responde a las entradas de los usuarios de forma concisa y original."
25
  }
26
 
27
- htmL_info = "<center><h1>Pharia Battle Royale</h1><p>Let the games begin: In this bot arena, the Pharia 1 mode competes against a challenger. Try a prompt in a language you like. Set the parameters and vote for the best answers. After casting your vote, the bots reveal their identity.</p></center>"
28
 
29
  model_info = [{"id": "Aleph-Alpha/Pharia-1-LLM-7B-control-hf",
30
  "name": "Pharia 1 LLM 7B control hf"}]
@@ -112,8 +112,8 @@ def generate_both(system_prompt, input_text, chatbot_a, chatbot_b, max_new_token
112
  tokenized = tokenizer_a(formatted_conversation, return_tensors="pt").to(device)
113
  logging.debug(tokenized) #attention_mask
114
  input_ids_a = tokenized.input_ids
115
- logging.debug(f'tokenizer_a.pad_token is {tokenizer_a.pad_token}')
116
  tokenizer_a.eos_token = "<|endoftext|>" # not set für Pharia
 
117
  else:
118
  input_ids_a = tokenizer_a.apply_chat_template(
119
  new_messages_a,
@@ -129,6 +129,7 @@ def generate_both(system_prompt, input_text, chatbot_a, chatbot_b, max_new_token
129
  input_ids_b = tokenized.input_ids
130
  logging.debug(f'tokenizer_b.pad_token is {tokenizer_b.pad_token}')
131
  tokenizer_b.eos_token = "<|endoftext|>" # not set für Pharia
 
132
  else:
133
  input_ids_b = tokenizer_b.apply_chat_template(
134
  new_messages_b,
 
24
  "Spanish": "Eres un chatbot servicial que responde a las entradas de los usuarios de forma concisa y original."
25
  }
26
 
27
+ htmL_info = "<center><h1>Pharia Battle Royale</h1><p>Let the games begin: In this bot arena, the Pharia 1 model competes against a challenger. Try a prompt in a language you want to explore. Set the parameters and vote for the best answers. After casting your vote, the bots reveal their identity. Inputs, outputs and votes are logged anonymously for further insight.</p></center>"
28
 
29
  model_info = [{"id": "Aleph-Alpha/Pharia-1-LLM-7B-control-hf",
30
  "name": "Pharia 1 LLM 7B control hf"}]
 
112
  tokenized = tokenizer_a(formatted_conversation, return_tensors="pt").to(device)
113
  logging.debug(tokenized) #attention_mask
114
  input_ids_a = tokenized.input_ids
 
115
  tokenizer_a.eos_token = "<|endoftext|>" # not set für Pharia
116
+ tokenizer_a.pad_token = "<|padding|>" # not set für Pharia
117
  else:
118
  input_ids_a = tokenizer_a.apply_chat_template(
119
  new_messages_a,
 
129
  input_ids_b = tokenized.input_ids
130
  logging.debug(f'tokenizer_b.pad_token is {tokenizer_b.pad_token}')
131
  tokenizer_b.eos_token = "<|endoftext|>" # not set für Pharia
132
+ tokenizer_b.pad_token = "<|padding|>" # not set für Pharia
133
  else:
134
  input_ids_b = tokenizer_b.apply_chat_template(
135
  new_messages_b,