0.49 check pad_token
app.py CHANGED
@@ -112,8 +112,8 @@ def generate_both(system_prompt, input_text, chatbot_a, chatbot_b, max_new_token
         tokenized = tokenizer_a(formatted_conversation, return_tensors="pt").to(device)
         logging.debug(tokenized) #attention_mask
         input_ids_a = tokenized.input_ids
-        logging.debug(f'tokenizer_a.
-        tokenizer_a.eos_token = "<|endoftext|>"
+        logging.debug(f'tokenizer_a.pad_token is {tokenizer_a.pad_token}')
+        tokenizer_a.eos_token = "<|endoftext|>" # not set for Pharia
     else:
         input_ids_a = tokenizer_a.apply_chat_template(
             new_messages_a,
@@ -127,8 +127,8 @@ def generate_both(system_prompt, input_text, chatbot_a, chatbot_b, max_new_token
         tokenized = tokenizer_b(formatted_conversation, return_tensors="pt").to(device)
         logging.debug(tokenized)
         input_ids_b = tokenized.input_ids
-        logging.debug(f'tokenizer_b.
-        tokenizer_b.eos_token = "<|endoftext|>"
+        logging.debug(f'tokenizer_b.pad_token is {tokenizer_b.pad_token}')
+        tokenizer_b.eos_token = "<|endoftext|>" # not set for Pharia
     else:
         input_ids_b = tokenizer_b.apply_chat_template(
             new_messages_b,
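Both hunks apply the same fix: the checkpoint being loaded (Pharia) ships without these special tokens set, so the commit logs the pad_token and assigns an eos_token by hand before generation. A minimal standalone sketch of that pattern, assuming a transformers AutoTokenizer; the checkpoint name below is a placeholder, not taken from this Space:

import logging

from transformers import AutoTokenizer

logging.basicConfig(level=logging.DEBUG)

# Placeholder checkpoint; substitute whichever model the Space loads.
tokenizer = AutoTokenizer.from_pretrained("some-org/some-model")

# Log what the checkpoint ships with, mirroring the debug lines in the commit.
logging.debug(f'pad_token is {tokenizer.pad_token}')
logging.debug(f'eos_token is {tokenizer.eos_token}')

# Fall back explicitly when the tokenizer config leaves these unset.
if tokenizer.eos_token is None:
    tokenizer.eos_token = "<|endoftext|>"  # value the commit uses for Pharia
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

Note that the commit assigns eos_token unconditionally; guarding with "is None" as above keeps the fallback from clobbering checkpoints that already define it.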