Update app.py
app.py CHANGED
@@ -16,15 +16,15 @@ model.to('cpu')
 def generate_text(prompt, temperature, top_p):
     prompt_with_eos = " #CONTEXT# " + prompt + " #TOPIC# " # Add the string "EOS" to the end of the prompt
     input_tokens = tokenizer.encode(prompt_with_eos, return_tensors='pt')
-
-    if input_tokens.size(1) > 512:
-        return "ERROR, CONTEXT SIZE EXCEEDED"
 
     input_tokens = input_tokens.to('cpu')
-
+
     generated_text = prompt_with_eos # Start with the initial prompt plus "EOS"
     prompt_length = len(generated_text)
 
+    if input_tokens.size(1) > 512:
+        generated = "ERROR, CONTEXT SIZE EXCEEDED"
+
     for _ in range(80): # Adjust the range to control the number of tokens generated
         with torch.no_grad():
             outputs = model(input_tokens)
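For reference, a minimal sketch of how the full generate_text function might look after this commit. It assumes model and tokenizer are a Hugging Face transformers causal LM and its tokenizer loaded earlier in app.py (the hunk header shows model.to('cpu')); everything past the last line of the hunk, including the temperature/top-p sampling loop, is not shown in the diff and is an assumption. Note that the new hunk assigns the error string to a variable named generated without returning it, so the sampling loop would still run on an over-long prompt; the sketch uses an early return instead, which is a guess at the intent.

import torch

# Assumed setup from earlier in app.py (not shown in this hunk; model id is hypothetical):
# from transformers import AutoModelForCausalLM, AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("<model-id>")
# model = AutoModelForCausalLM.from_pretrained("<model-id>")
# model.to('cpu')

def generate_text(prompt, temperature, top_p):
    prompt_with_eos = " #CONTEXT# " + prompt + " #TOPIC# "
    input_tokens = tokenizer.encode(prompt_with_eos, return_tensors='pt')
    input_tokens = input_tokens.to('cpu')

    generated_text = prompt_with_eos  # Start with the initial prompt
    prompt_length = len(generated_text)

    # Guard against prompts longer than the model's 512-token context window.
    # The commit assigns to `generated` here; an early return (assumed intent)
    # keeps the sampling loop from running on an over-long input.
    if input_tokens.size(1) > 512:
        return "ERROR, CONTEXT SIZE EXCEEDED"

    for _ in range(80):  # number of new tokens to sample
        with torch.no_grad():
            outputs = model(input_tokens)
        # Everything below is an assumed temperature + nucleus (top-p) sampler;
        # the diff hunk ends before this point.
        logits = outputs.logits[:, -1, :] / max(temperature, 1e-5)
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        cumulative_probs = torch.cumsum(torch.softmax(sorted_logits, dim=-1), dim=-1)
        remove = cumulative_probs > top_p
        remove[..., 1:] = remove[..., :-1].clone()  # always keep the top token
        remove[..., 0] = False
        sorted_logits[remove] = float('-inf')
        probs = torch.softmax(sorted_logits, dim=-1)
        next_sorted = torch.multinomial(probs, num_samples=1)
        next_token = sorted_indices.gather(-1, next_sorted)
        input_tokens = torch.cat([input_tokens, next_token], dim=-1)
        generated_text += tokenizer.decode(next_token[0], skip_special_tokens=True)

    return generated_text[prompt_length:]

The visible effect of the commit itself is small: the 512-token check moves from immediately after tokenization to after prompt_length is computed, and the early return is replaced by an assignment.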