Update README.md
README.md CHANGED

@@ -25,11 +25,11 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
 model_path = "janbol/pythia-reverse-160m-Flan"
 
-#Load the tokenizer and model
+# Load the tokenizer and model
 tokenizer = AutoTokenizer.from_pretrained(model_path)
 model = AutoModelForCausalLM.from_pretrained(model_path)
 
-#Move model to GPU if available
+# Move model to GPU if available
 device = "cuda" if torch.cuda.is_available() else "cpu"
 model = model.to(device)
 
@@ -59,7 +59,8 @@ def generate_flipped(prompt, max_length=100):
 
 # Flip back and decode
 return tokenizer.decode(torch.flip(output, (1,))[0], skip_special_tokens=True)
-print(generate_flipped("
+print(generate_flipped("Thus this planet is theoretically habitable"))
+
 
 
 ####### Output ##########
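The diff only shows the last two lines of `generate_flipped`. For context, here is a minimal sketch of what the full helper plausibly looks like, assuming the model was trained on token sequences reversed with `torch.flip`, so the prompt is flipped before generation and the output is flipped back before decoding. The tokenization step and the `model.generate` arguments below are illustrative assumptions, not taken from the README; only the final comment and `return` line appear verbatim in the diff.

```python
def generate_flipped(prompt, max_length=100):
    # Tokenize the prompt and move it to the model's device
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    # Reverse the token order, since the model is assumed to
    # generate right-to-left over flipped sequences
    flipped_ids = torch.flip(input_ids, (1,))

    # Generate a continuation of the reversed sequence
    # (generation arguments are illustrative, not from the README)
    output = model.generate(
        flipped_ids,
        max_length=max_length,
        pad_token_id=tokenizer.eos_token_id,
    )

    # Flip back and decode -- this line appears verbatim in the diff
    return tokenizer.decode(torch.flip(output, (1,))[0], skip_special_tokens=True)
```

Under these assumptions, `print(generate_flipped("Thus this planet is theoretically habitable"))` would tokenize the prompt, reverse it, let the model extend the reversed sequence, and decode the re-flipped result, so the printed text reads left-to-right with the generated continuation preceding the prompt.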