Update README.md
README.md
The diff below removes the placeholder two-turn chat history from the usage example, notes that device placement also works on AMD and Intel GPUs, and pins explicit sampling parameters in the `model.generate` call.

```diff
@@ -64,11 +64,9 @@ You can use this model just as any other HuggingFace models:
 from transformers import AutoModelForCausalLM, AutoTokenizer
 model = AutoModelForCausalLM.from_pretrained('fla-hub/rwkv7-191M-world', trust_remote_code=True)
 tokenizer = AutoTokenizer.from_pretrained('fla-hub/rwkv7-191M-world', trust_remote_code=True)
-model = model.cuda()
+model = model.cuda()  # Supported on Nvidia/AMD/Intel, e.g. model.xpu()
 prompt = "What is a large language model?"
 messages = [
-    {"role": "user", "content": "Who are you?"},
-    {"role": "assistant", "content": "I am a GPT-3 based model."},
     {"role": "user", "content": prompt}
 ]
 text = tokenizer.apply_chat_template(
@@ -81,7 +79,11 @@ model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
 
 generated_ids = model.generate(
     **model_inputs,
-    max_new_tokens=
+    max_new_tokens=4096,
+    do_sample=True,
+    temperature=1.0,
+    top_p=0.3,
+    repetition_penalty=1.2
 )
 generated_ids = [
     output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
```
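Both hunks truncate the surrounding README example: the first cuts off inside the `apply_chat_template(` call and the second omits the decode step. A complete sketch of the updated snippet follows. The `apply_chat_template` arguments and the final `batch_decode` call are not visible in the diff and are filled in from standard `transformers` chat-template usage, so treat those parts as assumptions rather than the README's exact text.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained('fla-hub/rwkv7-191M-world', trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained('fla-hub/rwkv7-191M-world', trust_remote_code=True)
# .cuda() covers Nvidia and AMD (ROCm) builds of PyTorch; Intel GPUs use model.xpu()
model = model.cuda()

prompt = "What is a large language model?"
messages = [
    {"role": "user", "content": prompt}
]

# Assumed arguments: the diff cuts off after the opening parenthesis;
# tokenize=False / add_generation_prompt=True is the standard pattern.
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=4096,
    do_sample=True,
    temperature=1.0,
    top_p=0.3,
    repetition_penalty=1.2
)
# Drop the prompt tokens so only the newly generated ones are decoded.
generated_ids = [
    output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
# Assumed decode step (not shown in the diff).
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(response)
```

The sampling configuration (low `top_p` with `temperature=1.0` and `repetition_penalty=1.2`) is exactly what the commit pins; everything else in the flow is unchanged from the original example.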