Spaces · Runtime error
Commit a1084bc · Parent: 3ce52c0
Update app.py
app.py CHANGED

@@ -1,15 +1,18 @@
-
+import torch
 from peft import PeftModel
 from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig

-
-tokenizer = LlamaTokenizer.from_pretrained("decapoda-research/llama-7b-hf")
+tokenizer = LlamaTokenizer.from_pretrained("decapoda-research/llama-13b-hf")
 model = LlamaForCausalLM.from_pretrained(
-    "decapoda-research/llama-7b-hf",
-    load_in_8bit=True,
-    device_map="auto",
+    "decapoda-research/llama-13b-hf",
+    load_in_8bit=True,
+    torch_dtype=torch.float16,
+    device_map="auto",
+)
+model = PeftModel.from_pretrained(
+    model, "baruga/alpaca-lora-13b",
+    torch_dtype=torch.float16
 )
-model = PeftModel.from_pretrained(model, "tloen/alpaca-lora-7b")

 def generate_prompt(instruction, input=None):
     if input:
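This hunk moves the Space from the 7B base model with the tloen/alpaca-lora-7b adapter to the 13B base with the baruga/alpaca-lora-13b adapter, keeping 8-bit loading and holding the LoRA weights in fp16. Note that `load_in_8bit=True` also needs the bitsandbytes and accelerate packages installed at runtime. As a minimal sketch (an illustration, not part of the diff), the same load can be spelled with the explicit quantization config that later transformers releases prefer:

import torch
from peft import PeftModel
from transformers import BitsAndBytesConfig, LlamaForCausalLM, LlamaTokenizer

# Same effect as the load_in_8bit=True shorthand used in the commit.
quant_config = BitsAndBytesConfig(load_in_8bit=True)

tokenizer = LlamaTokenizer.from_pretrained("decapoda-research/llama-13b-hf")
model = LlamaForCausalLM.from_pretrained(
    "decapoda-research/llama-13b-hf",
    quantization_config=quant_config,
    torch_dtype=torch.float16,  # non-quantized parts kept in fp16
    device_map="auto",          # shard/offload across available devices
)
# The LoRA adapter weights stay in fp16 on top of the 8-bit base.
model = PeftModel.from_pretrained(model, "baruga/alpaca-lora-13b", torch_dtype=torch.float16)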
@@ -51,49 +54,3 @@ def evaluate(instruction, input=None):
         output = tokenizer.decode(s)
         print("Response:", output.split("### Response:")[1].strip())

-import streamlit as st
-from peft import PeftModel
-from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig
-
-model_name = 'bhaskar/LLaMA-7B-peft'
-tokenizer = LlamaTokenizer.from_pretrained(model_name)
-model = LlamaForCausalLM.from_pretrained(model_name).cuda()
-generation_config = GenerationConfig(
-    do_sample=True,
-    max_length=1024,
-    top_p=0.9,
-    temperature=1.0,
-    no_repeat_ngram_size=3,
-    num_return_sequences=1,
-)
-
-def generate_prompt(instruction):
-    return f"### Instruction: {instruction}\n\n### Response:"
-
-def evaluate1(instruction):
-    prompt = generate_prompt(instruction)
-    inputs = tokenizer(prompt, return_tensors="pt")
-    input_ids = inputs["input_ids"].cuda()
-    generation_output = model.generate(
-        input_ids=input_ids,
-        generation_config=generation_config,
-        return_dict_in_generate=True,
-        output_scores=True,
-        max_new_tokens=256
-    )
-    for s in generation_output.sequences:
-        output = tokenizer.decode(s)
-    return output.split("### Response:")[1].strip()
-
-def main():
-    st.set_page_config(page_title="LLaMA-7B Language Model")
-    st.title("LLaMA-7B Language Model")
-    st.write("This is a LLaMA-7B language model fine-tuned on various text datasets to generate text for a given task. It was trained with PyTorch and is capable of generating high-quality, coherent text similar to human writing. The model is highly versatile and can be used for a variety of tasks, including text completion, summarization, and translation.")
-    instruction = st.text_area("Instruction", height=200)
-    if st.button("Generate Response"):
-        with st.spinner("Generating response..."):
-            output = evaluate1(instruction)
-            st.write(output)
-
-if __name__ == "__main__":
-    main()
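The block deleted by this hunk was a second, self-contained Streamlit app (pointing at a different checkpoint, bhaskar/LLaMA-7B-peft) that had been appended to the same file; removing it leaves a single model-loading path. For reference, a rough sketch of driving the remaining pipeline end to end, assuming the `model` and `tokenizer` objects built in the first hunk; the `generate_response` helper, the sampling settings, and the Alpaca-style template are illustrative assumptions, since the real `generate_prompt` body is not shown in the hunks:

import torch
from transformers import GenerationConfig

# Assumes `model` and `tokenizer` from the updated app.py above.
# Sampling settings here are illustrative; the commit does not touch them.
generation_config = GenerationConfig(do_sample=True, temperature=0.7, top_p=0.9)

def generate_response(instruction: str) -> str:
    # Alpaca-style prompt template (assumed).
    prompt = f"### Instruction:\n{instruction}\n\n### Response:\n"
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)
    with torch.no_grad():
        output_ids = model.generate(
            input_ids=input_ids,
            generation_config=generation_config,
            max_new_tokens=256,
        )
    text = tokenizer.decode(output_ids[0])
    # Keep only the model's answer, mirroring evaluate() in the hunk context.
    return text.split("### Response:")[1].strip()

print(generate_response("Tell me about alpacas."))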