bhaskartripathi committed on
Commit 8c8c5db · 1 Parent(s): aa0647f

Update app.py

Files changed (1)
  1. app.py +124 -121
app.py CHANGED
@@ -1,54 +1,93 @@
-"""Python file to serve as the frontend"""
-import streamlit as st
-from streamlit_chat import message
-
-from langchain.chains import ConversationChain, LLMChain
-from langchain import PromptTemplate
-from langchain.llms.base import LLM
-from langchain.memory import ConversationBufferWindowMemory
-from typing import Optional, List, Mapping, Any
-
 import torch
 from peft import PeftModel
 import transformers
+import gradio as gr
 
+assert (
+    "LlamaTokenizer" in transformers._import_structure["models.llama"]
+), "LLaMA is now in HuggingFace's main branch.\nPlease reinstall it: pip uninstall transformers && pip install git+https://github.com/huggingface/transformers.git"
 from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig
-from transformers import BitsAndBytesConfig
 
 tokenizer = LlamaTokenizer.from_pretrained("decapoda-research/llama-7b-hf")
 
+BASE_MODEL = "decapoda-research/llama-7b-hf"
+LORA_WEIGHTS = "tloen/alpaca-lora-7b"
+
+if torch.cuda.is_available():
+    device = "cuda"
+else:
+    device = "cpu"
+
+try:
+    if torch.backends.mps.is_available():
+        device = "mps"
+except:
+    pass
+
+if device == "cuda":
+    model = LlamaForCausalLM.from_pretrained(
+        BASE_MODEL,
+        load_in_8bit=False,
+        torch_dtype=torch.float16,
+        device_map="auto",
+    )
+    model = PeftModel.from_pretrained(
+        model, LORA_WEIGHTS, torch_dtype=torch.float16, force_download=True
+    )
+elif device == "mps":
+    model = LlamaForCausalLM.from_pretrained(
+        BASE_MODEL,
+        device_map={"": device},
+        torch_dtype=torch.float16,
+    )
+    model = PeftModel.from_pretrained(
+        model,
+        LORA_WEIGHTS,
+        device_map={"": device},
+        torch_dtype=torch.float16,
+    )
+else:
+    model = LlamaForCausalLM.from_pretrained(
+        BASE_MODEL, device_map={"": device}, low_cpu_mem_usage=True
+    )
+    model = PeftModel.from_pretrained(
+        model,
+        LORA_WEIGHTS,
+        device_map={"": device},
+    )
 
 
-quantization_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True)
-
-model = LlamaForCausalLM.from_pretrained(
-    "decapoda-research/llama-7b-hf",
-    # load_in_8bit=True,
-    # torch_dtype=torch.float16,
-    #device_map="auto",
-    device_map={"":"cpu"},
-    max_memory={"cpu":"15GiB"},
-    quantization_config=quantization_config
-)
-model = PeftModel.from_pretrained(
-    model, "tloen/alpaca-lora-7b",
-    # torch_dtype=torch.float16,
-    device_map={"":"cpu"},
-)
+def generate_prompt(instruction, input=None):
+    if input:
+        return f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
+### Instruction:
+{instruction}
+### Input:
+{input}
+### Response:"""
+    else:
+        return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.
+### Instruction:
+{instruction}
+### Response:"""
 
-device = "cpu"
-print("model device :", model.device, flush=True)
-# model.to(device)
+model.half()
 model.eval()
-
-def evaluate_raw_prompt(
-    prompt:str,
-    temperature=0.1,
-    top_p=0.75,
-    top_k=40,
-    num_beams=4,
-    **kwargs,
+if torch.__version__ >= "2":
+    model = torch.compile(model)
+
+
+def evaluate(
+    instruction,
+    input=None,
+    temperature=0.1,
+    top_p=0.75,
+    top_k=40,
+    num_beams=4,
+    max_new_tokens=128,
+    **kwargs,
 ):
+    prompt = generate_prompt(instruction, input)
     inputs = tokenizer(prompt, return_tensors="pt")
     input_ids = inputs["input_ids"].to(device)
     generation_config = GenerationConfig(
@@ -64,93 +103,57 @@ def evaluate_raw_prompt(
             generation_config=generation_config,
             return_dict_in_generate=True,
             output_scores=True,
-            max_new_tokens=256,
+            max_new_tokens=max_new_tokens,
         )
     s = generation_output.sequences[0]
     output = tokenizer.decode(s)
-    # return output
     return output.split("### Response:")[1].strip()
 
-class AlpacaLLM(LLM):
-    temperature: float
-    top_p: float
-    top_k: int
-    num_beams: int
-    @property
-    def _llm_type(self) -> str:
-        return "custom"
-
-    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
-        if stop is not None:
-            raise ValueError("stop kwargs are not permitted.")
-        answer = evaluate_raw_prompt(prompt,
-            top_p= self.top_p,
-            top_k= self.top_k,
-            num_beams= self.num_beams,
-            temperature= self.temperature
-        )
-        return answer
-
-    @property
-    def _identifying_params(self) -> Mapping[str, Any]:
-        """Get the identifying parameters."""
-        return {
-            "top_p": self.top_p,
-            "top_k": self.top_k,
-            "num_beams": self.num_beams,
-            "temperature": self.temperature
-        }
-
 
-template = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
-### Instruction:
-You are a chatbot, you should answer my last question very briefly. You are consistent and non repetitive.
-### Chat:
-{history}
-Human: {human_input}
-### Response:"""
-
-prompt = PromptTemplate(
-    input_variables=["history","human_input"],
-    template=template,
+g = gr.Interface(
+    fn=evaluate,
+    inputs=[
+        gr.components.Textbox(
+            lines=2, label="Instruction", placeholder="Tell me about alpacas."
+        ),
+        gr.components.Textbox(lines=2, label="Input", placeholder="none"),
+        gr.components.Slider(minimum=0, maximum=1, value=0.1, label="Temperature"),
+        gr.components.Slider(minimum=0, maximum=1, value=0.75, label="Top p"),
+        gr.components.Slider(minimum=0, maximum=100, step=1, value=40, label="Top k"),
+        gr.components.Slider(minimum=1, maximum=4, step=1, value=4, label="Beams"),
+        gr.components.Slider(
+            minimum=1, maximum=512, step=1, value=128, label="Max tokens"
+        ),
+    ],
+    outputs=[
+        gr.inputs.Textbox(
+            lines=5,
+            label="Output",
+        )
+    ],
+    title="🦙🌲 Alpaca-LoRA",
+    description="Alpaca-LoRA is a 7B-parameter LLaMA model finetuned to follow instructions. It is trained on the [Stanford Alpaca](https://github.com/tatsu-lab/stanford_alpaca) dataset and makes use of the Huggingface LLaMA implementation. For more information, please visit [the project's website](https://github.com/tloen/alpaca-lora).",
 )
-
-
-def load_chain():
-    """Logic for loading the chain you want to use should go here."""
-    llm = AlpacaLLM(top_p=0.75, top_k=40, num_beams=4, temperature=0.1)
-    # chain = ConversationChain(llm=llm)
-    chain = LLMChain(llm=llm, prompt=prompt, memory=ConversationBufferWindowMemory(k=2))
-    return chain
-
-chain = load_chain()
-
-# From here down is all the StreamLit UI.
-st.set_page_config(page_title="LangChain Demo", page_icon=":robot:")
-st.header("LangChain Demo")
-
-if "generated" not in st.session_state:
-    st.session_state["generated"] = []
-
-if "past" not in st.session_state:
-    st.session_state["past"] = []
-
-
-def get_text():
-    input_text = st.text_input("Human: ", "Hello, how are you?", key="input")
-    return input_text
-
-
-user_input = get_text()
-
-if user_input:
-    output = chain.predict(human_input=user_input)
-
-    st.session_state.past.append(user_input)
-    st.session_state.generated.append(output)
-
-if st.session_state["generated"]:
-
-    for i in range(len(st.session_state["generated"]) - 1, -1, -1):
-        message(st.session_state["generated"][i], key=str(i))
-        message(st.session_state["past"][i], is_user=True, key=str(i) + "_user")
+g.queue(concurrency_count=1)
+g.launch()
+
+# Old testing code follows.
+
+"""
+if __name__ == "__main__":
+    # testing code for readme
+    for instruction in [
+        "Tell me about alpacas.",
+        "Tell me about the president of Mexico in 2019.",
+        "Tell me about the king of France in 2019.",
+        "List all Canadian provinces in alphabetical order.",
+        "Write a Python program that prints the first 10 Fibonacci numbers.",
+        "Write a program that prints the numbers from 1 to 100. But for multiples of three print 'Fizz' instead of the number and for the multiples of five print 'Buzz'. For numbers which are multiples of both three and five print 'FizzBuzz'.",
+        "Tell me five words that rhyme with 'shock'.",
+        "Translate the sentence 'I have no mouth but I must scream' into Spanish.",
+        "Count up from 1 to 500.",
+    ]:
+        print("Instruction:", instruction)
+        print("Response:", evaluate(instruction))
+        print()
+"""