bhaskartripathi committed on
Commit c8e58cc · 1 Parent(s): 8c8c5db

Update app.py

Files changed (1)
  1. app.py +220 -152
app.py CHANGED
@@ -1,159 +1,227 @@
-import torch
-from peft import PeftModel
-import transformers
 import gradio as gr
-
-assert (
-    "LlamaTokenizer" in transformers._import_structure["models.llama"]
-), "LLaMA is now in HuggingFace's main branch.\nPlease reinstall it: pip uninstall transformers && pip install git+https://github.com/huggingface/transformers.git"
-from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig
-
-tokenizer = LlamaTokenizer.from_pretrained("decapoda-research/llama-7b-hf")
-
-BASE_MODEL = "decapoda-research/llama-7b-hf"
-LORA_WEIGHTS = "tloen/alpaca-lora-7b"
-
-if torch.cuda.is_available():
-    device = "cuda"
-else:
-    device = "cpu"
-
-try:
-    if torch.backends.mps.is_available():
-        device = "mps"
-except:
-    pass
-
-if device == "cuda":
-    model = LlamaForCausalLM.from_pretrained(
-        BASE_MODEL,
-        load_in_8bit=False,
-        torch_dtype=torch.float16,
-        device_map="auto",
-    )
-    model = PeftModel.from_pretrained(
-        model, LORA_WEIGHTS, torch_dtype=torch.float16, force_download=True
-    )
-elif device == "mps":
-    model = LlamaForCausalLM.from_pretrained(
-        BASE_MODEL,
-        device_map={"": device},
-        torch_dtype=torch.float16,
-    )
-    model = PeftModel.from_pretrained(
-        model,
-        LORA_WEIGHTS,
-        device_map={"": device},
-        torch_dtype=torch.float16,
-    )
-else:
-    model = LlamaForCausalLM.from_pretrained(
-        BASE_MODEL, device_map={"": device}, low_cpu_mem_usage=True
     )
-    model = PeftModel.from_pretrained(
-        model,
-        LORA_WEIGHTS,
-        device_map={"": device},
     )
 
 
-def generate_prompt(instruction, input=None):
-    if input:
-        return f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
-### Instruction:
-{instruction}
-### Input:
-{input}
-### Response:"""
     else:
-        return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.
-### Instruction:
-{instruction}
-### Response:"""
-
-model.half()
-model.eval()
-if torch.__version__ >= "2":
-    model = torch.compile(model)
-
-
-def evaluate(
-    instruction,
-    input=None,
-    temperature=0.1,
-    top_p=0.75,
-    top_k=40,
-    num_beams=4,
-    max_new_tokens=128,
-    **kwargs,
-):
-    prompt = generate_prompt(instruction, input)
-    inputs = tokenizer(prompt, return_tensors="pt")
-    input_ids = inputs["input_ids"].to(device)
-    generation_config = GenerationConfig(
-        temperature=temperature,
-        top_p=top_p,
-        top_k=top_k,
-        num_beams=num_beams,
-        **kwargs,
-    )
-    with torch.no_grad():
-        generation_output = model.generate(
-            input_ids=input_ids,
-            generation_config=generation_config,
-            return_dict_in_generate=True,
-            output_scores=True,
-            max_new_tokens=max_new_tokens,
-        )
-    s = generation_output.sequences[0]
-    output = tokenizer.decode(s)
-    return output.split("### Response:")[1].strip()
-
-
-g = gr.Interface(
-    fn=evaluate,
-    inputs=[
-        gr.components.Textbox(
-            lines=2, label="Instruction", placeholder="Tell me about alpacas."
-        ),
-        gr.components.Textbox(lines=2, label="Input", placeholder="none"),
-        gr.components.Slider(minimum=0, maximum=1, value=0.1, label="Temperature"),
-        gr.components.Slider(minimum=0, maximum=1, value=0.75, label="Top p"),
-        gr.components.Slider(minimum=0, maximum=100, step=1, value=40, label="Top k"),
-        gr.components.Slider(minimum=1, maximum=4, step=1, value=4, label="Beams"),
-        gr.components.Slider(
-            minimum=1, maximum=512, step=1, value=128, label="Max tokens"
-        ),
-    ],
-    outputs=[
-        gr.inputs.Textbox(
-            lines=5,
-            label="Output",
-        )
-    ],
-    title="🦙🌲 Alpaca-LoRA",
-    description="Alpaca-LoRA is a 7B-parameter LLaMA model finetuned to follow instructions. It is trained on the [Stanford Alpaca](https://github.com/tatsu-lab/stanford_alpaca) dataset and makes use of the Huggingface LLaMA implementation. For more information, please visit [the project's website](https://github.com/tloen/alpaca-lora).",
-)
-g.queue(concurrency_count=1)
-g.launch()
-
-# Old testing code follows.
-
-"""
-if __name__ == "__main__":
-    # testing code for readme
-    for instruction in [
-        "Tell me about alpacas.",
-        "Tell me about the president of Mexico in 2019.",
-        "Tell me about the king of France in 2019.",
-        "List all Canadian provinces in alphabetical order.",
-        "Write a Python program that prints the first 10 Fibonacci numbers.",
-        "Write a program that prints the numbers from 1 to 100. But for multiples of three print 'Fizz' instead of the number and for the multiples of five print 'Buzz'. For numbers which are multiples of both three and five print 'FizzBuzz'.",
-        "Tell me five words that rhyme with 'shock'.",
-        "Translate the sentence 'I have no mouth but I must scream' into Spanish.",
-        "Count up from 1 to 500.",
-    ]:
-        print("Instruction:", instruction)
-        print("Response:", evaluate(instruction))
-        print()
-"""
 
+import urllib.request
+import fitz  # PyMuPDF
+import re
+import numpy as np
+import tensorflow_hub as hub
+import openai
 import gradio as gr
+import os
+from sklearn.neighbors import NearestNeighbors
+
+def download_pdf(url, output_path):
+    urllib.request.urlretrieve(url, output_path)
+
+
+def preprocess(text):
+    # Collapse newlines and runs of whitespace into single spaces.
+    text = text.replace('\n', ' ')
+    text = re.sub(r'\s+', ' ', text)
+    return text
+
+
+def pdf_to_text(path, start_page=1, end_page=None):
+    doc = fitz.open(path)
+    total_pages = doc.page_count
+
+    if end_page is None:
+        end_page = total_pages
+
+    text_list = []
+
+    for i in range(start_page-1, end_page):
+        text = doc.load_page(i).get_text("text")
+        text = preprocess(text)
+        text_list.append(text)
+
+    doc.close()
+    return text_list
+
+
+def text_to_chunks(texts, word_length=150, start_page=1):
+    text_toks = [t.split(' ') for t in texts]
+    chunks = []
+
+    for idx, words in enumerate(text_toks):
+        for i in range(0, len(words), word_length):
+            chunk = words[i:i+word_length]
+            # If the tail of a page is shorter than word_length, prepend it
+            # to the next page so every chunk keeps a uniform size.
+            if (i+word_length) > len(words) and (len(chunk) < word_length) and (
+                len(text_toks) != (idx+1)):
+                text_toks[idx+1] = chunk + text_toks[idx+1]
+                continue
+            chunk = ' '.join(chunk).strip()
+            chunk = f'[{idx+start_page}]' + ' ' + '"' + chunk + '"'
+            chunks.append(chunk)
+    return chunks
+
+
+class SemanticSearch:
+
+    def __init__(self):
+        # Universal Sentence Encoder: a Deep Averaging Network text encoder.
+        self.use = hub.load('https://tfhub.dev/google/universal-sentence-encoder/4')
+        self.fitted = False
+
+    def fit(self, data, batch=1000, n_neighbors=5):
+        self.data = data
+        self.embeddings = self.get_text_embedding(data, batch=batch)
+        n_neighbors = min(n_neighbors, len(self.embeddings))
+        self.nn = NearestNeighbors(n_neighbors=n_neighbors)
+        self.nn.fit(self.embeddings)
+        self.fitted = True
+
+    def __call__(self, text, return_data=True):
+        # Embed the query and return its nearest chunks (or their indices).
+        inp_emb = self.use([text])
+        neighbors = self.nn.kneighbors(inp_emb, return_distance=False)[0]
+
+        if return_data:
+            return [self.data[i] for i in neighbors]
+        else:
+            return neighbors
+
+    def get_text_embedding(self, texts, batch=1000):
+        embeddings = []
+        for i in range(0, len(texts), batch):
+            text_batch = texts[i:(i+batch)]
+            emb_batch = self.use(text_batch)
+            embeddings.append(emb_batch)
+        embeddings = np.vstack(embeddings)
+        return embeddings
+
+
+# Earlier version, kept for reference:
+# def load_recommender(path, start_page=1):
+#     global recommender
+#     texts = pdf_to_text(path, start_page=start_page)
+#     chunks = text_to_chunks(texts, start_page=start_page)
+#     recommender.fit(chunks)
+#     return 'Corpus Loaded.'
+
+# The modified function caches embeddings keyed by PDF file name and start
+# page, and loads them instead of recomputing when the cache file exists.
+def load_recommender(path, start_page=1):
+    global recommender
+    pdf_file = os.path.basename(path)
+    embeddings_file = f"{pdf_file}_{start_page}.npy"
+
+    # Chunking is cheap; only the encoder pass is worth caching.
+    texts = pdf_to_text(path, start_page=start_page)
+    chunks = text_to_chunks(texts, start_page=start_page)
+
+    if os.path.isfile(embeddings_file):
+        embeddings = np.load(embeddings_file)
+        recommender.data = chunks
+        recommender.embeddings = embeddings
+        recommender.nn = NearestNeighbors(n_neighbors=min(5, len(embeddings)))
+        recommender.nn.fit(embeddings)
+        recommender.fitted = True
+        return "Embeddings loaded from file"
+
+    recommender.fit(chunks)
+    np.save(embeddings_file, recommender.embeddings)
+    return 'Corpus Loaded.'
+
+
+def generate_text(openAI_key, prompt, engine="text-davinci-003"):
+    openai.api_key = openAI_key
+    completions = openai.Completion.create(
+        engine=engine,
+        prompt=prompt,
+        max_tokens=512,
+        n=1,
+        stop=None,
+        temperature=0.7,
     )
+    message = completions.choices[0].text
+    return message
+
+def generate_text2(openAI_key, prompt, engine="gpt-3.5-turbo-0301"):
+    openai.api_key = openAI_key
+    messages = [{'role': 'system', 'content': 'You are a helpful assistant.'},
+                {'role': 'user', 'content': prompt}]
+
+    completions = openai.ChatCompletion.create(
+        model=engine,
+        messages=messages,
+        max_tokens=512,
+        n=1,
+        stop=None,
+        temperature=0.7,
     )
+    message = completions.choices[0].message['content']
+    return message
+
+def generate_answer(question, openAI_key):
+    # Retrieve the chunks most similar to the question.
+    topn_chunks = recommender(question)
+    prompt = ""
+    prompt += 'search results:\n\n'
+    for c in topn_chunks:
+        prompt += c + '\n\n'
+
+    prompt += "Instructions: Compose a comprehensive reply to the query using the search results given. "\
+              "Cite each reference using [Page Number] notation (every result has this number at the beginning). "\
+              "Citation should be done at the end of each sentence. If the search results mention multiple subjects "\
+              "with the same name, create separate answers for each. Only include information found in the results and "\
+              "don't add any additional information. Make sure the answer is correct and don't output false content. "\
+              "If the text does not relate to the query, simply state 'Text Not Found in PDF'. Ignore outlier "\
+              "search results which have nothing to do with the question. Only answer what is asked. The "\
+              "answer should be short and concise. Answer step-by-step.\n\n"
+
+    prompt += f"Query: {question}\nAnswer:"
+    answer = generate_text(openAI_key, prompt, "text-davinci-003")
+    return answer
 
 
+def question_answer(url, file, question, openAI_key):
+    if openAI_key.strip() == '':
+        return '[ERROR]: Please enter your OpenAI key. Get your key here: https://platform.openai.com/account/api-keys'
+    if url.strip() == '' and file is None:
+        return '[ERROR]: Both URL and PDF are empty. Provide at least one.'
+
+    if url.strip() != '' and file is not None:
+        return '[ERROR]: Both URL and PDF are provided. Please provide only one (either URL or PDF).'
+
+    if url.strip() != '':
+        glob_url = url
+        download_pdf(glob_url, 'corpus.pdf')
+        load_recommender('corpus.pdf')
+
     else:
+        old_file_name = file.name
+        file_name = file.name
+        # Strip the random suffix Gradio adds to uploaded file names,
+        # keeping the extension.
+        file_name = file_name[:-12] + file_name[-4:]
+        os.rename(old_file_name, file_name)
+        load_recommender(file_name)
+
+    if question.strip() == '':
+        return '[ERROR]: Question field is empty'
+
+    return generate_answer(question, openAI_key)
+
+
+recommender = SemanticSearch()
+
+title = 'PDF GPT'
+description = """What is PDF GPT?
+1. The problem: OpenAI models have a 4K-token limit and cannot take an entire PDF file as input, and they sometimes return irrelevant responses due to poor embeddings; ChatGPT cannot talk to external data directly. The solution is PDF GPT, which lets you chat with an uploaded PDF file using GPT functionality. The application breaks the document into smaller chunks and generates embeddings with a powerful Deep Averaging Network encoder. A semantic search is performed on your query, and the top relevant chunks are used to generate a response.
+2. The returned response can even cite, in square brackets ([]), the page number where the information is located, which adds credibility and helps you find pertinent information quickly. The responses are much better than naive responses from OpenAI."""
+
+with gr.Blocks() as demo:
+
+    gr.Markdown(f'<center><h1>{title}</h1></center>')
+    gr.Markdown(description)
+
+    with gr.Row():
+
+        with gr.Group():
+            gr.Markdown('<p style="text-align:center">Get your OpenAI API key <a href="https://platform.openai.com/account/api-keys">here</a></p>')
+            openAI_key = gr.Textbox(label='Enter your OpenAI API key here')
+            url = gr.Textbox(label='Enter PDF URL here')
+            gr.Markdown("<center><h4>OR</h4></center>")
+            file = gr.File(label='Upload your PDF / research paper / book here', file_types=['.pdf'])
+            question = gr.Textbox(label='Enter your question here')
+            btn = gr.Button(value='Submit')
+            btn.style(full_width=True)
+
+        with gr.Group():
+            answer = gr.Textbox(label='The answer to your question is:')
+
+    btn.click(question_answer, inputs=[url, file, question, openAI_key], outputs=[answer])
+    # openai.api_key = os.getenv('Your_Key_Here')
+
+demo.launch()
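The SemanticSearch class added above is self-contained; a minimal sketch of its fit/query protocol (the toy chunk strings below are illustrative only, not part of the commit):

# Hypothetical standalone check of SemanticSearch.
searcher = SemanticSearch()                      # downloads the USE model on first use
searcher.fit(['[1] "alpacas are mammals"',
              '[1] "llamas are bigger than alpacas"',
              '[2] "the king of France"'], n_neighbors=2)
print(searcher('Tell me about alpacas.'))        # the two closest chunks, page tags included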
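And a minimal end-to-end smoke test of the new pipeline outside the Gradio UI (the file name sample.pdf, the example question, and the OPENAI_API_KEY environment variable are assumptions for illustration):

import os

load_recommender('sample.pdf')                   # chunk + embed the PDF; caches embeddings to sample.pdf_1.npy
answer = generate_answer('What problem does the paper address?',
                         os.environ['OPENAI_API_KEY'])
print(answer)                                    # reply with [page] citations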