rohangbs committed
Commit
79b08ef
verified · 1 Parent(s): b907b51

Create app.py

Files changed (1): app.py +283 -0
app.py ADDED
@@ -0,0 +1,283 @@
+ import streamlit as st
+ import json
+ import os
+ import numpy as np
+ import faiss
+ from sentence_transformers import SentenceTransformer
+ from PyPDF2 import PdfReader
+ from openai import OpenAI
+ import time
+ from PIL import Image
+
+ class IntegratedChatSystem:
+     def __init__(self, api_key: str):
+         self.api_key = api_key
+         self.client = OpenAI(api_key=api_key)
+         self.embedding_model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
+         self.embedding_dim = 384
+         self.index = faiss.IndexFlatIP(self.embedding_dim)
+         self.metadata = []
+         self.fine_tuned_model = None
+
+     def add_image(self, image, context_text: str):
+         """Add an image and its context description to the retrieval index."""
+         try:
+             # Embed the context text and L2-normalize it so the inner-product
+             # index (IndexFlatIP) scores matches by cosine similarity
+             embedding = self.embedding_model.encode(context_text)
+             embedding = np.expand_dims(embedding, axis=0).astype('float32')
+             faiss.normalize_L2(embedding)
+
+             # Save the image to disk under a unique filename
+             os.makedirs('uploaded_images', exist_ok=True)
+             filename = f"image_{len(self.metadata)}.jpg"
+             image_path = os.path.join('uploaded_images', filename)
+             image.convert("RGB").save(image_path)  # JPEG cannot store an alpha channel
+
+             # Add the vector and its metadata to the FAISS index
+             self.index.add(embedding)
+             self.metadata.append({
+                 "filepath": image_path,
+                 "context": context_text
+             })
+
+             return True
+         except Exception as e:
+             st.error(f"Error adding image: {str(e)}")
+             return False
+
+     def search_relevant_images(self, query: str, similarity_threshold: float = 0.7, top_k: int = 3):
+         """Search for images whose context description matches the query."""
+         try:
+             if self.index.ntotal == 0:
+                 return []
+
+             # Embed and normalize the query the same way as the indexed vectors
+             query_embedding = self.embedding_model.encode(query)
+             query_embedding = np.expand_dims(query_embedding, axis=0).astype('float32')
+             faiss.normalize_L2(query_embedding)
+
+             # Search the index; with normalized vectors the scores are cosine similarities
+             scores, indices = self.index.search(query_embedding, min(top_k, self.index.ntotal))
+
+             # Keep only results above the similarity threshold
+             relevant_images = [
+                 self.metadata[i] for i, score in zip(indices[0], scores[0])
+                 if i != -1 and score >= similarity_threshold
+             ]
+
+             return relevant_images
+         except Exception as e:
+             st.error(f"Error searching images: {str(e)}")
+             return []
+
+     def generate_qna_pairs(self, text: str):
+         """Generate question-answer pairs from text using the OpenAI API."""
+         try:
+             # The parser below expects "Q: ... A: ..." pairs separated by blank
+             # lines, so the prompt asks for exactly that format
+             completion = self.client.chat.completions.create(
+                 model="gpt-3.5-turbo",
+                 messages=[
+                     {"role": "system", "content": "Generate 11 relevant question-answer pairs from the given text. Prefix each question with 'Q:' and its detailed answer with 'A:', and separate pairs with a blank line."},
+                     {"role": "user", "content": f"Text: {text}"}
+                 ],
+                 temperature=0.7
+             )
+
+             response_text = completion.choices[0].message.content
+             qa_pairs = []
+
+             for pair in response_text.split('\n\n'):
+                 if 'Q:' in pair and 'A:' in pair:
+                     # Split on the first 'A:' so answers containing 'A:' stay intact
+                     question, answer = pair.split('A:', 1)
+                     question = question.replace('Q:', '').strip()
+                     answer = answer.strip()
+
+                     qa_pairs.append({
+                         "messages": [
+                             {"role": "system", "content": "You are an assistant chatbot. You should help the user by answering their question."},
+                             {"role": "user", "content": question},
+                             {"role": "assistant", "content": answer}
+                         ]
+                     })
+
+             return qa_pairs
+         except Exception as e:
+             st.error(f"Error generating QA pairs: {str(e)}")
+             return []
+
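+     # Each record above follows OpenAI's chat fine-tuning schema; one JSONL
+     # line looks like this (illustrative values):
+     # {"messages": [{"role": "system", "content": "You are an assistant chatbot. ..."},
+     #               {"role": "user", "content": "What does the document describe?"},
+     #               {"role": "assistant", "content": "It describes ..."}]}
+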
+     def create_fine_tuning_job(self, training_file_id):
+         try:
+             response = self.client.fine_tuning.jobs.create(
+                 training_file=training_file_id,
+                 model="gpt-3.5-turbo-0125"
+             )
+             return response.id
+         except Exception as e:
+             st.error(f"Error creating fine-tuning job: {str(e)}")
+             return None
+
+     def monitor_fine_tuning_job(self, job_id):
+         try:
+             progress_bar = st.progress(0)
+             status_text = st.empty()
+             details_text = st.empty()
+
+             stages = {
+                 "validating_files": "Validating training files...",
+                 "queued": "Job queued - waiting to start...",
+                 "running": "Training in progress...",
+                 "succeeded": "Training completed successfully!",
+                 "failed": "Training failed.",
+                 "cancelled": "Training was cancelled."
+             }
+
+             # Approximate progress percentages for each stage
+             progress_mapping = {
+                 "validating_files": 0.1,
+                 "queued": 0.2,
+                 "running": 0.6,
+                 "succeeded": 1.0,
+                 "failed": 1.0,
+                 "cancelled": 1.0
+             }
+
+             last_status = None
+             start_time = time.time()
+
+             while True:
+                 job_status = self.client.fine_tuning.jobs.retrieve(job_id)
+                 current_status = job_status.status
+
+                 # Update the progress bar and status message
+                 progress_bar.progress(progress_mapping.get(current_status, 0))
+                 status_text.markdown(f"**Status:** {stages.get(current_status, 'Processing...')}")
+
+                 # Show elapsed time and other details (built without leading
+                 # indentation so Streamlit does not render it as a code block)
+                 elapsed_time = int(time.time() - start_time)
+                 details_text.markdown(
+                     f"**Details:**\n"
+                     f"- Time elapsed: {elapsed_time // 60}m {elapsed_time % 60}s\n"
+                     f"- Job ID: {job_id}\n"
+                     f"- Current stage: {current_status}"
+                 )
+
+                 # Notify on status changes and stop on terminal states
+                 if current_status != last_status:
+                     if current_status == "running":
+                         st.info("🚀 Model training has begun!")
+                     elif current_status == "succeeded":
+                         st.success("✅ Fine-tuning completed successfully!")
+                         self.fine_tuned_model = job_status.fine_tuned_model
+                         st.balloons()  # Celebration effect
+                         # Display model details
+                         st.markdown(
+                             f"**Training Completed!**\n"
+                             f"- Model ID: `{self.fine_tuned_model}`\n"
+                             f"- Total training time: {elapsed_time // 60}m {elapsed_time % 60}s\n"
+                             f"- Status: Ready to use\n\n"
+                             f"You can now use the chat interface to interact with your fine-tuned model!"
+                         )
+                         return True
+                     elif current_status in ["failed", "cancelled"]:
+                         st.error(f"❌ Training {current_status}. Please check the OpenAI dashboard for details.")
+                         return False
+
+                 last_status = current_status
+                 time.sleep(10)
+
+         except Exception as e:
+             st.error(f"Error monitoring fine-tuning job: {str(e)}")
+             return False
+
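+     # NOTE: the chat tab below calls self.chat(...), which this commit never
+     # defines. A minimal sketch of the missing method, assuming the intent is
+     # to answer with the fine-tuned model when available (falling back to
+     # gpt-3.5-turbo) and to attach images found by search_relevant_images:
+     def chat(self, user_message: str):
+         try:
+             model = self.fine_tuned_model or "gpt-3.5-turbo"
+             completion = self.client.chat.completions.create(
+                 model=model,
+                 messages=[
+                     {"role": "system", "content": "You are an assistant chatbot. You should help the user by answering their question."},
+                     {"role": "user", "content": user_message}
+                 ],
+                 temperature=0.7
+             )
+             return {
+                 "response": completion.choices[0].message.content,
+                 "relevant_images": self.search_relevant_images(user_message)
+             }
+         except Exception as e:
+             st.error(f"Error generating response: {str(e)}")
+             return {"response": "", "relevant_images": []}
+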
+ # Initialize Streamlit interface
+ st.title("PDF Fine-tuning and Chat System with Image Retrieval")
+
+ # Initialize session state; read the API key from the environment instead of
+ # hardcoding a secret in the source
+ if 'chat_system' not in st.session_state:
+     api_key = os.environ.get("OPENAI_API_KEY", "")
+     st.session_state.chat_system = IntegratedChatSystem(api_key)
+
+ # Sidebar for image upload
+ with st.sidebar:
+     st.header("Image Upload")
+     uploaded_image = st.file_uploader("Upload Image", type=["jpg", "jpeg", "png"])
+     image_context = st.text_area("Image Context Description")
+
+     if uploaded_image and image_context and st.button("Add Image"):
+         image = Image.open(uploaded_image)
+         if st.session_state.chat_system.add_image(image, image_context):
+             st.success("Image added successfully!")
+
+ # Main area tabs
+ tab1, tab2 = st.tabs(["Fine-tuning", "Chat"])
+
+ with tab1:
+     st.header("Upload and Fine-tune")
+     uploaded_file = st.file_uploader("Upload a PDF for Fine-Tuning", type=["pdf"])
+
+     if uploaded_file is not None:
+         if st.button("Process and Fine-tune"):
+             with st.spinner("Processing PDF..."):
+                 # Extract text from the PDF; extract_text() can return None for
+                 # image-only pages, so substitute an empty string
+                 reader = PdfReader(uploaded_file)
+                 text = "\n".join([(page.extract_text() or "") for page in reader.pages])
+
+                 # Show processing steps
+                 progress_placeholder = st.empty()
+
+                 # Step 1: Generate QA pairs
+                 progress_placeholder.text("Step 1/3: Generating QA pairs...")
+                 qa_pairs = st.session_state.chat_system.generate_qna_pairs(text)
+
+                 if qa_pairs:
+                     # Step 2: Write the JSONL training file and upload it
+                     progress_placeholder.text("Step 2/3: Preparing training file...")
+                     jsonl_file = "questions_and_answers.jsonl"
+                     with open(jsonl_file, 'w') as f:
+                         for pair in qa_pairs:
+                             json.dump(pair, f)
+                             f.write("\n")
+
+                     with open(jsonl_file, "rb") as f:
+                         response = st.session_state.chat_system.client.files.create(
+                             file=f,
+                             purpose="fine-tune"
+                         )
+                     training_file_id = response.id
+
+                     # Step 3: Start fine-tuning
+                     progress_placeholder.text("Step 3/3: Starting fine-tuning process...")
+                     job_id = st.session_state.chat_system.create_fine_tuning_job(training_file_id)
+
+                     if job_id:
+                         progress_placeholder.empty()  # Clear the step indicator
+                         st.info("🎯 Fine-tuning job initiated!")
+                         st.session_state.chat_system.monitor_fine_tuning_job(job_id)
+
+ with tab2:
+     st.header("Chat Interface")
+     if st.session_state.chat_system.fine_tuned_model:
+         st.success(f"Using fine-tuned model: {st.session_state.chat_system.fine_tuned_model}")
+     else:
+         st.info("Using default model (fine-tuned model not available)")
+
+     user_message = st.text_input("Enter your message:")
+     if st.button("Send") and user_message:
+         result = st.session_state.chat_system.chat(user_message)
+
+         st.write("Response:", result["response"])
+
+         if result["relevant_images"]:
+             st.subheader("Relevant Images:")
+             for img_data in result["relevant_images"]:
+                 if os.path.exists(img_data["filepath"]):
+                     image = Image.open(img_data["filepath"])
+                     st.image(image, caption=img_data["context"])