bhagyabonam committed on
Commit d6e9b21 · verified · 1 Parent(s): 2e5ea5b

Create app.py

Files changed (1)
  1. app.py +539 -0
app.py ADDED
@@ -0,0 +1,539 @@
import chromadb
from chromadb.config import Settings
from chromadb import Client
from transformers import AutoTokenizer, AutoModel, pipeline
import pandas as pd
import numpy as np
import streamlit as st
import speech_recognition as sr
from textblob import TextBlob
from google.oauth2.service_account import Credentials
from googleapiclient.discovery import build
import torch
import faiss
from sentence_transformers import SentenceTransformer
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import TfidfVectorizer
import zipfile


SPREADSHEET_ID = "1CsBub3Jlwyo7WHMQty6SDnBShIZMjl5XTVSoOKrxZhc"
RANGE_NAME = 'Sheet1!A1:B1'
SERVICE_ACCOUNT_FILE = r"C:\Users\bhagy\AI\credentials.json"

csv_file_path = r"C:\Users\bhagy\OneDrive\Desktop\INFOSYS PROJECT\900_products_dataset.csv"

class CustomEmbeddingFunction:
    def __init__(self, model_name="sentence-transformers/all-MiniLM-L6-v2"):
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModel.from_pretrained(model_name)

    def __call__(self, text):
        inputs = self.tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
        with torch.no_grad():
            outputs = self.model(**inputs)
        embeddings = outputs.last_hidden_state.mean(dim=1).squeeze().numpy()
        return embeddings

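# Note (added for clarity): with the all-MiniLM-L6-v2 backbone above, the
# mean-pooled embedding is a 384-dimensional NumPy vector, which is what the
# Chroma collection below stores and queries. Illustrative usage, not part of
# the original script:
#
#   embedding_fn = CustomEmbeddingFunction()
#   vec = embedding_fn("budget smartphone with good battery")
#   print(vec.shape)   # expected: (384,)
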
# Initialize components
sentiment_pipeline = pipeline("sentiment-analysis")
chroma_client = Client(Settings(persist_directory="chromadb_storage"))
embedding_fn = CustomEmbeddingFunction()
collection_name = "crm_data"

try:
    collection = chroma_client.get_collection(collection_name)
except Exception:
    collection = chroma_client.create_collection(collection_name)

def get_google_sheets_service():
    credentials = Credentials.from_service_account_file(
        SERVICE_ACCOUNT_FILE,
        scopes=["https://www.googleapis.com/auth/spreadsheets"]
    )
    return build('sheets', 'v4', credentials=credentials)


def update_google_sheet(response, sentiment):
    """
    Writes the AI response and sentiment to Google Sheets.
    Each call overwrites the fixed range defined by RANGE_NAME.
    """
    try:
        service = get_google_sheets_service()
        values = [[str(response), str(sentiment)]]
        body = {'values': values}
        result = service.spreadsheets().values().update(
            spreadsheetId=SPREADSHEET_ID,
            range=RANGE_NAME,
            valueInputOption="RAW",
            body=body
        ).execute()
        st.success("Response and sentiment written to Google Sheets!")
    except Exception as e:
        st.error(f"Failed to update Google Sheets: {e}")

def analyze_sentiment_combined(text):
    textblob_polarity = TextBlob(text).sentiment.polarity

    huggingface_result = sentiment_pipeline(text)[0]
    huggingface_label = huggingface_result['label']
    huggingface_score = huggingface_result['score']
    # The pipeline score is a confidence for the predicted label; flip it for
    # NEGATIVE predictions so both scores live on the same 0 (negative) to
    # 1 (positive) scale before averaging.
    if huggingface_label.upper() == "NEGATIVE":
        huggingface_score = 1 - huggingface_score
    print("huggingface_score:", huggingface_score)
    textblob_normalized_score = (textblob_polarity + 1) / 2
    print("textblob_normalized_score:", textblob_normalized_score)
    combined_score = (textblob_normalized_score + huggingface_score) / 2
    print("combined_score:", combined_score)
    # Determine final sentiment
    if combined_score > 0.6:
        return "Positive", combined_score
    elif combined_score < 0.4:
        return "Negative", combined_score
    else:
        return "Neutral", combined_score

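# Worked example (illustrative numbers, not from the original script): for a
# clearly positive utterance, TextBlob might return polarity 0.5 and the
# transformer pipeline ("POSITIVE", 0.98). Normalizing gives
# (0.5 + 1) / 2 = 0.75, and the combined score is (0.75 + 0.98) / 2 = 0.865,
# which is above the 0.6 threshold, so the segment is labelled Positive.
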
def generate_response(prompt):
    analysis = TextBlob(prompt)
    sentiment = analysis.sentiment.polarity
    if sentiment > 0:
        return "Positive", sentiment
    elif sentiment < 0:
        return "Negative", sentiment
    else:
        return "Neutral", sentiment

def load_csv(file_path):
    try:
        data = pd.read_csv(file_path)
        if data is not None:
            st.session_state.crm_data = data
            print("CRM data loaded successfully!")
            return data
    except Exception as e:
        print(f"Error loading CSV: {e}")
        return None

data = load_csv(csv_file_path)

def process_crm_data(data):
    try:
        chunks = [str(row) for row in data.to_dict(orient="records")]
        ids = [f"doc_{i}" for i in range(len(chunks))]
        embeddings = [embedding_fn(chunk) for chunk in chunks]

        collection.add(
            embeddings=embeddings,
            documents=chunks,
            ids=ids
        )
        print(f"Processed and stored {len(chunks)} CRM records.")
        print("CRM data processed and stored successfully!")
    except Exception as e:
        st.error(f"Error processing CRM data: {e}")

# Keyword list used to filter recommendations; entries are kept lowercase
# because they are matched against lower-cased prompts and documents.
product_keywords = ['phone', 'smartphone', 'mobile', 'tablet', 'laptop', 'cell phone', 'headphones', 'smartwatch', 'vivo', 'xiaomi', 'sony', 'apple', 'oppo', 'realme', 'asus', 'nokia', 'lenovo', 'samsung', 'google', 'motorola', 'oneplus', 'huawei']

def query_crm_data_with_context(prompt, top_k=3):
    try:
        prompt_embedding = embedding_fn(prompt)
        collection = chroma_client.get_collection("crm_data")
        results = collection.query(
            query_embeddings=[prompt_embedding],
            n_results=top_k
        )
        matched_keywords = [kw for kw in product_keywords if kw in prompt.lower()]

        if not matched_keywords:
            return ["No relevant recommendations found as no product names were mentioned in the query."]
        relevant_docs = []
        for doc in results["documents"][0]:
            if any(kw in doc.lower() for kw in matched_keywords):
                relevant_docs.append(doc)
        return relevant_docs if relevant_docs else ["No relevant recommendations found for the mentioned products."]
    except Exception as e:
        st.error(f"Error querying CRM data: {e}")
        return ["Error in querying recommendations."]

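# Note (added for clarity): retrieval is two-stage. Chroma returns the top_k
# nearest records by embedding similarity, then the keyword filter keeps only
# documents that mention a product term found in the prompt. Illustrative
# usage, not part of the original script:
#
#   recs = query_crm_data_with_context("looking for a samsung smartphone", top_k=3)
#   for rec in recs:
#       print(rec)
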
sentence_model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
faiss_index = faiss.IndexFlatL2(384)

def load_objection_responses(csv_file_path):
    try:
        df = pd.read_csv(csv_file_path)
        objection_response_pairs = dict(zip(df['Objection'], df['Response']))
        return objection_response_pairs
    except Exception as e:
        print(f"Error loading objections CSV: {e}")
        return {}

objection_response_pairs = load_objection_responses(r"C:\Users\bhagy\OneDrive\Desktop\INFOSYS PROJECT\objections_responses.csv")
objections = list(objection_response_pairs.keys())
objection_embeddings = sentence_model.encode(objections)
faiss_index.add(np.array(objection_embeddings, dtype="float32"))

def find_closest_objection(query):
    query_embedding = sentence_model.encode([query])
    distances, indices = faiss_index.search(np.array(query_embedding, dtype="float32"), 1)
    closest_index = indices[0][0]
    closest_objection = objections[closest_index]
    response = objection_response_pairs[closest_objection]
    # IndexFlatL2 returns squared L2 distances; anything farther than this
    # threshold is treated as "no matching objection".
    if distances[0][0] > 0.6:
        return "No objection found", "No Response"
    return closest_objection, response

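# Illustrative usage, not part of the original script, assuming the objections
# CSV contains rows such as "Your product is too expensive." ->
# "We offer flexible payment plans.":
#
#   objection, reply = find_closest_objection("I think this is too costly")
#   print(objection, "->", reply)
#
# The 0.6 cutoff above is applied to squared L2 distance between unnormalized
# MiniLM embeddings, so it may need tuning for a given objections dataset.
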
def handle_objection_and_recommendation(prompt):
    closest_objection, objection_response = find_closest_objection(prompt)
    recommendations = query_crm_data_with_context(prompt)

    return closest_objection, objection_response, recommendations

if "is_listening" not in st.session_state:
    st.session_state.is_listening = False

if "sentiment_history" not in st.session_state:
    st.session_state.sentiment_history = []

if "crm_data" not in st.session_state:
    st.session_state.crm_data = load_csv(csv_file_path)
else:
    print("CRM data already loaded from session state.")

if st.session_state.crm_data is not None:
    process_crm_data(st.session_state.crm_data)
else:
    st.error("Failed to load CRM data.")

if "crm_history" not in st.session_state:
    st.session_state["crm_history"] = []

if "app_feedback" not in st.session_state:
    st.session_state["app_feedback"] = []

def add_to_sentiment_history(text, sentiment_label, sentiment_score, closest_objection, response):
    # Only the text, label, and score are stored; the objection arguments are
    # accepted for call-site compatibility but not persisted.
    st.session_state.sentiment_history.append({
        "Text": text,
        "Sentiment": sentiment_label,
        "Score": sentiment_score,
    })

def show_help():
    st.title("Help Section - AI-Powered Assistant for Live Sales Calls")

    st.header("1. Introduction to the AI Assistant")
    st.write("""
    - **What It Does**: The assistant analyzes live sales calls in real-time. It detects sentiment shifts, provides product recommendations, and suggests dynamic question handling techniques.
    - **Key Features**:
        - Real-time speech-to-text conversion and sentiment analysis.
        - Product recommendations based on customer context.
        - Dynamic question prompt generator.
        - Objection handling suggestions.
    """)

    st.header("2. Getting Started")
    st.write("""
    - **How to Start a Call**: To start a sales call, click Start Listening. Once connected, initiate the call, and the assistant will begin analyzing.
    - **What to Expect**: During the call, the assistant will provide real-time feedback, such as sentiment scores, product recommendations, and objection handling tips.
    """)

    st.header("3. Using the Assistant During Sales Calls")
    st.write("""
    - **Speech-to-Text Instructions**: Speak clearly into your microphone so the assistant can accurately capture and analyze your speech.
    - **Real-time Feedback**: The assistant will display real-time feedback on the sentiment of the conversation, suggest responses for objections, and provide product recommendations.
    """)

    st.header("4. Understanding the Interface")
    st.write("""
    - **Tabs Navigation**: The interface has different tabs:
        - **Call Summary**: After the call, review the summary, which highlights the conversation's key points.
        - **Sentiment Analysis**: See how the sentiment changed throughout the conversation.
        - **Product Recommendations**: View the recommended products based on customer intent and conversation context.
    """)

    st.header("5. FAQs and Troubleshooting")
    st.write("""
    - **Sentiment Detection Accuracy**: If the assistant's sentiment analysis isn't accurate, ensure you speak clearly and avoid background noise.
    - **Speech Recognition Issues**: Rephrase unclear statements and ensure the microphone is working well.
    - **Context Handling**: If the assistant misses some context, remind it of the product or the customer's intent.
    """)

    st.header("6. Support and Contact Information")
    st.write("""
    - **Live Chat Support**: Chat with us in real-time by clicking the support icon in the bottom right.
    - **Email and Phone Support**: You can also reach us at [email protected] or call us at +1-800-555-1234.
    - **Feedback**: Please provide feedback to help us improve the assistant.
    """)

    st.header("7. Advanced Features")
    st.write("""
    - **Integration with CRM and Google Sheets**: Sync with CRM systems and Google Sheets to enhance product recommendations.
    - **Customization Options**: Customize the assistant's tone, product categories, and question prompts through the settings tab.
    """)

    st.header("8. Privacy and Security")
    st.write("""
    - **Data Privacy**: All conversations are anonymized for analysis purposes. We ensure compliance with privacy regulations.
    - **Security Protocols**: All data is encrypted and stored securely.
    """)

    st.header("9. Updates and New Features")
    st.write("""
    - **Changelog**: We release regular updates to improve performance. Please refer to the changelog for new features and improvements.
    - **How to Update**: If an update is available, follow the instructions in the settings tab to install the latest version.
    """)

def process_real_time_audio():
    recognizer = sr.Recognizer()
    microphone = sr.Microphone()

    st.write("Adjusting microphone for ambient noise... Please wait.")
    with microphone as source:
        recognizer.adjust_for_ambient_noise(source)

    st.write("Listening for audio... Speak into the microphone.")
    while True:
        try:
            with microphone as source:
                audio = recognizer.listen(source, timeout=15, phrase_time_limit=20)

            st.write("Transcribing audio...")
            transcribed_text = recognizer.recognize_google(audio)
            st.write(f"You said: {transcribed_text}")

            # Saying "stop" ends the listening loop.
            if 'stop' in transcribed_text.lower():
                st.warning("Stopping the speech recognition process.")
                break

            st.markdown("### **Sentiment Analysis**")
            sentiment_label, sentiment_score = analyze_sentiment_combined(transcribed_text)
            st.write(f"Sentiment: {sentiment_label}")
            st.write(f"Sentiment Score: {sentiment_score}")

            closest_objection = None
            response = None

            add_to_sentiment_history(transcribed_text, sentiment_label, sentiment_score, closest_objection, response)

            st.markdown("### **Recommendations**")
            recommendations = query_crm_data_with_context(transcribed_text)
            for i, rec in enumerate(recommendations, start=1):
                if isinstance(rec, dict) and 'Product' in rec and 'Recommendations' in rec:
                    st.markdown(f"- **{rec['Product']}**: {rec['Recommendations']}")
                else:
                    st.markdown(f"- {rec}")

            st.markdown("### **Objection Handling**")
            closest_objection, response = find_closest_objection(transcribed_text)
            st.write(f"Objection: {closest_objection}")
            st.write(f"Response: {response}")

            update_google_sheet(f"Recommendations: {recommendations}", "N/A")

        except sr.UnknownValueError:
            st.warning("Could not understand the audio.")
        except Exception as e:
            st.error(f"Error: {e}")
            break

def generate_sentiment_pie_chart(sentiment_history):
    if not sentiment_history:
        st.warning("No sentiment history available to generate a pie chart.")
        return

    sentiment_counts = {
        "Positive": 0,
        "Negative": 0,
        "Neutral": 0
    }

    for entry in sentiment_history:
        sentiment_counts[entry["Sentiment"]] += 1

    labels = sentiment_counts.keys()
    sizes = sentiment_counts.values()
    colors = ['#6dcf6d', '#f76c6c', '#6c8df7']

    fig, ax = plt.subplots(figsize=(6, 6))
    ax.pie(sizes, labels=labels, autopct='%1.1f%%', startangle=90, colors=colors, textprops={'fontsize': 12, 'color': 'white'})
    fig.patch.set_facecolor('none')
    ax.axis('equal')
    st.markdown("### *Sentiment Distribution*")
    st.pyplot(fig)

def generate_post_call_summary(sentiment_history, recommendations=None):
    if not sentiment_history:
        st.warning("No sentiment history available to summarize.")
        return
    recommendations = recommendations or []

    df = pd.DataFrame(sentiment_history)
    st.write(df)

    summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
    combined_text = " ".join([item["Text"] for item in sentiment_history])
    summary = summarizer(combined_text, max_length=100, min_length=30, do_sample=False)[0]["summary_text"]

    scores = [item["Score"] for item in sentiment_history]
    average_sentiment_score = sum(scores) / len(scores)

    # Scores from analyze_sentiment_combined are on a 0-1 scale, so classify
    # the overall sentiment with the same 0.6 / 0.4 cut-offs.
    if average_sentiment_score > 0.6:
        overall_sentiment = "Positive"
    elif average_sentiment_score < 0.4:
        overall_sentiment = "Negative"
    else:
        overall_sentiment = "Neutral"

    st.markdown("## Summary of the Call")
    st.write(summary)

    st.markdown("### **Overall Sentiment for the Call**")
    st.write(f"Overall Sentiment: {overall_sentiment}")
    st.write(f"Average Sentiment Score: {average_sentiment_score:.2f}")
    sentiment_scores = df["Score"].values

    col1, col2 = st.columns(2)
    with col1:
        colors = ['green' if entry["Sentiment"] == "Positive" else 'red' if entry["Sentiment"] == "Negative" else 'blue' for entry in sentiment_history]
        plt.figure(figsize=(10, 6))
        plt.bar(range(len(sentiment_scores)), sentiment_scores, color=colors)
        plt.axhline(0, color='black', linestyle='--', linewidth=1, label='Neutral')
        st.markdown("### **Sentiment Trend Bar Chart**")
        plt.title("Sentiment Trend Throughout the Call")
        plt.xlabel("Segment")
        plt.ylabel("Sentiment Score")
        plt.legend(["Neutral"])
        plt.grid(axis='y', linestyle='--', linewidth=0.7)
        st.pyplot(plt)

    with col2:
        generate_sentiment_pie_chart(sentiment_history)

    st.markdown("### **Future Insights**")

    if overall_sentiment == "Negative":
        st.write("Consider addressing customer pain points more directly. More empathy might improve the sentiment.")
    elif overall_sentiment == "Positive":
        st.write("Great engagement! Continue the positive experience by offering more personalized recommendations.")
    else:
        st.write("The call was neutral. Identifying specific customer concerns can help drive a more positive outcome.")

    if recommendations:
        st.write("### **Product Recommendations**")
        for rec in recommendations:
            st.write(f"- {rec}")

    if sentiment_history:
        st.write("### **Sentiment Breakdown by Segment**")
        for idx, entry in enumerate(sentiment_history, 1):
            st.write(f"Segment {idx}: Sentiment = {entry['Sentiment']}, Score = {entry['Score']:.2f}")

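# Note (added as a suggestion, not in the original script): the BART
# summarizer above is re-instantiated on every dashboard render. A minimal
# sketch of caching it with Streamlit's resource cache:
#
#   @st.cache_resource
#   def get_summarizer():
#       return pipeline("summarization", model="facebook/bart-large-cnn")
#
# generate_post_call_summary could then call get_summarizer() instead of
# calling pipeline(...) directly.
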
# Main
def main():
    st.set_page_config(page_title="AI-Powered Sales Assistant", layout="wide")
    st.title("🤖 AI-Powered Sales Assistant")
    st.markdown(
        "An intelligent assistant to analyze speech, handle objections, and recommend products in real-time."
    )

    # Tabs for navigation
    tabs = st.tabs(["🎙️ Real-Time Audio", "📊 Text Search", "📋 Visualization", "🕘 Query History", "❓ Help", "💬 Feedback"])

    with tabs[0]:
        st.header("🎙️ Real-Time Audio Analysis")
        st.write(
            "Use this feature to analyze live speech, perform sentiment analysis, and get product recommendations."
        )

        if st.button("Start Listening"):
            process_real_time_audio()

    with tabs[1]:
        st.header("📊 Search")
        st.write(
            "Retrieve the most relevant product recommendations based on your input query."
        )
        query = st.text_input("Enter your query:")
        recommendations = []
        if st.button("Submit Query"):
            if query:
                result = query_crm_data_with_context(query)
                st.success(f"Query submitted: {query}")

                if result:
                    recommendations = result
                    st.markdown("### Recommendations")
                    for i, rec in enumerate(recommendations, start=1):
                        st.markdown(f"- {rec}")
            else:
                st.error("Please enter a query!")

            st.session_state["crm_history"].append({"Query": query, "Result": recommendations})

    with tabs[2]:
        st.header("📊 Dashboard")
        st.write("Visualize the sentiment analysis results.")
        generate_post_call_summary(st.session_state.sentiment_history)

    with tabs[3]:
        st.subheader("🕘 Query History")
        if "crm_history" in st.session_state and st.session_state["crm_history"]:
            st.subheader("Query History")
            st.dataframe(st.session_state["crm_history"])

    with tabs[4]:
        # st.subheader("❓ Help")
        show_help()

    with tabs[5]:
        st.subheader("💬 App Feedback")

        feedback = st.text_area("We would love to hear your feedback on the app! Please share your thoughts:")

        if st.button("Submit Feedback") and feedback:
            st.session_state["app_feedback"].append(feedback)
            st.success("Thank you for your feedback!")

        # Display previous feedback
        if st.session_state["app_feedback"]:
            st.write("### Previous Feedback:")
            for idx, feedback_entry in enumerate(st.session_state["app_feedback"], 1):
                st.markdown(f"{idx}. {feedback_entry}")
        else:
            st.warning("No feedback submitted yet.")

        feedback = st.radio("Was this helpful?", ["Yes", "No"])
        st.button("Submit")

file_path = csv_file_path
data = load_csv(file_path)


if __name__ == "__main__":
    main()