ajalisatgi committed on
Commit
fe3dd0b
·
verified ·
1 Parent(s): 2bf38d0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +47 -31
app.py CHANGED
from langchain_community.vectorstores import Chroma
import openai
import torch

# Initialize models and configurations.
# NOTE(review): `HuggingFaceEmbeddings` and `gr` (gradio) are imported above
# this hunk in the full file — confirm against the complete source.
model_name = 'intfloat/e5-small'
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Pass the device through `model_kwargs` (forwarded to SentenceTransformer)
# instead of reaching into the private `client` attribute with
# `embedding_model.client.to(device)`, which is fragile across langchain versions.
embedding_model = HuggingFaceEmbeddings(
    model_name=model_name,
    model_kwargs={"device": str(device)},
)

# Chroma vector store persisted on disk; queried by process_query below.
vectordb = Chroma(
    persist_directory='./docs/chroma/',
    embedding_function=embedding_model,
)
19
def process_query(query):
    """Answer *query* via RAG: retrieve context from Chroma, then ask GPT-4.

    Returns the stripped text of the model's first completion choice.
    """
    # Pull the 30 most similar document chunks and merge them into one
    # whitespace-joined context string.
    docs = vectordb.similarity_search(query, k=30)
    context = " ".join(d.page_content for d in docs)

    # Ask GPT-4 to answer the query grounded in the retrieved context.
    completion = openai.chat.completions.create(
        model="gpt-4",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": f"Given the document: {context}\n\nGenerate a response to the query: {query}"},
        ],
        max_tokens=300,
        temperature=0.7,
    )
    return completion.choices[0].message.content.strip()
36
 
37
- # Create Gradio interface
38
  demo = gr.Interface(
39
  fn=process_query,
40
  inputs=[
41
- gr.Textbox(label="Enter your question", placeholder="Type your question here...")
 
 
 
 
42
  ],
43
  outputs=[
44
- gr.Textbox(label="Answer")
 
 
 
45
  ],
46
  title="RAG-Powered Question Answering System",
47
  description="Ask questions and get answers based on the embedded document knowledge.",
@@ -52,6 +68,6 @@ demo = gr.Interface(
52
  ]
53
  )
54
 
55
# Run the Gradio UI only when executed as a script, not on import.
if __name__ == "__main__":
    demo.launch()
 
import logging
import os
import time  # used by process_query's rate-limit sleep; was missing -> NameError

from langchain_community.vectorstores import Chroma
import openai
import torch

# Set up module-level logging so query processing can be traced.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Read the OpenAI API key from the environment instead of hard-coding a
# placeholder in source — never commit secrets to the repository.
openai.api_key = os.environ.get("OPENAI_API_KEY", "")
15
def process_query(query):
    """Answer *query* via RAG: retrieve context from Chroma, then ask GPT-4.

    Parameters
    ----------
    query : str
        The user's natural-language question.

    Returns
    -------
    str
        The model-generated answer, or a generic error message if retrieval
        or generation fails (details are logged server-side, not exposed).
    """
    import time  # local import so this function works even if the module header lacks it

    try:
        # Lazy %-style args avoid formatting cost when the level is disabled.
        logger.info("Processing query: %s", query)

        # Retrieve the 30 most similar chunks and merge them into one context string.
        relevant_docs = vectordb.similarity_search(query, k=30)
        context = " ".join(doc.page_content for doc in relevant_docs)

        # Crude client-side throttle to respect API rate limits.
        time.sleep(1)

        # Generate the answer with GPT-4, grounded in the retrieved context.
        response = openai.chat.completions.create(
            model="gpt-4",
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": f"Given the document: {context}\n\nGenerate a response to the query: {query}"},
            ],
            max_tokens=300,
            temperature=0.7,
        )

        answer = response.choices[0].message.content.strip()
        logger.info("Successfully generated response")
        return answer

    except Exception:
        # logger.exception records the full traceback; do not leak internal
        # error details (stack traces, API messages) to the end user.
        logger.exception("Error processing query")
        return "Sorry, something went wrong while answering your question. Please try again."
45
 
46
+ # Enhanced Gradio interface
47
  demo = gr.Interface(
48
  fn=process_query,
49
  inputs=[
50
+ gr.Textbox(
51
+ label="Enter your question",
52
+ placeholder="Type your question here...",
53
+ lines=2
54
+ )
55
  ],
56
  outputs=[
57
+ gr.Textbox(
58
+ label="Answer",
59
+ lines=5
60
+ )
61
  ],
62
  title="RAG-Powered Question Answering System",
63
  description="Ask questions and get answers based on the embedded document knowledge.",
 
68
  ]
69
  )
70
 
71
# Entry point: start the Gradio app with debug output enabled.
if __name__ == "__main__":
    demo.launch(debug=True)