import os

import openai

# Set your OpenAI API key (read from the environment)
openai.api_key = os.getenv("OPENAI_API_KEY")


def generate_response_from_document(query, retrieved_docs):
    # If retrieval returned nothing, refuse to answer rather than hallucinate
    if not retrieved_docs:
        return "I cannot answer the question because of insufficient information in the documents."

    # retrieved_docs is expected to be a list of plain strings; if your retriever
    # returns LangChain Document objects, join doc.page_content instead.
    context = " ".join(retrieved_docs)

    # The f-string fills in {context} and {query}; the original template was
    # never formatted, so the placeholders reached the model as literal text.
    prompt = f"""
        You are an accurate and reliable AI assistant that can answer questions with the help of external documents.
        Please note that the external documents may contain noisy or factually incorrect information.
        If the information in the documents contains the correct answer, give an accurate answer.
        If the documents do not contain the answer, respond with 'I cannot answer the question because of insufficient information in the documents.'
        If some of the documents contain factual inconsistencies, respond with 'There are factual errors in the provided documents.' and then provide the correct answer.

        Context or Documents: {context}

        Query: {query}
    """

    try:
        # Chat Completions API (openai>=1.0)
        response = openai.chat.completions.create(
            model="gpt-3.5-turbo",  # or another suitable model such as gpt-4
            messages=[{"role": "user", "content": prompt}],
            max_tokens=300,
            temperature=0.7,
        )
        # Extract the generated answer from the first choice
        return response.choices[0].message.content.strip()
    except Exception as e:
        return f"Error generating response: {str(e)}"