File size: 7,714 Bytes
20ac504
d22c8d7
20ac504
 
956e6a5
 
 
 
 
 
 
 
 
 
92a4db3
956e6a5
 
92a4db3
20ac504
956e6a5
92a4db3
956e6a5
20ac504
956e6a5
 
d22c8d7
956e6a5
 
09b412c
956e6a5
 
 
 
 
 
 
 
 
d22c8d7
 
956e6a5
d22c8d7
956e6a5
 
92a4db3
956e6a5
d22c8d7
956e6a5
20ac504
 
 
956e6a5
92a4db3
7e1285f
 
 
 
 
 
 
 
 
 
20ac504
 
 
92a4db3
 
 
 
 
 
 
 
20ac504
 
92a4db3
20ac504
 
 
 
 
 
d22c8d7
3484fed
956e6a5
 
 
 
 
d22c8d7
 
956e6a5
3484fed
20ac504
 
 
 
 
 
 
 
 
 
 
09b412c
20ac504
 
 
956e6a5
09b412c
92a4db3
956e6a5
7e1285f
956e6a5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d22c8d7
956e6a5
 
d22c8d7
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
import streamlit as st
from huggingface_hub import InferenceClient
from googlesearch import search

# Function to load the model
@st.cache_resource
def load_model():
    """Create the Hugging Face inference client once per process.

    ``@st.cache_resource`` already memoizes the return value across reruns
    and sessions, so the previous ``st.session_state`` check was redundant
    (and subtly wrong: the cached body only runs once per process, so later
    sessions would consult a session_state key that was never set for them).
    """
    return InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")

# Load the model once at import time; Streamlit reruns reuse the cached client.
client = load_model()

# Prompt template that gives the assistant its "Katheriya" persona
def create_prompt(user_message):
    """Wrap *user_message* in the persona prompt expected by the LLM."""
    persona = (
        "You are Katheriya, a skilled data scientist who helps users find "
        "the best information from around the globe. You are highly "
        "knowledgeable and provide insightful, detailed responses."
    )
    return f"""
    {persona}

    User: {user_message}
    Katheriya:
    """

# Function to process the query using the open-source LLM for general chat
def chat_with_llm(query):
    """Stream a chat reply for *query* into the Streamlit page.

    Returns the full accumulated reply text. On failure a Streamlit error
    box is shown and whatever partial text was received is returned —
    previously the except branch fell through and the caller got ``None``
    instead of a string.
    """
    response_text = ""
    try:
        # Wrap the raw user text in the persona prompt template.
        formatted_prompt = create_prompt(query)

        # chat_completion expects an OpenAI-style message list.
        messages = [{"role": "user", "content": formatted_prompt}]

        # Placeholder that is rewritten on every streamed chunk so the
        # user sees the reply grow in real time.
        response_container = st.empty()

        response_stream = client.chat_completion(messages=messages, stream=True, max_tokens=2048)

        for message in response_stream:
            # NOTE(review): assumes each streamed chunk is dict-like with
            # 'choices'/'delta' keys — newer huggingface_hub versions yield
            # dataclass objects; confirm against the installed version.
            if 'choices' in message and message['choices']:
                # `or ''` guards against a None content field, which would
                # otherwise make the += below raise TypeError.
                delta_content = message['choices'][0]['delta'].get('content', '') or ''
                response_text += delta_content
                response_container.write(f"**Katheriya:** {response_text}")  # Update response in real-time
    except Exception as e:
        st.error(f"An error occurred: {e}")
    return response_text

# Function to process the query for search intent
def process_query_with_llm(query):
    """Ask the LLM to turn *query* into a concise web-search query string.

    Returns the stripped model output, or "No query generated." when the
    response has an unexpected shape.
    """
    prompt = f"User asked: '{query}'. What would be the best search query to use?"

    # text_generation may return a plain string or a list of dicts depending
    # on the huggingface_hub version, so handle both shapes defensively.
    response = client.text_generation(prompt)

    if isinstance(response, str):
        return response.strip()
    # Guard against an empty list before indexing — previously an empty
    # response list raised IndexError inside the elif condition.
    if isinstance(response, list) and response and 'generated_text' in response[0]:
        return response[0]['generated_text'].strip()
    return "No query generated."

# Function to perform a Google search using the googlesearch-python package
def search_web(query):
    """Return up to 10 result URLs for *query*; an empty list on failure."""
    try:
        # Materialize the generator of result URLs directly.
        return [hit for hit in search(query, num_results=10)]
    except Exception as e:
        st.error(f"An error occurred during web search: {e}")
        return []

# Streamlit UI
st.title("Interactive Chatbot - Powered by Katheriya")

# Input field for user query
user_input = st.text_input("You:", "")

if user_input:
    st.write(f"**You:** {user_input}")

    # Phrases that signal the user wants a web search rather than a chat.
    search_phrases = (
        "search", "find", "get me", "give me", "look up", "show me",
        "retrieve", "browse", "where can I find", "search for", "look for",
        "can you find", "find me", "what is", "how to", "who is", "where is",
        "what are", "tell me about", "do you know", "could you find",
        "can you search", "help me find", "explore", "fetch", "locate", "suggest",
    )

    lowered = user_input.lower()
    wants_search = any(phrase in lowered for phrase in search_phrases)

    if not wants_search:
        # General conversation; chat_with_llm streams the reply itself.
        chat_with_llm(user_input)
    else:
        # Search intent: let the LLM rewrite the query, then hit the web.
        search_query = process_query_with_llm(user_input)
        st.write(f"**Processed Query:** {search_query}")

        links = search_web(search_query)

        if not links:
            st.write("Sorry, I couldn't find any relevant links.")
        else:
            st.write("Here are some links you might find useful:")
            for idx, link in enumerate(links, start=1):
                st.write(f"{idx}. [Link {idx}]({link})")



# import streamlit as st
# from huggingface_hub import InferenceClient
# from googlesearch import search

# # Initialize the InferenceClient with the model and token
# if 'client' not in st.session_state:
#     st.session_state.client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")

# client = st.session_state.client

# # Function to get chat completion from the model with personalization
# def chat_with_llm(message):
#     try:
#         # Define the prompt with a name and role
#         personalized_message = f"""
#         You are Vishal, a data scientist working at Inferenz.ai. Respond to the following query as naturally and informatively as possible.
        
#         User: {message}
#         Vishal:
#         """
#         response_stream = client.chat_completion(messages=[{"role": "user", "content": personalized_message}], stream=True, max_tokens=500)
#         delta_content = ""
        
#         for message in response_stream:
#             if 'choices' in message and message['choices']:
#                 delta_content += message['choices'][0]['delta'].get('content', '')
        
#         return delta_content.strip()
    
#     except Exception as e:
#         return f"An error occurred: {e}"

# # Function to process the query for search intent
# def process_query_with_llm(query):
#     response = chat_with_llm(f"User asked: '{query}'. What would be the best search query to use?")
#     st.write("Query for search:", response)
#     return response

# # Function to perform a Google search using the googlesearch-python package
# def search_web(query):
#     search_results = []
#     try:
#         for result in search(query, num_results=10):
#             search_results.append(result)
#     except Exception as e:
#         st.write(f"An error occurred during the search: {e}")
#     return search_results

# # Streamlit UI
# st.title("Interactive Chatbot")

# # Input field for user query
# user_input = st.text_input("You:", "")

# # Check if the input field is not empty
# if user_input:
#     st.write(f"**You:** {user_input}")

#     search_phrases = [
#         "search", "find", "get me", "give me", "look up", "show me", "retrieve",
#         "browse", "where can I find", "search for", "look for", "can you find",
#         "find me", "what is", "how to", "who is", "where is", "what are",
#         "tell me about", "do you know", "could you find", "can you search",
#         "help me find", "explore", "fetch", "locate", "suggest me", "suggest"
#     ]

#     if any(keyword in user_input.lower() for keyword in search_phrases):
#         # If the user input indicates a search intent
#         search_query = process_query_with_llm(user_input)
#         st.write(f"**Processed Query:** {search_query}")

#         # Search the web using the processed query
#         links = search_web(search_query)

#         # Display the search results
#         if links:
#             st.write("Here are some links you might find useful:")
#             for idx, link in enumerate(links):
#                 st.write(f"{idx + 1}. [Link {idx + 1}]({link})")
#         else:
#             st.write("Sorry, I couldn't find any relevant links.")
#     else:
#         # Handle general conversation
#         response = chat_with_llm(user_input)
#         st.write(f"**Vishal:** {response}")

#     # Ensure input field is cleared after processing
#     st.text_input("You:", "", key="user_input")