Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -1,33 +1,86 @@
 import gradio as gr
-from transformers import AutoTokenizer, AutoModelForCausalLM
+from huggingface_hub import InferenceClient
+import os
+import ssl
 
-# ...
-...
-tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForCausalLM.from_pretrained(model_name)
+# Ensure SSL module is available
+ssl._create_default_https_context = ssl._create_unverified_context
+
+# Load Hugging Face API Token from Environment Variables
+HF_TOKEN = os.getenv("HF_TOKEN")
+
+# Initialize Mistral-7B Model
+client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.1", token=HF_TOKEN)
 
+# Function to handle chat
 def respond(message, history):
-    # Format conversation history
     messages = []
+
     for user_msg, bot_msg in history:
-        ...
+        if user_msg:
+            messages.append({"role": "user", "content": user_msg})
         if bot_msg:
             messages.append({"role": "assistant", "content": bot_msg})
+
     messages.append({"role": "user", "content": message})
 
-    ...
-    outputs = model.generate(inputs, max_new_tokens=200)
-    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    response = ""
+    chat_response = client.chat_completion(
+        messages,
+        max_tokens=512,     # Default max tokens
+        stream=False,       # Change to True if streaming works
+        temperature=0.7,    # Default temperature
+        top_p=0.95,         # Default top-p value
+    )
+
+    if hasattr(chat_response, "choices") and chat_response.choices:
+        response = chat_response.choices[0].message.content
 
     return response
 
-# ...
-...
+# Custom Styling for Dark Mode
+custom_css = """
+body {
+    background-color: #121212;
+    color: white;
+    font-family: 'Arial', sans-serif;
+}
+.gradio-container {
+    max-width: 700px;
+    margin: auto;
+    padding: 20px;
+    background: #1E1E1E;
+    border-radius: 10px;
+    box-shadow: 0px 4px 10px rgba(0, 0, 0, 0.2);
+}
+h1 {
+    font-size: 24px;
+    font-weight: bold;
+    text-align: left;
+    color: #00ccff;
+}
+h2 {
+    text-align: center;
+    font-size: 30px;
+    font-weight: bold;
+    color: white;
+}
+.watermark {
+    text-align: center;
+    font-size: 14px;
+    color: gray;
+    margin-top: 20px;
+}
+"""
+
+# Gradio Chat Interface
+with gr.Blocks(css=custom_css) as demo:
+    gr.Markdown("<h1>Mistral AI Chatbot</h1>")   # Top left title
+    gr.Markdown("<h2>How can I help you?</h2>")  # Center title
+
+    chatbot = gr.ChatInterface(respond)
+
+    gr.Markdown('<div class="watermark">Created by Rajma</div>')
 
-if ...
+if __name__ == "__main__":
     demo.launch()
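
The new handler requests a complete reply with stream=False, and its inline comment hints that streaming may work. A minimal streaming sketch under the same client and message list as app.py above (respond_stream is an illustrative name; with stream=True, huggingface_hub's chat_completion returns an iterator of chunks whose choices[0].delta.content holds the incremental text, and gr.ChatInterface renders generator functions progressively):

# Hypothetical streaming variant of respond(); assumes the client
# and message format from app.py above.
def respond_stream(message, history):
    messages = []
    for user_msg, bot_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if bot_msg:
            messages.append({"role": "assistant", "content": bot_msg})
    messages.append({"role": "user", "content": message})

    response = ""
    # stream=True turns the call into an iterator of partial chunks
    for chunk in client.chat_completion(
        messages, max_tokens=512, stream=True, temperature=0.7, top_p=0.95
    ):
        token = chunk.choices[0].delta.content
        if token:
            response += token
            yield response  # yield the growing reply so the UI updates live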
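
One caveat on the history loop: unpacking "for user_msg, bot_msg in history" assumes Gradio's older tuple-style history. If the Space pins a Gradio release where the interface is built as gr.ChatInterface(respond, type="messages"), history already arrives as role/content dicts, and a sketch of the handler under that assumption would be:

# Sketch assuming gr.ChatInterface(respond, type="messages"),
# where history items are already {"role": ..., "content": ...} dicts.
def respond(message, history):
    messages = [{"role": m["role"], "content": m["content"]} for m in history]
    messages.append({"role": "user", "content": message})
    chat_response = client.chat_completion(messages, max_tokens=512)
    return chat_response.choices[0].message.content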