import streamlit as st
from huggingface_hub import InferenceClient
from datetime import datetime
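
# Configure the Streamlit page; this must run before any other Streamlit command.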
st.set_page_config(
    page_title="DeepSeek Chatbot - ruslanmv.com",
    page_icon="🤖",
    layout="centered",
    initial_sidebar_state="expanded"
)
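
# Initialize the chat history in session state so it survives reruns.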
if "messages" not in st.session_state:
    st.session_state.messages = []
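
# Sidebar: model choice and generation settings.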
with st.sidebar:
    st.title("🤖 Chatbot Settings")
    st.markdown("Created by [ruslanmv.com](https://ruslanmv.com/)")
    selected_model = st.selectbox(
        "Choose Model",
        options=[
            "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
            "deepseek-ai/DeepSeek-R1",
            "deepseek-ai/DeepSeek-R1-Zero"
        ],
        index=0
    )
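
    # System message prepended to every prompt.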
    system_message = st.text_area(
        "System Message",
        value="You are a friendly Chatbot created by ruslanmv.com",
        height=100
    )
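
    # Upper bound on the number of tokens generated per reply.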
    max_new_tokens = st.slider(
        "Max new tokens",
        min_value=1,
        max_value=4000,
        value=512,
        step=50
    )
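
    # Sampling temperature: higher values produce more varied output.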
    temperature = st.slider(
        "Temperature",
        min_value=0.1,
        max_value=4.0,
        value=1.0,
        step=0.1
    )
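
    # Nucleus sampling: sample only from the smallest set of tokens whose
    # cumulative probability exceeds top_p.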
    top_p = st.slider(
        "Top-p (nucleus sampling)",
        min_value=0.1,
        max_value=1.0,
        value=0.9,
        step=0.1
    )
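
    # Optional token for gated models or higher rate limits.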
    hf_token = st.text_input(
        "HuggingFace Token (optional)",
        type="password",
        help="Enter your HuggingFace token if required for model access"
    )
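
# Main chat interface.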
st.title("💬 DeepSeek Chatbot")
st.caption("🚀 A conversational AI powered by DeepSeek models")
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
        if "timestamp" in message:
            st.caption(f"_{message['timestamp']}_")
if prompt := st.chat_input("Type your message..."):
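    # Record the user's message with a timestamp.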
    st.session_state.messages.append({
        "role": "user",
        "content": prompt,
        "timestamp": datetime.now().strftime("%H:%M:%S")
    })
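
    # Echo the user's message in the chat window.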
    with st.chat_message("user"):
        st.markdown(prompt)
        st.caption(f"_{st.session_state.messages[-1]['timestamp']}_")
    full_prompt = f"{system_message}\n\nUser: {prompt}\nAssistant:"
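
    # Note that earlier turns are not sent to the model, so replies are
    # stateless. A minimal sketch for carrying the whole history instead,
    # assuming the message format used above (not part of the original app):
    #
    # history = "\n".join(
    #     f"{'User' if m['role'] == 'user' else 'Assistant'}: {m['content']}"
    #     for m in st.session_state.messages[:-1]
    # )
    # full_prompt = f"{system_message}\n\n{history}\nUser: {prompt}\nAssistant:"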

    # Create the inference client; pass None rather than an empty string when
    # no token was entered, so the client can fall back to cached credentials.
    client = InferenceClient(model=selected_model, token=hf_token or None)
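
    # Stream the reply token-by-token into the chat window; st.write_stream
    # returns the full concatenated text once the stream finishes.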
    with st.chat_message("assistant"):
        response = st.write_stream(
            client.text_generation(
                full_prompt,
                max_new_tokens=max_new_tokens,
                temperature=temperature,
                top_p=top_p,
                stream=True
            )
        )
        timestamp = datetime.now().strftime("%H:%M:%S")
        st.caption(f"_{timestamp}_")
    st.session_state.messages.append({
        "role": "assistant",
        "content": response,
        "timestamp": timestamp
    })