import gradio as gr
from huggingface_hub import InferenceClient
# Initialize the client with your model
client = InferenceClient("Arnic/gemma2-2b-it-Pubmed20k-TPU")
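# Note: this assumes the model is reachable through the Hugging Face Inference
# API. For a gated or private model, you would likely need to pass a token,
# e.g. InferenceClient("Arnic/gemma2-2b-it-Pubmed20k-TPU", token="hf_..."),
# where "hf_..." is a placeholder for your own access token.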
# Define response function
def respond(
message,
history: list[tuple[str, str]],
system_message,
max_tokens,
temperature,
top_p,
):
    # Fall back to a default persona when no system message is supplied via the UI
    if not system_message:
        system_message = (
            "You are a good listener. You advise relaxation exercises, suggest avoiding negative thoughts, "
            "and guide through steps to manage stress. Let's discuss what's on your mind, "
            "or ask me for a quick relaxation exercise."
        )
# Format history and system message as prompt text
chat_history = ""
for user_msg, bot_reply in history:
if user_msg:
chat_history += f"User: {user_msg}\n"
if bot_reply:
chat_history += f"Assistant: {bot_reply}\n"
prompt = f"{system_message}\n\n{chat_history}User: {message}\nAssistant:"
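    # The assembled prompt then looks roughly like:
    #   <system message>
    #
    #   User: <earlier user turn>
    #   Assistant: <earlier reply>
    #   User: <current message>
    #   Assistant: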
# Generate response using the InferenceClient text generation method
response = client.text_generation(
prompt=prompt,
max_new_tokens=max_tokens,
temperature=temperature,
top_p=top_p
)
    # With details=False (the default), text_generation returns the generated
    # string directly, not a dict. Strip any echoed prompt and whitespace.
    generated_text = response.replace(prompt, "").strip()
    yield generated_text
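    # Alternative sketch (not exercised here): stream tokens incrementally,
    # since respond() is already a generator that gr.ChatInterface can consume.
    # With stream=True and details=False, text_generation yields token strings:
    #   partial = ""
    #   for token in client.text_generation(
    #       prompt=prompt,
    #       max_new_tokens=max_tokens,
    #       temperature=temperature,
    #       top_p=top_p,
    #       stream=True,
    #   ):
    #       partial += token
    #       yield partial
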
# Set up Gradio interface
demo = gr.ChatInterface(
respond,
additional_inputs=[
        gr.Textbox(value="", label="System message (leave blank for the default persona)"),
gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
gr.Slider(
minimum=0.1,
maximum=1.0,
value=0.95,
step=0.05,
label="Top-p (nucleus sampling)",
),
],
)
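
# To run locally (assumes `pip install gradio huggingface_hub`):
#   python app.py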
if __name__ == "__main__":
demo.launch()