import gradio as gr
from langchain.llms import HuggingFacePipeline
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
# Load a pre-trained model and tokenizer from Hugging Face
model_name = "HuggingFaceH4/zephyr-7b-beta"  # can be swapped for another conversational model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
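# Note: zephyr-7b-beta has ~7B parameters, so loading it in full float32 precision
# needs roughly 28 GB of RAM, which typically exceeds the free CPU tier on Spaces.
# A lighter-weight load might look like this (a sketch, assuming `torch` and
# `accelerate` are installed):
#
#   import torch
#   model = AutoModelForCausalLM.from_pretrained(
#       model_name,
#       torch_dtype=torch.float16,  # half precision halves the memory footprint
#       device_map="auto",          # let accelerate place layers on available devices
#   )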
# Create a text-generation pipeline for the conversational task
hf_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)

# Wrap the pipeline in a LangChain-compatible LLM
llm = HuggingFacePipeline(pipeline=hf_pipeline)
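# Optional tuning (the values below are assumptions, not from the original app):
# the text-generation pipeline accepts standard generation kwargs, e.g.
#
#   hf_pipeline = pipeline(
#       "text-generation", model=model, tokenizer=tokenizer,
#       max_new_tokens=256,  # cap the length of each response
#       do_sample=True,      # sample instead of greedy decoding
#       temperature=0.7,     # soften the output distribution
#   )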
# Create a conversation chain that keeps the chat history in memory
memory = ConversationBufferMemory()
conversation = ConversationChain(llm=llm, memory=memory)
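# Quick sanity check (illustrative only, not part of the app): because the chain
# holds prior turns in its buffer memory, a follow-up prompt can refer back to them.
#
#   conversation.run("Hi, my name is Alice.")
#   conversation.run("What is my name?")  # the model sees the earlier turn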
# Handle one chat turn for Gradio: run the chain, then append the exchange to the
# history so the Chatbot component can render it as (user, bot) pairs
def chatbot(user_input, history):
    response = conversation.run(user_input)
    history = history + [(user_input, response)]
    return history, ""  # updated history, and clear the textbox
# Gradio UI
with gr.Blocks() as demo:
    gr.Markdown("## 🤖 Chatbot with Hugging Face and LangChain")
    chatbot_interface = gr.Chatbot()
    user_input = gr.Textbox(label="Type your message:", placeholder="Say something...")
    submit_button = gr.Button("Send")
    # Bind the input and output: pass the current history in, get the updated
    # history and a cleared textbox back
    submit_button.click(
        chatbot,
        inputs=[user_input, chatbot_interface],
        outputs=[chatbot_interface, user_input],
    )
# Launch the app
demo.launch()
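# Tip: demo.launch(share=True) would also print a temporary public URL, which is
# handy when testing outside of Spaces.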