# chatBot / app.py
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer
from threading import Thread
# Load the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("togethercomputer/RedPajama-INCITE-Chat-3B-v1")
model = AutoModelForCausalLM.from_pretrained("togethercomputer/RedPajama-INCITE-Chat-3B-v1", torch_dtype=torch.float16)
# Move model to GPU if available, otherwise use CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device)

class StopOnTokens(StoppingCriteria):
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        stop_ids = [29, 0]  # Token IDs at which generation should stop for this model
        for stop_id in stop_ids:
            if input_ids[0][-1] == stop_id:
                return True
        return False

def predict(message, history):
    # history is a list of [user_message, bot_message] pairs; append the new user turn
    history_transformer_format = history + [[message, ""]]
    stop = StopOnTokens()
    # Format the conversation in the model's <human>/<bot> chat format
    messages = "".join([f"\n<human>:{item[0]}\n<bot>:{item[1]}" for item in history_transformer_format])
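    # For illustration (hypothetical conversation): a history of [["Hi", "Hello!"]] plus the
    # new message "How are you?" produces the prompt
    #   "\n<human>:Hi\n<bot>:Hello!\n<human>:How are you?\n<bot>:"
    # i.e. the prompt ends with "<bot>:" so the model continues the reply as the bot.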
    # Tokenize the prompt and move it to the same device as the model (GPU/CPU)
    model_inputs = tokenizer([messages], return_tensors="pt").to(device)
    # Create a streamer that yields output tokens as they are generated
    streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)
    # Define generation parameters (model_inputs supplies input_ids and attention_mask)
    generate_kwargs = dict(
        model_inputs,
        streamer=streamer,
        max_new_tokens=1024,
        do_sample=True,
        top_p=0.95,
        top_k=1000,
        temperature=1.0,
        num_beams=1,
        stopping_criteria=StoppingCriteriaList([stop])
    )
    # Run generation in a background thread so tokens can be streamed as they arrive
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()
    # Yield the growing partial response as tokens come off the streamer
    partial_message = ""
    for new_token in streamer:
        if new_token != '<':  # Skip a bare '<' (the start of the next "<human>:" turn marker)
            partial_message += new_token
            yield partial_message
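
# Quick sanity check outside the UI (a minimal sketch; assumes the model loaded above fits in
# memory on this machine). predict() is a generator, so iterate over the partial responses:
#
# for partial in predict("What is the capital of France?", []):
#     print(partial)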
# Gradio interface to interact with the model
gr.ChatInterface(predict).launch()
# import gradio as gr
# from huggingface_hub import InferenceClient
# """
# For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
# """
# client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
# def respond(
#     message,
#     history: list[tuple[str, str]],
#     system_message,
#     max_tokens,
#     temperature,
#     top_p,
# ):
#     messages = [{"role": "system", "content": system_message}]
#     for val in history:
#         if val[0]:
#             messages.append({"role": "user", "content": val[0]})
#         if val[1]:
#             messages.append({"role": "assistant", "content": val[1]})
#     messages.append({"role": "user", "content": message})
#     response = ""
#     for message in client.chat_completion(
#         messages,
#         max_tokens=max_tokens,
#         stream=True,
#         temperature=temperature,
#         top_p=top_p,
#     ):
#         token = message.choices[0].delta.content
#         response += token
#         yield response
# """
# For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
# """
# demo = gr.ChatInterface(
#     respond,
#     additional_inputs=[
#         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
#         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
#         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
#         gr.Slider(
#             minimum=0.1,
#             maximum=1.0,
#             value=0.95,
#             step=0.05,
#             label="Top-p (nucleus sampling)",
#         ),
#     ],
# )
# if __name__ == "__main__":
#     demo.launch()
# import gradio as gr
# def fake(message, history):
#     if message.strip():
#         # Instead of returning audio directly, return a message
#         return "Playing sample audio...", gr.Audio("https://github.com/gradio-app/gradio/raw/main/test/test_files/audio_sample.wav")
#     else:
#         return "Please provide the name of an artist", None
# with gr.Blocks() as demo:
#     chatbot = gr.Chatbot(placeholder="Play music by any artist!")
#     textbox = gr.Textbox(placeholder="Which artist's music do you want to listen to?", scale=7)
#     audio_player = gr.Audio()
#     def chat_interface(message, history):
#         response, audio = fake(message, history)
#         return history + [(message, response)], audio
#     textbox.submit(chat_interface, [textbox, chatbot], [chatbot, audio_player])
# demo.launch()
# import random
# def random_response(message, history):
#     return random.choice(["Yes", "No"])
# gr.ChatInterface(random_response).launch()
# import gradio as gr
# def yes_man(message, history):
#     if message.endswith("?"):
#         return "Yes"
#     else:
#         return "Ask me anything!"
# gr.ChatInterface(
#     yes_man,
#     chatbot=gr.Chatbot(placeholder="<strong>Ask me a yes or no question</strong><br>Ask me anything"),
#     textbox=gr.Textbox(placeholder="Ask me a yes or no question", container=False, scale=15),
#     title="Yes Man",
#     description="Ask Yes Man any question",
#     theme="soft",
#     examples=[{"text": "Hello"}, {"text": "Am I cool?"}, {"text": "Are tomatoes vegetables?"}],
#     cache_examples=True,
#     retry_btn=None,
#     undo_btn="Delete Previous",
#     clear_btn="Clear",
# ).launch()
# below code is not working
# (likely because the gr.Chatbot output expects a list of message pairs, not the plain string count_files returns)
# import gradio as gr
# def count_files(files):
#     num_files = len(files)
#     return f"You uploaded {num_files} file(s)"
# with gr.Blocks() as demo:
#     with gr.Row():
#         chatbot = gr.Chatbot()
#         file_input = gr.Files(label="Upload Files")
#     file_input.change(count_files, inputs=file_input, outputs=chatbot)
# demo.launch()
# new code
# import os
# from langchain_openai import ChatOpenAI
# from langchain.schema import AIMessage, HumanMessage
# import openai
# import gradio as gr
# os.environ["OPENAI_API_KEY"] = "sk-..."  # Set your own key here; never commit a real key to the repo
# llm = ChatOpenAI(temperature=1.0, model='gpt-3.5-turbo')
# def predict(message, history):
#     history_langchain_format = []
#     for msg in history:
#         if msg['role'] == "user":
#             history_langchain_format.append(HumanMessage(content=msg['content']))
#         elif msg['role'] == "assistant":
#             history_langchain_format.append(AIMessage(content=msg['content']))
#     history_langchain_format.append(HumanMessage(content=message))
#     gpt_response = llm(history_langchain_format)
#     return gpt_response.content
# gr.ChatInterface(predict).launch()