# https://chat.lmsys.org/?leaderboard
import langchain
import transformers

# https://huggingface.co/spaces/joyson072/LLm-Langchain/blob/main/app.py
from langchain.llms import HuggingFaceHub

# for the chain and prompt
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain

# import model class and tokenizer
from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration

####
# Definitions of prompts for different purposes
# https://huggingface.co/spaces/Chris4K/rlhf-arena/edit/main/app.py
####
def prompt_human_instruct(system_msg, history):
    return system_msg.strip() + "\n" + \
        "\n".join(["\n".join(["###Human: " + item[0], "###Assistant: " + item[1]])
                   for item in history])


def prompt_instruct(system_msg, history):
    return system_msg.strip() + "\n" + \
        "\n".join(["\n".join(["### Instruction: " + item[0], "### Response: " + item[1]])
                   for item in history])


def prompt_chat(system_msg, history):
    return system_msg.strip() + "\n" + \
        "\n".join(["\n".join(["USER: " + item[0], "ASSISTANT: " + item[1]])
                   for item in history])


def prompt_roleplay(system_msg, history):
    return "<|system|>" + system_msg.strip() + "\n" + \
        "\n".join(["\n".join(["<|user|>" + item[0], "<|model|>" + item[1]])
                   for item in history])


####
## Sentiment models
# https://huggingface.co/spaces/CK42/sentiment-model-comparison
# models 1 and 4 seem best for German
####
model_id_1 = "nlptown/bert-base-multilingual-uncased-sentiment"
model_id_2 = "microsoft/deberta-xlarge-mnli"
model_id_3 = "distilbert-base-uncased-finetuned-sst-2-english"
model_id_4 = "lordtt13/emo-mobilebert"
model_id_5 = "juliensimon/reviews-sentiment-analysis"
model_id_6 = "sbcBI/sentiment_analysis_model"
model_id_7 = "oliverguhr/german-sentiment-bert"

# https://colab.research.google.com/drive/1hrS6_g14EcOD4ezwSGlGX2zxJegX5uNX#scrollTo=NUwUR9U7qkld
#llm_hf_sentiment = HuggingFaceHub(
#    repo_id=model_id_7,
#    model_kwargs={"temperature": 0.9}
#)

#sentiment_prompt = PromptTemplate(
#    input_variables=["text_input"],
#    template="Extract the key facts out of this text. Don't include opinions. Give each fact a number and keep them short sentences. :\n\n {text_input}"
#)

#def sentiment(message):
#    sentiment_chain = LLMChain(llm=llm_hf_sentiment, prompt=sentiment_prompt)
#    facts = sentiment_chain.run(message)
#    print(facts)
#    return facts

####
## Chat models
# https://huggingface.co/spaces/CK42/sentiment-model-comparison
# model 1 seems best for testing
####
chat_model_facebook_blenderbot_400M_distill = "facebook/blenderbot-400M-distill"
chat_model_HenryJJ_vincua_13b = "HenryJJ/vincua-13b"

text = "Why did the chicken cross the road?"

#output_question_1 = llm_hf(text)
#print(output_question_1)

####
## FACT EXTRACTION
####

# https://colab.research.google.com/drive/1hrS6_g14EcOD4ezwSGlGX2zxJegX5uNX#scrollTo=NUwUR9U7qkld
# Requires a valid HUGGINGFACEHUB_API_TOKEN in the environment.
llm_factextract = HuggingFaceHub(
    repo_id="google/flan-ul2",
    # repo_id="google/flan-t5-small",
    model_kwargs={"temperature": 0.1, "max_new_tokens": 256})

fact_extraction_prompt = PromptTemplate(
    input_variables=["text_input"],
    template="Extract the key facts out of this text. Don't include opinions. Give each fact a number and keep them short sentences. :\n\n {text_input}"
)
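# Sanity-check sketch (not part of the original app): PromptTemplate.format()
# fills the {text_input} slot, so the exact prompt sent to flan-ul2 can be
# inspected before any API call is made. The sample sentence is illustrative.
#print(fact_extraction_prompt.format(
#    text_input="Berlin is the capital of Germany and, in my opinion, its most exciting city."))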
def factextraction(message):
    fact_extraction_chain = LLMChain(llm=llm_factextract, prompt=fact_extraction_prompt)
    facts = fact_extraction_chain.run(message)
    print(facts)
    return facts


####
## Chat model
# model 1 seems best for testing
####

# download and set up the model and tokenizer
model_name = 'facebook/blenderbot-400M-distill'
tokenizer = BlenderbotTokenizer.from_pretrained(model_name)
model = BlenderbotForConditionalGeneration.from_pretrained(model_name)


def func(message):
    inputs = tokenizer(message, return_tensors="pt")
    result = model.generate(**inputs)
    return tokenizer.decode(result[0], skip_special_tokens=True)


import gradio as gr

# The interface is wired to the fact-extraction chain; swap fn=func to chat
# with the local Blenderbot model instead.
app = gr.Interface(fn=factextraction, inputs="textbox", outputs="textbox", title="Fact Extraction Bot")
app.launch()
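# Quick local smoke test (illustrative sketch, not part of the original app).
# factextraction() needs HUGGINGFACEHUB_API_TOKEN; func() runs offline once the
# Blenderbot weights are cached. Kept commented out because app.launch() above
# blocks until the Gradio server is stopped.
#sample = ("The Eiffel Tower is 330 metres tall. It was finished in 1889 "
#          "and I think it is the most beautiful structure in Paris.")
#print(factextraction(sample))          # numbered facts, opinions dropped
#print(func("Hi, how are you today?"))  # local Blenderbot chat reply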