import gradio as gr |
import langchain
import transformers
from langchain.llms import HuggingFaceHub
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration
from transformers import pipeline


# Prompt-format helpers: each one renders a system message and a list of
# (user, assistant) history pairs into the prompt style a model family expects.
def prompt_human_instruct(system_msg, history):
    return system_msg.strip() + "\n" + \
        "\n".join(["\n".join(["###Human: " + item[0], "###Assistant: " + item[1]])
                   for item in history])


def prompt_instruct(system_msg, history):
    return system_msg.strip() + "\n" + \
        "\n".join(["\n".join(["### Instruction: " + item[0], "### Response: " + item[1]])
                   for item in history])


def prompt_chat(system_msg, history):
    return system_msg.strip() + "\n" + \
        "\n".join(["\n".join(["USER: " + item[0], "ASSISTANT: " + item[1]])
                   for item in history])


def prompt_roleplay(system_msg, history):
    return "<|system|>" + system_msg.strip() + "\n" + \
        "\n".join(["\n".join(["<|user|>" + item[0], "<|model|>" + item[1]])
                   for item in history])
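
# Illustrative example (the history pair below is an assumption, not part of
# the original script): prompt_chat renders the system message plus history as
# alternating "USER:" / "ASSISTANT:" turns, e.g.
#
#   prompt_chat("You are a helpful assistant.", [("Hi", "Hello there!")])
#   -> 'You are a helpful assistant.\nUSER: Hi\nASSISTANT: Hello there!'
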
# Candidate sentiment-analysis checkpoints on the Hugging Face Hub.
model_id_1 = "nlptown/bert-base-multilingual-uncased-sentiment"
model_id_2 = "microsoft/deberta-xlarge-mnli"
model_id_3 = "distilbert-base-uncased-finetuned-sst-2-english"
model_id_4 = "lordtt13/emo-mobilebert"
model_id_5 = "juliensimon/reviews-sentiment-analysis"
model_id_6 = "sbcBI/sentiment_analysis_model"
model_id_7 = "oliverguhr/german-sentiment-bert"

# Local inference pipeline for the German sentiment model.
pipe = pipeline("sentiment-analysis", model=model_id_7)


def predict(text):
    # The sentiment pipeline returns a list of dicts such as
    # [{"label": "positive", "score": 0.98}]; report the label.
    sentiment_result = pipe(text)[0]["label"]
    print(sentiment_result)
    return sentiment_result
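
# Illustrative call (commented out so it does not run at import time; the
# German sample sentence is an assumption, not from the original script):
# predict("Das Essen war ausgezeichnet!")  # -> e.g. "positive"
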
# Remote variant: the same model loaded as a Gradio interface, kept under a
# separate name so the wrapper function below does not shadow it.
sentiment_remote = gr.load(model_id_7, src="huggingface")


def sentiment(message):
    # A loaded interface can be called like a plain function.
    sentiment_label = sentiment_remote(message)
    print(sentiment_label)
    return sentiment_label

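# Illustrative remote call (commented out; whether the loaded interface is
# directly callable depends on the Gradio version, so treat this as a sketch):
# sentiment("I love this movie!")
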
# Candidate conversational models.
chat_model_facebook_blenderbot_400M_distill = "facebook/blenderbot-400M-distill"
chat_model_HenryJJ_vincua_13b = "HenryJJ/vincua-13b"

# Sample prompt for quick manual testing.
text = "Why did the chicken cross the road?"

# Small instruction-tuned Hub model for fact extraction; the low temperature
# keeps the output close to the input text.
llm_factextract = HuggingFaceHub(
    repo_id="google/flan-t5-small",
    model_kwargs={"temperature": 0.1,
                  "max_new_tokens": 250})

fact_extraction_prompt = PromptTemplate(
    input_variables=["text_input"],
    template="Extract the key facts out of this text. Don't include opinions. Give each fact a number and keep them short sentences. :\n\n {text_input}"
)


def factextraction(message):
    fact_extraction_chain = LLMChain(llm=llm_factextract, prompt=fact_extraction_prompt)
    facts = fact_extraction_chain.run(message)
    print(facts)
    return facts
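
# Illustrative call (commented out; HuggingFaceHub needs a valid
# HUGGINGFACEHUB_API_TOKEN in the environment, and the sample text is an
# assumption, not from the original script):
# factextraction("The Eiffel Tower is 330 metres tall and stands in Paris.")
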
# Local Blenderbot chat model, reusing the checkpoint id defined above.
model_name = chat_model_facebook_blenderbot_400M_distill
tokenizer = BlenderbotTokenizer.from_pretrained(model_name)
model = BlenderbotForConditionalGeneration.from_pretrained(model_name)


def func(message, checkbox, numb):
    inputs = tokenizer(message, return_tensors="pt")
    result = model.generate(**inputs)
    # Return the decoded reply plus a fixed confidence value for the number output.
    return tokenizer.decode(result[0], skip_special_tokens=True), 0.9
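
# Illustrative call (commented out; the checkbox and slider values are unused
# placeholders in func, so any values work):
# reply, score = func("Why did the chicken cross the road?", False, 50)
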
# Chat UI for the Blenderbot model (defined here but not launched below).
app = gr.Interface(
    fn=func,
    title="Conversation Bot",
    inputs=["text", "checkbox", gr.Slider(0, 100)],
    outputs=["text", "number"],
)

# Sentiment UI; this is the interface the script actually launches.
app_sentiment = gr.Interface(fn=predict, inputs="textbox", outputs="textbox",
                             title="Sentiment Analysis")

app_sentiment.launch()