import gradio as gr

import langchain
import transformers

from langchain.llms import HuggingFaceHub
from langchain.chains import ConversationChain, LLMChain, SimpleSequentialChain
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate

# LLM shared by the question / assumption / fact-checking chains
llm = HuggingFaceHub(
    repo_id="google/flan-ul2",
    model_kwargs={"temperature": 0.1,
                  "max_new_tokens": 250})

# Chain 1: answer the question directly
template = """{question}\n\n"""
prompt_template = PromptTemplate(input_variables=["question"], template=template)
question_chain = LLMChain(llm=llm, prompt=prompt_template)

# Chain 2: list the assumptions behind that answer
template = """Here is a statement:
{statement}
Make a bullet point list of the assumptions you made when producing the above statement.\n\n"""
prompt_template = PromptTemplate(input_variables=["statement"], template=template)
assumptions_chain = LLMChain(llm=llm, prompt=prompt_template)
assumptions_chain_seq = SimpleSequentialChain(
    chains=[question_chain, assumptions_chain], verbose=True
)

# Chain 3: fact-check each of those assumptions
template = """Here is a bullet point list of assertions:
{assertions}
For each assertion, determine whether it is true or false. If it is false, explain why.\n\n"""
prompt_template = PromptTemplate(input_variables=["assertions"], template=template)
fact_checker_chain = LLMChain(llm=llm, prompt=prompt_template)
fact_checker_chain_seq = SimpleSequentialChain(
    chains=[question_chain, assumptions_chain, fact_checker_chain], verbose=True
)

# Chain 4: answer the original question in light of the checked facts
template = """In light of the above facts, how would you answer the question '{}'""".format(
    "What is the capital of the USA?"
)
template = """{facts}\n""" + template
prompt_template = PromptTemplate(input_variables=["facts"], template=template)
answer_chain = LLMChain(llm=llm, prompt=prompt_template)
overall_chain = SimpleSequentialChain(
    chains=[question_chain, assumptions_chain, fact_checker_chain, answer_chain],
    verbose=True,
)
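
# Minimal usage sketch for the verification chain above. It assumes a valid
# HUGGINGFACEHUB_API_TOKEN is available in the environment; the question is
# illustrative only and mirrors the one hard-coded into the final prompt.
# final_answer = overall_chain.run("What is the capital of the USA?")
# print(final_answer)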


from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration

# Prompt builders for common chat formats; history is a list of (user, assistant) pairs.
def prompt_human_instruct(system_msg, history):
    return system_msg.strip() + "\n" + \
        "\n".join(["\n".join(["###Human: " + item[0], "###Assistant: " + item[1]])
                   for item in history])


def prompt_instruct(system_msg, history):
    return system_msg.strip() + "\n" + \
        "\n".join(["\n".join(["### Instruction: " + item[0], "### Response: " + item[1]])
                   for item in history])


def prompt_chat(system_msg, history):
    return system_msg.strip() + "\n" + \
        "\n".join(["\n".join(["USER: " + item[0], "ASSISTANT: " + item[1]])
                   for item in history])


def prompt_roleplay(system_msg, history):
    return "<|system|>" + system_msg.strip() + "\n" + \
        "\n".join(["\n".join(["<|user|>" + item[0], "<|model|>" + item[1]])
                   for item in history])
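
# Illustrative only: prompt_chat would turn a system message plus a one-turn
# history (both made up for this sketch) into a single prompt string like
#   "You are a helpful assistant.\nUSER: Hi!\nASSISTANT: Hello, how can I help?"
# print(prompt_chat("You are a helpful assistant.", [("Hi!", "Hello, how can I help?")]))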

# Candidate sentiment models; model_id_7 (German sentiment) is the one used below
model_id_1 = "nlptown/bert-base-multilingual-uncased-sentiment"
model_id_2 = "microsoft/deberta-xlarge-mnli"
model_id_3 = "distilbert-base-uncased-finetuned-sst-2-english"
model_id_4 = "lordtt13/emo-mobilebert"
model_id_5 = "juliensimon/reviews-sentiment-analysis"
model_id_6 = "sbcBI/sentiment_analysis_model"
model_id_7 = "oliverguhr/german-sentiment-bert"


from transformers import pipeline


sentiment_pipe = pipeline("sentiment-analysis", model=model_id_7)

def pipeline_predict_sentiment(text):
    sentiment_result = sentiment_pipe(text)
    print(sentiment_result)
    return sentiment_result
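
# Illustrative only (German example sentence made up for this sketch):
# pipeline_predict_sentiment("Der Service war leider sehr langsam.")
# -> e.g. [{'label': 'negative', 'score': ...}]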
|
|
|
|
|
# The "conversational" pipeline expects a Conversation object rather than a raw string
from transformers import Conversation

chat_pipe = pipeline("conversational")

def pipeline_predict_chat(text):
    conversation = chat_pipe(Conversation(text))
    print(conversation)
    return conversation.generated_responses[-1]

chat_model_facebook_blenderbot_400M_distill = "facebook/blenderbot-400M-distill"
chat_model_HenryJJ_vincua_13b = "HenryJJ/vincua-13b"

text = "Why did the chicken cross the road?"

# Separate, smaller LLM used only for fact extraction
llm_factextract = HuggingFaceHub(
    repo_id="google/flan-t5-small",
    model_kwargs={"temperature": 0.1,
                  "max_new_tokens": 250})

fact_extraction_prompt = PromptTemplate(
    input_variables=["text_input"],
    template="Extract the key facts out of this text. Don't include opinions. Give each fact a number and keep them short sentences:\n\n{text_input}"
)

def factextraction(message):
    fact_extraction_chain = LLMChain(llm=llm_factextract, prompt=fact_extraction_prompt)
    facts = fact_extraction_chain.run(message)
    print(facts)
    return facts
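
# Illustrative only (input text made up for this sketch):
# factextraction("Berlin is the capital of Germany and has about 3.7 million inhabitants.")
# -> numbered list of short facts extracted by flan-t5-small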
# Blenderbot chat model used by the Gradio interfaces below; the tokenizer gets its
# own name so the instruct-igel tokenizer defined further down does not shadow it
model_name_chat = 'facebook/blenderbot-400M-distill'
tokenizer_chat = BlenderbotTokenizer.from_pretrained(model_name_chat)
model_chat = BlenderbotForConditionalGeneration.from_pretrained(model_name_chat)

def func(message):
    inputs = tokenizer_chat(message, return_tensors="pt")
    result = model_chat.generate(**inputs)
    print(result)
    return tokenizer_chat.decode(result[0], skip_special_tokens=True)
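
# Illustrative only:
# print(func("Why did the chicken cross the road?"))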
|
|
|
title = "Conversation Bot"
desc = "Some way ... "
app = gr.Interface(
    fn=func,
    title=title,
    description=desc,
    # func takes a single text message and returns a single text reply
    inputs="text",
    outputs="text",
)


examples = [
    ["Erzähl mir eine Geschichte!", 50, 2, 3, 1, "Deutsch"],
    ["Welche Blumen sollte man jemandem zum Valentinstag schenken?", 50, 1, 0, 1, "Deutsch"],
    ["Please write a step by step recipe to make bolognese pasta!", 50, 2, 3, 2, "Englisch"]
]

# Translation pipelines: German questions are translated to English, answered by
# flan-t5, then translated back to German
tDeEn = pipeline(model="Helsinki-NLP/opus-mt-de-en")
tEnDe = pipeline(model="Helsinki-NLP/opus-mt-en-de")
bot = pipeline(model="google/flan-t5-large")

def solve(text, max_length, length_penalty, no_repeat_ngram_size, num_beams, language):
    if language == "Deutsch":
        text = tDeEn(text)[0]["translation_text"]
    out = bot(text, max_length=max_length, length_penalty=length_penalty,
              no_repeat_ngram_size=no_repeat_ngram_size, num_beams=num_beams,
              early_stopping=True)[0]["generated_text"]
    if language == "Deutsch":
        out = tEnDe(out)[0]["translation_text"]
    return out
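
# Illustrative only (question taken from the examples list, slider values are the defaults):
# print(solve("Welche Blumen sollte man jemandem zum Valentinstag schenken?", 50, 1.0, 3, 1, "Deutsch"))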
|
|
|
task = gr.Interface(
    fn=solve,
    inputs=[
        gr.Textbox(lines=5, max_lines=6, label="Frage"),
        gr.Slider(minimum=1.0, maximum=200.0, value=50.0, step=1, interactive=True, label="max_length"),
        gr.Slider(minimum=1.0, maximum=20.0, value=1.0, step=1, interactive=True, label="length_penalty"),
        gr.Slider(minimum=0.0, maximum=5.0, value=3.0, step=1, interactive=True, label="no_repeat_ngram_size"),
        gr.Slider(minimum=1.0, maximum=20.0, value=1.0, step=1, interactive=True, label="num_beams"),
        gr.Dropdown(["Deutsch", "Englisch"], value="Deutsch"),
    ],
    outputs="text",
    title=title,
    description=desc,
    examples=examples
)

from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig, TextIteratorStreamer
from threading import Thread
import torch

model_id = "philschmid/instruct-igel-001"
model = AutoModelForCausalLM.from_pretrained(model_id, low_cpu_mem_usage=True)
tokenizer = AutoTokenizer.from_pretrained(model_id)
prompt_template = "### Anweisung:\n{input}\n\n### Antwort:"

# run on GPU if available; the original snippet used `device` without defining it
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)

def generate(instruction, temperature=1.0, max_new_tokens=256, top_p=0.9, length_penalty=1.0):
    formatted_instruction = prompt_template.format(input=instruction)

    temperature = float(temperature)
    top_p = float(top_p)
    length_penalty = float(length_penalty)

    streamer = TextIteratorStreamer(tokenizer)
    model_inputs = tokenizer(formatted_instruction, return_tensors="pt", truncation=True, max_length=2048)
    model_inputs = {k: v.to(device) for k, v in model_inputs.items()}

    generate_kwargs = dict(
        top_p=top_p,
        top_k=0,
        temperature=temperature,
        do_sample=True,
        max_new_tokens=max_new_tokens,
        early_stopping=True,
        length_penalty=length_penalty,
        eos_token_id=tokenizer.eos_token_id,
        pad_token_id=tokenizer.eos_token_id,
    )
    # generation runs in a background thread so tokens can be streamed as they arrive
    t = Thread(target=model.generate, kwargs={**dict(model_inputs, streamer=streamer), **generate_kwargs})
    t.start()

    output = ""
    hidden_output = ""
    for new_text in streamer:
        # skip the echoed prompt before starting to yield generated text
        if len(hidden_output) <= len(formatted_instruction):
            hidden_output += new_text
            continue
        if tokenizer.eos_token in new_text:
            new_text = new_text.replace(tokenizer.eos_token, "")
        output += new_text
        yield output

    return output
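
# Illustrative only (German instruction made up for this sketch). generate() is a
# generator, so iterate over it to receive the progressively built answer:
# for partial in generate("Erkläre in einem Satz, was ein Chatbot ist."):
#     print(partial)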
# Zero-shot topic classification with German candidate labels
classifier = pipeline("zero-shot-classification")
text = "This is a tutorial about Hugging Face."
candidate_labels = ["informieren", "kaufen", "beschweren", "verkaufen"]

def topic_sale_inform(text):
    res = classifier(text, candidate_labels)
    print(res)
    return res
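
# Illustrative only (German example sentence made up for this sketch):
# topic_sale_inform("Ich möchte mich über die Lieferzeit beschweren.")
# -> scores over the candidate_labels defined above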

def callChains(current_message, max_length, length_penalty, no_repeat_ngram_size, num_beams, language):
    # the slider/dropdown values come from the UI but are not forwarded to the Blenderbot call
    sentiment_analysis_result = pipeline_predict_sentiment(current_message)
    topic_sale_inform_result = topic_sale_inform(current_message)

    final_answer = func(current_message)

    return final_answer, sentiment_analysis_result, topic_sale_inform_result


current_message_inputfield = gr.Textbox(lines=5, max_lines=6, label="Gib hier eine Nachricht ein")
final_answer_inputfield = gr.Textbox(label="Antwort", placeholder="Hier kommt die Antwort hin ...")
sentiment_analysis_result_inputfield = gr.Textbox(label="Sentiment")
topic_sale_inform_result_inputfield = gr.Textbox(label="Thema")

chat_bot = gr.Interface(fn=callChains,
                        inputs=[
                            current_message_inputfield,
                            gr.Slider(minimum=1.0, maximum=200.0, value=50.0, step=1, interactive=True, label="max_length"),
                            gr.Slider(minimum=1.0, maximum=20.0, value=1.0, step=1, interactive=True, label="length_penalty"),
                            gr.Slider(minimum=0.0, maximum=5.0, value=3.0, step=1, interactive=True, label="no_repeat_ngram_size"),
                            gr.Slider(minimum=1.0, maximum=20.0, value=1.0, step=1, interactive=True, label="num_beams"),
                            gr.Dropdown(["Deutsch", "Englisch"], value="Deutsch"),
                        ],
                        outputs=[final_answer_inputfield, sentiment_analysis_result_inputfield, topic_sale_inform_result_inputfield],
                        title="Conversation Bot with extra")

chat_bot.launch()

# Additional interface for the fact-extraction chain (defined here but not launched)
app_facts = gr.Interface(fn=factextraction, inputs="textbox", outputs="textbox", title="Conversation Bots")