# https://chat.lmsys.org/?leaderboard
import langchain
# https://huggingface.co/spaces/joyson072/LLm-Langchain/blob/main/app.py
# for the LLM, the chain and the prompt
from langchain.llms import HuggingFaceHub
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
####
## Sentiment models
# https://huggingface.co/spaces/CK42/sentiment-model-comparison
# 1, 4 seem best for german
####
model_id_1 = "nlptown/bert-base-multilingual-uncased-sentiment"
model_id_2 = "microsoft/deberta-xlarge-mnli"
model_id_3 = "distilbert-base-uncased-finetuned-sst-2-english"
model_id_4 = "lordtt13/emo-mobilebert"
model_id_5 = "juliensimon/reviews-sentiment-analysis"
model_id_6 = "sbcBI/sentiment_analysis_model"
chat_model_facebook_blenderbot_400M_distill = "facebook/blenderbot-400M-distill"
chat_model_HenryJJ_vincua_13b = "HenryJJ/vincua-13b"
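# Illustrative sketch (not part of the original flow): the sentiment model IDs
# above could be loaded with the `transformers` pipeline API; model_id_1 is
# multilingual (incl. German) and predicts 1-5 star ratings. This assumes
# `transformers` and a backend such as `torch` are installed.
from transformers import pipeline

sentiment_classifier = pipeline("sentiment-analysis", model=model_id_1)
print(sentiment_classifier("Das Essen war ausgezeichnet."))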
# https://colab.research.google.com/drive/1hrS6_g14EcOD4ezwSGlGX2zxJegX5uNX#scrollTo=NUwUR9U7qkld
llm_hf = HuggingFaceHub(
    repo_id=chat_model_facebook_blenderbot_400M_distill,
    model_kwargs={"temperature": 0.9}
)
text = "Why did the chicken cross the road?"
output_question_1 = llm_hf(text)
print(output_question_1)
###
## FACT EXTRACTION
###
fact_extraction_prompt = PromptTemplate(
    input_variables=["text_input"],
    template="Extract the key facts out of this text. Don't include opinions. Give each fact a number and keep them short sentences. :\n\n {text_input}"
)
fact_extraction_chain = LLMChain(llm=llm_hf, prompt=fact_extraction_prompt)
facts = fact_extraction_chain.run(text + " " + output_question_1)
print(facts)
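# Illustrative follow-up (assumption, not in the original script): score each
# extracted fact with the sentiment classifier sketched above.
for fact in facts.split("\n"):
    if fact.strip():
        print(fact, sentiment_classifier(fact))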