|
|
|
import langchain
from langchain.chains import LLMChain
from langchain.llms import HuggingFaceHub
from langchain.prompts import PromptTemplate
|
|
|
|
|
# LLM backed by the HuggingFace Hub inference API.
# NOTE(review): requires HUGGINGFACEHUB_API_TOKEN in the environment — confirm it is set.
_oasst_repo = "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5"
_oasst_kwargs = {"temperature": 0.9}

llm_hf = HuggingFaceHub(repo_id=_oasst_repo, model_kwargs=_oasst_kwargs)

# First prompt: ask the model the classic joke setup and show its reply.
text = "Why did the chicken cross the road?"

output_question_1 = llm_hf(text)
print(output_question_1)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Prompt that asks the model to distill a passage into short, numbered facts.
fact_extraction_prompt = PromptTemplate(
    input_variables=["text_input"],
    template="Extract the key facts out of this text. Don't include opinions. Give each fact a number and keep them short sentences. :\n\n {text_input}",
)

# BUG FIX: the original passed `llm=llm`, but no `llm` is defined in this file —
# the HuggingFaceHub instance created above is named `llm_hf`.
fact_extraction_chain = LLMChain(llm=llm_hf, prompt=fact_extraction_prompt)

# Run fact extraction over the question plus the model's earlier answer.
facts = fact_extraction_chain.run(text + " " + output_question_1)

print(facts)
|
|