Chris4K committed
Commit
b7555d0
1 Parent(s): f39437f

Update app.py

Files changed (1): app.py (+54 -1)
app.py CHANGED
@@ -23,7 +23,60 @@ from langchain.llms import HuggingFaceHub
 
 # for the chain and prompt
 from langchain.prompts import PromptTemplate
-from langchain.chains import LLMChain
+from langchain.chains import LLMChain, SimpleSequentialChain
+
+###################
+
+llm = HuggingFaceHub(
+    # repo_id="google/flan-ul2",
+    repo_id="google/flan-t5-small",
+    model_kwargs={"temperature": 0.1,
+                  "max_new_tokens": 250})
+
+
+# Chain 1: generate a statement answering the user's question
+template = """{question}\n\n"""
+prompt_template = PromptTemplate(input_variables=["question"], template=template)
+question_chain = LLMChain(llm=llm, prompt=prompt_template)
+
+# Chain 2: list the assumptions made in that statement
+template = """Here is a statement:
+{statement}
+Make a bullet point list of the assumptions you made when producing the above statement.\n\n"""
+prompt_template = PromptTemplate(input_variables=["statement"], template=template)
+assumptions_chain = LLMChain(llm=llm, prompt=prompt_template)
+assumptions_chain_seq = SimpleSequentialChain(
+    chains=[question_chain, assumptions_chain], verbose=True
+)
+
+# Chain 3: fact-check the assumptions
+template = """Here is a bullet point list of assertions:
+{assertions}
+For each assertion, determine whether it is true or false. If it is false, explain why.\n\n"""
+prompt_template = PromptTemplate(input_variables=["assertions"], template=template)
+fact_checker_chain = LLMChain(llm=llm, prompt=prompt_template)
+fact_checker_chain_seq = SimpleSequentialChain(
+    chains=[question_chain, assumptions_chain, fact_checker_chain], verbose=True
+)
+
+# Final chain: answer the user's question in light of the checked facts
+user_question = "What is the capital of the USA?"
+template = """In light of the above facts, how would you answer the question '{}'""".format(
+    user_question
+)
+template = """{facts}\n""" + template
+prompt_template = PromptTemplate(input_variables=["facts"], template=template)
+answer_chain = LLMChain(llm=llm, prompt=prompt_template)
+overall_chain = SimpleSequentialChain(
+    chains=[question_chain, assumptions_chain, fact_checker_chain, answer_chain],
+    verbose=True,
+)
+
+print(overall_chain.run(user_question))
+
+##################
+
+
 
 #import model class and tokenizer
 from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration
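
A note on running this commit: the HuggingFaceHub wrapper needs a Hugging Face API token at construction time. Below is a minimal setup sketch, assuming the token is supplied through the standard HUGGINGFACEHUB_API_TOKEN environment variable; this step is not shown in the commit itself, and the token value is a hypothetical placeholder.

    import os

    # langchain.llms.HuggingFaceHub falls back to the HUGGINGFACEHUB_API_TOKEN
    # environment variable when no token is passed explicitly.
    # "hf_..." is a hypothetical placeholder, not part of the commit.
    os.environ.setdefault("HUGGINGFACEHUB_API_TOKEN", "hf_...")

    from langchain.llms import HuggingFaceHub

    llm = HuggingFaceHub(
        repo_id="google/flan-t5-small",
        model_kwargs={"temperature": 0.1, "max_new_tokens": 250},
    )

    # Each SimpleSequentialChain step feeds its single output string into the
    # next chain's single input variable, so the pipeline runs as:
    # question -> statement -> assumptions -> fact checks -> final answer.

Because SimpleSequentialChain allows exactly one input and one output per step, the final prompt cannot take the question as a second template variable; the commit instead bakes the question into the template string with str.format and leaves only {facts} for the chain to fill.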