Chris4K committed on
Commit b468ecf
1 Parent(s): b7555d0

Update app.py

Files changed (1)
  1. app.py +36 -36
app.py CHANGED
@@ -35,42 +35,42 @@ llm = HuggingFaceHub(
      "max_new_tokens":250})


- # Chain 1: Generating a rephrased version of the user's question
- template = """{question}\n\n"""
- prompt_template = PromptTemplate(input_variables=["question"], template=template)
- question_chain = LLMChain(llm=llm, prompt=prompt_template)
-
- # Chain 2: Generating assumptions made in the statement
- template = """Here is a statement:
- {statement}
- Make a bullet point list of the assumptions you made when producing the above statement.\n\n"""
- prompt_template = PromptTemplate(input_variables=["statement"], template=template)
- assumptions_chain = LLMChain(llm=llm, prompt=prompt_template)
- assumptions_chain_seq = SimpleSequentialChain(
- chains=[question_chain, assumptions_chain], verbose=True
- )
-
- # Chain 3: Fact checking the assumptions
- template = """Here is a bullet point list of assertions:
- {assertions}
- For each assertion, determine whether it is true or false. If it is false, explain why.\n\n"""
- prompt_template = PromptTemplate(input_variables=["assertions"], template=template)
- fact_checker_chain = LLMChain(llm=llm, prompt=prompt_template)
- fact_checker_chain_seq = SimpleSequentialChain(
- chains=[question_chain, assumptions_chain, fact_checker_chain], verbose=True
- )
-
- # Final Chain: Generating the final answer to the user's question based on the facts and assumptions
- template = """In light of the above facts, how would you answer the question '{}'""".format(
- user_question
- )
- template = """{facts}\n""" + template
- prompt_template = PromptTemplate(input_variables=["facts"], template=template)
- answer_chain = LLMChain(llm=llm, prompt=prompt_template)
- overall_chain = SimpleSequentialChain(
- chains=[question_chain, assumptions_chain, fact_checker_chain, answer_chain],
- verbose=True,
- )
+ # Chain 1: Generating a rephrased version of the user's question
+ template = """{question}\n\n"""
+ prompt_template = PromptTemplate(input_variables=["question"], template=template)
+ question_chain = LLMChain(llm=llm, prompt=prompt_template)
+
+ # Chain 2: Generating assumptions made in the statement
+ template = """Here is a statement:
+ {statement}
+ Make a bullet point list of the assumptions you made when producing the above statement.\n\n"""
+ prompt_template = PromptTemplate(input_variables=["statement"], template=template)
+ assumptions_chain = LLMChain(llm=llm, prompt=prompt_template)
+ assumptions_chain_seq = SimpleSequentialChain(
+ chains=[question_chain, assumptions_chain], verbose=True
+ )
+
+ # Chain 3: Fact checking the assumptions
+ template = """Here is a bullet point list of assertions:
+ {assertions}
+ For each assertion, determine whether it is true or false. If it is false, explain why.\n\n"""
+ prompt_template = PromptTemplate(input_variables=["assertions"], template=template)
+ fact_checker_chain = LLMChain(llm=llm, prompt=prompt_template)
+ fact_checker_chain_seq = SimpleSequentialChain(
+ chains=[question_chain, assumptions_chain, fact_checker_chain], verbose=True
+ )
+
+ # Final Chain: Generating the final answer to the user's question based on the facts and assumptions
+ template = """In light of the above facts, how would you answer the question '{}'""".format(
+ user_question
+ )
+ template = """{facts}\n""" + template
+ prompt_template = PromptTemplate(input_variables=["facts"], template=template)
+ answer_chain = LLMChain(llm=llm, prompt=prompt_template)
+ overall_chain = SimpleSequentialChain(
+ chains=[question_chain, assumptions_chain, fact_checker_chain, answer_chain],
+ verbose=True,
+ )

      print(overall_chain.run("What is the capitol of the usa?"))
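For readers who want to run this pipeline outside the Space, the sketch below pulls the hunk's four chains together into one self-contained script. It is a minimal sketch, not the committed app.py: the imports, the google/flan-t5-xxl repo id, the API-token handling, and the user_question value are assumptions filled in here, since the diff only shows this hunk and references user_question without ever defining it.

# Minimal runnable sketch (assumptions noted inline); not the committed app.py.
from langchain.llms import HuggingFaceHub              # older LangChain API, matching the diff
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain, SimpleSequentialChain

# Assumption: HUGGINGFACEHUB_API_TOKEN is already exported in the environment,
# and the repo_id below is a placeholder -- the diff does not show the real one.
llm = HuggingFaceHub(
    repo_id="google/flan-t5-xxl",
    model_kwargs={"temperature": 0.7, "max_new_tokens": 250},
)

# The committed hunk formats user_question into the final prompt without
# defining it, so the sketch defines it up front.
user_question = "What is the capital of the USA?"

# Chain 1: pass the user's question straight to the model.
question_prompt = PromptTemplate(input_variables=["question"], template="{question}\n\n")
question_chain = LLMChain(llm=llm, prompt=question_prompt)

# Chain 2: list the assumptions behind the model's statement.
assumptions_prompt = PromptTemplate(
    input_variables=["statement"],
    template=(
        "Here is a statement:\n{statement}\n"
        "Make a bullet point list of the assumptions you made "
        "when producing the above statement.\n\n"
    ),
)
assumptions_chain = LLMChain(llm=llm, prompt=assumptions_prompt)

# Chain 3: fact-check each assumption.
fact_checker_prompt = PromptTemplate(
    input_variables=["assertions"],
    template=(
        "Here is a bullet point list of assertions:\n{assertions}\n"
        "For each assertion, determine whether it is true or false. "
        "If it is false, explain why.\n\n"
    ),
)
fact_checker_chain = LLMChain(llm=llm, prompt=fact_checker_prompt)

# Final chain: answer the original question in light of the checked facts.
answer_prompt = PromptTemplate(
    input_variables=["facts"],
    template=(
        "{facts}\n"
        "In light of the above facts, how would you answer the question "
        f"'{user_question}'"
    ),
)
answer_chain = LLMChain(llm=llm, prompt=answer_prompt)

# SimpleSequentialChain pipes each chain's single string output into the next
# chain's single input, which is why every prompt above declares exactly one variable.
overall_chain = SimpleSequentialChain(
    chains=[question_chain, assumptions_chain, fact_checker_chain, answer_chain],
    verbose=True,
)

print(overall_chain.run(user_question))

Unlike the committed code, the sketch drops the intermediate assumptions_chain_seq and fact_checker_chain_seq objects, since only overall_chain is ever run.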