Chris4K committed on
Commit
8031095
1 Parent(s): 7463d8e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +40 -15
app.py CHANGED
@@ -56,11 +56,28 @@ model_id_3 = "distilbert-base-uncased-finetuned-sst-2-english"
56
  model_id_4 = "lordtt13/emo-mobilebert"
57
  model_id_5 = "juliensimon/reviews-sentiment-analysis"
58
  model_id_6 = "sbcBI/sentiment_analysis_model"
59
- model_id_6 = "oliverguhr/german-sentiment-bert"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
60
 
61
  ####
62
- ## Chat models
63
- # https://huggingface.co/spaces/CK42/sentiment-model-comparison
64
  # 1 seem best for testing
65
  ####
66
  #download and setup the model and tokenizer
@@ -68,14 +85,17 @@ model_name = 'facebook/blenderbot-400M-distill'
68
  tokenizer = BlenderbotTokenizer.from_pretrained(model_name)
69
  model = BlenderbotForConditionalGeneration.from_pretrained(model_name)
70
 
 
 
 
 
 
 
 
71
  chat_model_facebook_blenderbot_400M_distill = "facebook/blenderbot-400M-distill"
72
  chat_model_HenryJJ_vincua_13b = "HenryJJ/vincua-13b"
73
 
74
- # https://colab.research.google.com/drive/1hrS6_g14EcOD4ezwSGlGX2zxJegX5uNX#scrollTo=NUwUR9U7qkld
75
- #llm_hf = HuggingFaceHub(
76
- # repo_id= chat_model_HenryJJ_vincua_13b,
77
- # model_kwargs={"temperature":0.9 }
78
- #)
79
 
80
 
81
 
@@ -89,17 +109,22 @@ text = "Why did the chicken cross the road?"
89
  ###
90
  ## FACT EXTRACTION
91
  ###
92
-
 
 
 
 
 
93
  fact_extraction_prompt = PromptTemplate(
94
  input_variables=["text_input"],
95
  template="Extract the key facts out of this text. Don't include opinions. Give each fact a number and keep them short sentences. :\n\n {text_input}"
96
  )
97
 
98
- #fact_extraction_chain = LLMChain(llm=llm_hf, prompt=fact_extraction_prompt)
99
-
100
- #facts = fact_extraction_chain.run(text + " " +output_question_1)
101
-
102
- #print(facts)
103
 
104
 
105
  def func (message):
@@ -108,5 +133,5 @@ def func (message):
108
  return tokenizer.decode(result[0])
109
 
110
  import gradio as gr
111
- app = gr.Interface(fn=func, inputs="textbox", outputs="textbox", title="Conversation Bot")
112
  app.launch()
 
56
  model_id_4 = "lordtt13/emo-mobilebert"
57
  model_id_5 = "juliensimon/reviews-sentiment-analysis"
58
  model_id_6 = "sbcBI/sentiment_analysis_model"
59
+ model_id_7 = "oliverguhr/german-sentiment-bert"
60
+
61
+ # https://colab.research.google.com/drive/1hrS6_g14EcOD4ezwSGlGX2zxJegX5uNX#scrollTo=NUwUR9U7qkld
62
+ llm_hf_sentiment = HuggingFaceHub(
63
+ repo_id= model_id_7,
64
+ model_kwargs={"temperature":0.9 }
65
+ )
66
+
67
+ fact_extraction_prompt = PromptTemplate(
68
+ input_variables=["text_input"],
69
+ template="Extract the key facts out of this text. Don't include opinions. Give each fact a number and keep them short sentences. :\n\n {text_input}"
70
+ )
71
+
72
def sentiment(llm_factextract, message):
    """Run a sentiment LLMChain over ``message`` and return the raw chain output.

    Args:
        llm_factextract: LangChain LLM instance (e.g. the ``HuggingFaceHub``
            model built from ``model_id_7`` above) used to back the chain.
        message: Input text to analyze.

    Returns:
        The chain's string output (also printed, matching the original
        debug behavior).
    """
    # Bug fix: the original built the chain with the undefined names `llm`
    # and `sentiment_prompt`, raising NameError on every call. Use the LLM
    # actually passed in, and the prompt defined at module level.
    # NOTE(review): a dedicated sentiment prompt (not the fact-extraction
    # template) is probably what was intended here — confirm with the author.
    sentiment_chain = LLMChain(llm=llm_factextract, prompt=fact_extraction_prompt)
    facts = sentiment_chain.run(message)
    print(facts)
    return facts
77
+
78
 
79
  ####
80
+ ## models
 
81
  # 1 seem best for testing
82
  ####
83
  #download and setup the model and tokenizer
 
85
  tokenizer = BlenderbotTokenizer.from_pretrained(model_name)
86
  model = BlenderbotForConditionalGeneration.from_pretrained(model_name)
87
 
88
+
89
+
90
+ ####
91
+ ## Chat models
92
+ # https://huggingface.co/spaces/CK42/sentiment-model-comparison
93
+ # 1 seem best for testing
94
+ ####
95
  chat_model_facebook_blenderbot_400M_distill = "facebook/blenderbot-400M-distill"
96
  chat_model_HenryJJ_vincua_13b = "HenryJJ/vincua-13b"
97
 
98
+
 
 
 
 
99
 
100
 
101
 
 
109
  ###
110
  ## FACT EXTRACTION
111
  ###
112
+ # https://colab.research.google.com/drive/1hrS6_g14EcOD4ezwSGlGX2zxJegX5uNX#scrollTo=NUwUR9U7qkld
113
+ llm_factextract = HuggingFaceHub(
114
+ repo_id="google/flan-ul2",
115
+ model_kwargs={"temperature":0.1,
116
+ "max_new_tokens":256})
117
+
118
  fact_extraction_prompt = PromptTemplate(
119
  input_variables=["text_input"],
120
  template="Extract the key facts out of this text. Don't include opinions. Give each fact a number and keep them short sentences. :\n\n {text_input}"
121
  )
122
 
123
def factextraction(llm_factextract, message):
    """Extract numbered key facts from ``message`` using the given LLM.

    Args:
        llm_factextract: LangChain LLM instance (e.g. the ``HuggingFaceHub``
            flan-ul2 model built just above) used to back the chain.
        message: Free text to distill into short, numbered factual sentences
            per ``fact_extraction_prompt``.

    Returns:
        The chain's string output (also printed, matching the original
        debug behavior).
    """
    # Bug fix: the original passed the undefined name `llm` to LLMChain,
    # raising NameError on every call — use the parameter that was passed in.
    fact_extraction_chain = LLMChain(llm=llm_factextract, prompt=fact_extraction_prompt)
    facts = fact_extraction_chain.run(message)
    print(facts)
    return facts
128
 
129
 
130
  def func (message):
 
133
  return tokenizer.decode(result[0])
134
 
135
import gradio as gr

# Bug fix: `factextraction` takes (llm_factextract, message), but Gradio
# passes only the textbox value, so every submission raised TypeError.
# Bind the module-level LLM so the UI callback has a single-argument shape.
app = gr.Interface(
    fn=lambda message: factextraction(llm_factextract, message),
    inputs="textbox",
    outputs="textbox",
    title="Conversation Bot",
)
app.launch()