Chris4K committed
Commit 3ae785a
1 Parent(s): 195a960

Update app.py

Files changed (1)
  1. app.py +15 -15
app.py CHANGED
@@ -59,21 +59,21 @@ model_id_6 = "sbcBI/sentiment_analysis_model"
 model_id_7 = "oliverguhr/german-sentiment-bert"
 
 # https://colab.research.google.com/drive/1hrS6_g14EcOD4ezwSGlGX2zxJegX5uNX#scrollTo=NUwUR9U7qkld
-llm_hf_sentiment = HuggingFaceHub(
-    repo_id=model_id_7,
-    model_kwargs={"temperature": 0.9}
-)
+#llm_hf_sentiment = HuggingFaceHub(
+#    repo_id=model_id_7,
+#    model_kwargs={"temperature": 0.9}
+#)
 
-sentiment_prompt = PromptTemplate(
-    input_variables=["text_input"],
-    template="Extract the key facts out of this text. Don't include opinions. Give each fact a number and keep them short sentences. :\n\n {text_input}"
-)
+#sentiment_prompt = PromptTemplate(
+#    input_variables=["text_input"],
+#    template="Extract the key facts out of this text. Don't include opinions. Give each fact a number and keep them short sentences. :\n\n {text_input}"
+#)
 
-def sentiment(message):
-    sentiment_chain = LLMChain(llm=llm_hf_sentiment, prompt=sentiment_prompt)
-    facts = sentiment_chain.run(message)
-    print(facts)
-    return facts
+#def sentiment(message):
+#    sentiment_chain = LLMChain(llm=llm_hf_sentiment, prompt=sentiment_prompt)
+#    facts = sentiment_chain.run(message)
+#    print(facts)
+#    return facts
 
 
 
@@ -104,8 +104,8 @@ text = "Why did the chicken cross the road?"
 # https://colab.research.google.com/drive/1hrS6_g14EcOD4ezwSGlGX2zxJegX5uNX#scrollTo=NUwUR9U7qkld
 llm_factextract = HuggingFaceHub(
 
-    # repo_id="google/flan-ul2",
-    repo_id="google/flan-t5-small",
+    repo_id="google/flan-ul2",
+    # repo_id="google/flan-t5-small",
     model_kwargs={"temperature": 0.1,
                   "max_new_tokens": 256})
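
For context, below is a minimal sketch of how the updated llm_factextract could be wired into an LLMChain, following the same pattern as the removed sentiment() helper. The imports, the token check, and the fact_prompt / fact_extraction names are illustrative assumptions, not code shown in this commit.

import os

from langchain.chains import LLMChain
from langchain.llms import HuggingFaceHub
from langchain.prompts import PromptTemplate

# Assumes a Hugging Face Hub token is already configured; the diff does not show this.
assert os.environ.get("HUGGINGFACEHUB_API_TOKEN"), "set HUGGINGFACEHUB_API_TOKEN first"

# Matches the new side of the second hunk: flan-ul2 instead of flan-t5-small.
llm_factextract = HuggingFaceHub(
    repo_id="google/flan-ul2",
    model_kwargs={"temperature": 0.1, "max_new_tokens": 256},
)

# Hypothetical prompt name (not part of the commit), reusing the fact-extraction
# template that the commented-out sentiment_prompt contained.
fact_prompt = PromptTemplate(
    input_variables=["text_input"],
    template=(
        "Extract the key facts out of this text. Don't include opinions. "
        "Give each fact a number and keep them short sentences. :\n\n {text_input}"
    ),
)

def fact_extraction(message):
    # Build the chain and run the model on the input text, the same way the
    # removed sentiment() helper ran its chain.
    fact_chain = LLMChain(llm=llm_factextract, prompt=fact_prompt)
    facts = fact_chain.run(message)
    print(facts)
    return facts

if __name__ == "__main__":
    fact_extraction("Why did the chicken cross the road? To get to the other side.")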