Chris4K commited on
Commit
55d75c1
1 Parent(s): 425a4a3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +34 -6
app.py CHANGED
@@ -1,21 +1,28 @@
 
 
1
  # https://chat.lmsys.org/?leaderboard
 
 
 
 
 
 
 
2
  import langchain
3
  import transformers
 
 
4
  # https://huggingface.co/spaces/joyson072/LLm-Langchain/blob/main/app.py
5
  from langchain.llms import HuggingFaceHub
6
 
7
 
8
  # for the chain and prompt
9
  from langchain.prompts import PromptTemplate
10
- from langchain.llms import HuggingFaceHub
11
  from langchain.chains import LLMChain
12
 
13
  #import model class and tokenizer
14
  from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration
15
 
16
- #import model class and tokenizer
17
- from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration
18
-
19
 
20
  ###
21
 # Definition of different purpose prompts
@@ -63,7 +70,28 @@ model_id_7 = "oliverguhr/german-sentiment-bert"
63
  # repo_id= model_id_7,
64
  # model_kwargs={"temperature":0.9 }
65
  #)
66
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
67
  #sentiment_prompt = PromptTemplate(
68
  # input_variables=["text_input"],
69
  # template="Extract the key facts out of this text. Don't include opinions. Give each fact a number and keep them short sentences. :\n\n {text_input}"
@@ -107,7 +135,7 @@ llm_factextract = HuggingFaceHub(
107
  # repo_id="google/flan-ul2",
108
  repo_id="google/flan-t5-small",
109
  model_kwargs={"temperature":0.1,
110
- "max_new_tokens":256})
111
 
112
  fact_extraction_prompt = PromptTemplate(
113
  input_variables=["text_input"],
 
1
+ ###
2
+ # Elo based comparison of models
3
  # https://chat.lmsys.org/?leaderboard
4
+ ###
5
+
6
+ ##
7
+ # Libraries
8
+ # Langchain - https://python.langchain.com/docs/get_started/introduction.html
9
+ # Used for simplifying calls and tasks
10
+ ##
11
  import langchain
12
  import transformers
13
+
14
+
15
  # https://huggingface.co/spaces/joyson072/LLm-Langchain/blob/main/app.py
16
  from langchain.llms import HuggingFaceHub
17
 
18
 
19
  # for the chain and prompt
20
  from langchain.prompts import PromptTemplate
 
21
  from langchain.chains import LLMChain
22
 
23
  #import model class and tokenizer
24
  from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration
25
 
 
 
 
26
 
27
  ###
28
 # Definition of different purpose prompts
 
70
  # repo_id= model_id_7,
71
  # model_kwargs={"temperature":0.9 }
72
  #)
73
+
74
from transformers import pipeline

# Sentiment-analysis pipeline over the German sentiment model selected above
# (model_id_7, defined earlier in the file).
# Bug fix: "sentiment" is not a registered transformers pipeline task name —
# the correct task string is "sentiment-analysis".
pipe = pipeline("sentiment-analysis", model=model_id_7)
#pipe = pipeline("translation", model="Helsinki-NLP/opus-mt-en-es")

def predict(text):
    """Return the sentiment label (e.g. "positive"/"negative") for *text*.

    Bug fix: a sentiment pipeline returns dicts with "label" and "score"
    keys; the original indexed "translation_text", which only exists in the
    output of translation pipelines and would raise KeyError here.
    """
    return pipe(text)[0]["label"]
81
+
82
# Minimal Gradio UI wrapping predict(): one text input, one text output.
# NOTE(review): assumes `gr` (gradio) is imported elsewhere in this file —
# no `import gradio as gr` is visible in this chunk; confirm.
demo = gr.Interface(
    fn=predict,
    inputs='text',
    outputs='text',
)
# Bug fix: the original ended with ".launch" — a bare attribute access that
# is never called, so the interface would never start. It must be invoked.
demo.launch()
87
+
88
# Remote inference handle for the sentiment model.
# Bug fix: the original bound BOTH the loaded interface and the function
# below to the single name `sentiment`; the `def` shadowed the interface,
# so `sentiment.predict(...)` inside the function resolved to the function
# object itself and failed with AttributeError. The interface is renamed.
sentiment_model = gr.load(model_id_7)

def sentiment(message):
    """Predict, print, and return the sentiment label for *message*."""
    sentiment_label = sentiment_model.predict(message)
    print(sentiment_label)
    return sentiment_label
94
+
95
  #sentiment_prompt = PromptTemplate(
96
  # input_variables=["text_input"],
97
  # template="Extract the key facts out of this text. Don't include opinions. Give each fact a number and keep them short sentences. :\n\n {text_input}"
 
135
  # repo_id="google/flan-ul2",
136
  repo_id="google/flan-t5-small",
137
  model_kwargs={"temperature":0.1,
138
+ "max_new_tokens":250})
139
 
140
  fact_extraction_prompt = PromptTemplate(
141
  input_variables=["text_input"],