Lord-Raven
committed on
Commit
·
527cc9f
1
Parent(s):
260fdda
Simplifying.
Browse files- app.py +6 -9
- requirements.txt +1 -2
app.py
CHANGED
@@ -1,5 +1,4 @@
|
|
1 |
import gradio as gr
|
2 |
-
import json
|
3 |
from transformers import AutoTokenizer, pipeline
|
4 |
from optimum.onnxruntime import ORTModelForSequenceClassification
|
5 |
|
@@ -10,21 +9,19 @@ model = ORTModelForSequenceClassification.from_pretrained(model_id, file_name=fi
|
|
10 |
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
11 |
|
12 |
classifier = pipeline(
|
13 |
-
task="text-classification",
|
14 |
-
model=model,
|
15 |
-
tokenizer=tokenizer,
|
16 |
-
top_k=None,
|
17 |
-
function_to_apply="sigmoid",
|
18 |
-
binary_output=True,
|
19 |
)
|
20 |
|
21 |
def predict(param_0):
|
22 |
results = classifier(param_0)
|
23 |
-
|
24 |
"label": results[0][0]["label"],
|
25 |
"confidences": results[0]
|
26 |
}
|
27 |
-
return response
|
28 |
|
29 |
demo = gr.Interface(
|
30 |
fn = predict,
|
|
|
1 |
import gradio as gr
|
|
|
2 |
from transformers import AutoTokenizer, pipeline
|
3 |
from optimum.onnxruntime import ORTModelForSequenceClassification
|
4 |
|
|
|
9 |
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
10 |
|
11 |
# Multi-label ONNX text classifier: sigmoid is applied to every label's logit
# and top_k=None returns scores for all labels rather than only the best one.
classifier = pipeline(
    "text-classification",
    model=model,
    tokenizer=tokenizer,
    top_k=None,
    function_to_apply="sigmoid",
)
|
18 |
|
19 |
def predict(param_0):
    """Classify the input text and package the result for the Gradio UI.

    Returns a dict holding the highest-scoring label and the full,
    per-label score list produced by the pipeline.
    """
    scores = classifier(param_0)
    per_label = scores[0]  # pipeline wraps one input's results in a list
    return {
        "label": per_label[0]["label"],
        "confidences": per_label,
    }
|
|
|
25 |
|
26 |
demo = gr.Interface(
|
27 |
fn = predict,
|
requirements.txt
CHANGED
@@ -1,5 +1,4 @@
|
|
1 |
torch==2.4.0
|
2 |
huggingface_hub==0.26.0
|
3 |
transformers==4.36
|
4 |
-
optimum[exporters,onnxruntime]==1.21.3
|
5 |
-
json5==0.9.25
|
|
|
1 |
torch==2.4.0
|
2 |
huggingface_hub==0.26.0
|
3 |
transformers==4.36
|
4 |
+
optimum[exporters,onnxruntime]==1.21.3
|
|