Hugging Face Spaces (status: Sleeping)
Commit: "Update app.py" — Browse files
File: app.py (CHANGED)
@@ -9,24 +9,29 @@ from keras.utils import pad_sequences
|
|
9 |
|
10 |
max_len = 200
|
11 |
|
12 |
-
#
|
13 |
with open('tokenizer.pickle', 'rb') as handle:
|
14 |
tokenizer = pickle.load(handle)
|
|
|
|
|
15 |
model = tf.keras.models.load_model('toxic.h5')
|
16 |
|
|
|
17 |
|
18 |
-
arr=["toxic","severe_toxic","obscene threat","insult","identity_hate"]
|
19 |
def score_comment(comment):
|
20 |
sequences = tokenizer.texts_to_sequences([comment])
|
21 |
-
inp = pad_sequences(sequences,maxlen=max_len)
|
22 |
results = model.predict(inp)
|
23 |
text = ''
|
24 |
for i in range(len(arr)):
|
25 |
-
text += '{}: {}\n'.format(arr[i], results[0][i]>0.5)
|
26 |
return text
|
27 |
|
28 |
-
interface
|
29 |
-
|
30 |
-
|
|
|
|
|
31 |
|
32 |
-
interface
|
|
|
|
# Maximum token-sequence length fed to the model.
max_len = 200

# Restore the fitted tokenizer from disk.
# NOTE(review): pickle.load on an untrusted file is unsafe — confirm
# tokenizer.pickle is shipped with the app and not user-supplied.
with open('tokenizer.pickle', 'rb') as handle:
    tokenizer = pickle.load(handle)

# Restore the trained toxicity classifier.
model = tf.keras.models.load_model('toxic.h5')

# Output labels, one per model output unit — presumably in the same
# order used at training time; verify against the training script.
arr = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]
|
20 |
|
|
|
21 |
def score_comment(comment):
    """Score a single comment against each toxicity label.

    Args:
        comment: Raw text of one comment.

    Returns:
        A newline-terminated string with one ``label: True/False`` line per
        entry in ``arr``; ``True`` means the model's probability for that
        label exceeded 0.5.
    """
    # Tokenize and pad to the fixed length the model expects.
    sequences = tokenizer.texts_to_sequences([comment])
    inp = pad_sequences(sequences, maxlen=max_len)
    results = model.predict(inp)
    # Pair each label with its probability via zip instead of indexing with
    # range(len(...)), and build the result with join instead of repeated
    # string concatenation in a loop.
    return ''.join(
        '{}: {}\n'.format(label, prob > 0.5)
        for label, prob in zip(arr, results[0])
    )
|
29 |
|
30 |
+
# Build the Gradio UI using the current component API.
inputs = gr.Textbox(lines=2, placeholder='Comment to score')
outputs = gr.Textbox()

interface = gr.Interface(fn=score_comment, inputs=inputs, outputs=outputs)

# Start the app; share=True publishes a temporary public URL.
interface.launch(debug=True, share=True)