art-manuh committed
Commit 65f2355 · verified · 1 Parent(s): 0bd265b

Update app.py

Files changed (1): app.py +13 -8
app.py CHANGED
@@ -9,24 +9,29 @@ from keras.utils import pad_sequences
 
 max_len = 200
 
-# set the vocabulary mapping on a new TextVectorization layer
+# Load the tokenizer
 with open('tokenizer.pickle', 'rb') as handle:
     tokenizer = pickle.load(handle)
+
+# Load the pre-trained model
 model = tf.keras.models.load_model('toxic.h5')
 
+arr = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]
 
-arr=["toxic","severe_toxic","obscene threat","insult","identity_hate"]
 def score_comment(comment):
     sequences = tokenizer.texts_to_sequences([comment])
-    inp = pad_sequences(sequences,maxlen=max_len)
+    inp = pad_sequences(sequences, maxlen=max_len)
     results = model.predict(inp)
     text = ''
     for i in range(len(arr)):
-        text += '{}: {}\n'.format(arr[i], results[0][i]>0.5)
+        text += '{}: {}\n'.format(arr[i], results[0][i] > 0.5)
     return text
 
-interface = gr.Interface(fn=score_comment,
-                         inputs=gr.inputs.Textbox(lines=2, placeholder='Comment to score'),
-                         outputs='text')
+# Update the Gradio interface to use the latest syntax
+inputs = gr.Textbox(lines=2, placeholder='Comment to score')
+outputs = gr.Textbox()
+
+interface = gr.Interface(fn=score_comment, inputs=inputs, outputs=outputs)
 
-interface.launch(debug=True,share=True)
+# Launch the interface
+interface.launch(debug=True, share=True)
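For reference, a minimal, self-contained sketch of the Gradio wiring this diff lands on (top-level gr.Textbox components passed directly to gr.Interface), with a stub scoring function standing in for the real tokenizer and Keras model; the stub output below is illustrative only:

import gradio as gr

labels = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]

def score_comment(comment):
    # Stub standing in for the real pipeline in app.py, which tokenizes the
    # comment, pads it to max_len, and thresholds the model's predictions.
    return '\n'.join('{}: {}'.format(label, False) for label in labels)

# Current Gradio syntax: gr.Textbox replaces the old gr.inputs.Textbox / 'text' spec.
inputs = gr.Textbox(lines=2, placeholder='Comment to score')
outputs = gr.Textbox()

interface = gr.Interface(fn=score_comment, inputs=inputs, outputs=outputs)

if __name__ == '__main__':
    interface.launch(debug=True, share=True)

The gr.inputs namespace is deprecated and no longer available in recent Gradio releases, so passing the components directly, as the updated app.py does, keeps the same two-line text box and plain-text output while working on current installs.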