Alexvatti committed on
Commit
73927d9
·
verified ·
1 Parent(s): 5bf31ba

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -5
app.py CHANGED
@@ -7,6 +7,7 @@ import re
7
  import torch
8
  import pickle
9
  import json
 
10
 
11
  # Define paths
12
  MODEL_PATH = "spam_model.pth"
@@ -57,7 +58,8 @@ print("✅ Model and vocabulary loaded successfully!")
57
  def simple_tokenize(text):
58
  return re.findall(r"\b\w+\b", text.lower())
59
 
60
- def predict(text, model, vocab, max_len=100):
 
61
  model.eval()
62
  tokens = simple_tokenize(text.lower())
63
  token_ids = [vocab.get(word, vocab['<UNK>']) for word in tokens]
@@ -67,8 +69,12 @@ def predict(text, model, vocab, max_len=100):
67
  with torch.no_grad():
68
  output = model(input_tensor).squeeze().item()
69
 
70
- return "Spam" if output > 0.5 else "Ham"
 
71
 
72
- # Test prediction
73
- sample_text = "FreeMsg Hey there darling it's been 3 week's now and no word back! I'd like some fun you up for it still? Tb ok! XxX std chgs to send, £1.50 to rcv"
74
- print(f"Prediction: {predict(sample_text, model, vocab)}")
 
 
 
 
7
  import torch
8
  import pickle
9
  import json
10
+ import gradio as gr
11
 
12
  # Define paths
13
  MODEL_PATH = "spam_model.pth"
 
58
def simple_tokenize(text):
    """Split *text* into lowercase word tokens.

    Lowercases the input and matches word-boundary-delimited runs of
    word characters, so punctuation is dropped (e.g. "week's" yields
    ["week", "s"]).
    """
    lowered = text.lower()
    # finditer + group(0) is equivalent to re.findall for a pattern
    # with no capturing groups: both yield the full matched substrings.
    return [match.group(0) for match in re.finditer(r"\b\w+\b", lowered)]
60
 
61
+ def predict(text):
62
+ max_len=100
63
  model.eval()
64
  tokens = simple_tokenize(text.lower())
65
  token_ids = [vocab.get(word, vocab['<UNK>']) for word in tokens]
 
69
  with torch.no_grad():
70
  output = model(input_tensor).squeeze().item()
71
 
72
+ predicted_label = "Spam" if output > 0.5 else "Ham"
73
+ return f"Predicted Class : {predicted_label} "
74
 
75
# Build and launch the Gradio UI: one free-text input box, one text output,
# wired to the predict() function defined earlier in this file.
demo = gr.Interface(
    fn=predict,
    inputs="text",
    outputs="text",
    title="Encoder Spam Classifier",
)
demo.launch()