Update app.py
app.py CHANGED
@@ -7,6 +7,7 @@ import re
 import torch
 import pickle
 import json
+import gradio as gr
 
 # Define paths
 MODEL_PATH = "spam_model.pth"
@@ -57,7 +58,8 @@ print("✅ Model and vocabulary loaded successfully!")
 def simple_tokenize(text):
     return re.findall(r"\b\w+\b", text.lower())
 
-def predict(text, model, vocab, max_len=100):
+def predict(text):
+    max_len=100
     model.eval()
     tokens = simple_tokenize(text.lower())
     token_ids = [vocab.get(word, vocab['<UNK>']) for word in tokens]
@@ -67,8 +69,12 @@ def predict(text, model, vocab, max_len=100):
     with torch.no_grad():
         output = model(input_tensor).squeeze().item()
 
-
+    predicted_label = "Spam" if output > 0.5 else "Ham"
+    return f"Predicted Class : {predicted_label} "
 
-
-
-
+gr.Interface(
+    fn=predict,
+    inputs="text",
+    outputs="text",
+    title="Encoder Spam Classifier"
+).launch()
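
For readers following the change, the sketch below shows roughly how the updated inference path fits together after this commit. It reuses the committed lines (simple_tokenize, the narrowed predict(text) signature, the 0.5 threshold, and the gr.Interface wiring). The padding and tensor-construction step between the two hunks is not visible in the diff, so the truncate/pad logic and the '<PAD>' token shown here are assumptions, as are the module-level model and vocab that app.py loads earlier (before the "Model and vocabulary loaded successfully!" message). A minimal sketch, not the exact committed file:

# Sketch of the post-commit inference path.
# Assumes `model` and `vocab` are the objects app.py loads earlier at module level.
import re
import torch
import gradio as gr

def simple_tokenize(text):
    return re.findall(r"\b\w+\b", text.lower())

def predict(text):
    max_len = 100
    model.eval()
    tokens = simple_tokenize(text.lower())
    token_ids = [vocab.get(word, vocab['<UNK>']) for word in tokens]

    # Assumed step (elided by the diff context): truncate/pad to max_len and
    # build a batch-of-one LongTensor for the encoder.
    token_ids = token_ids[:max_len]
    token_ids += [vocab.get('<PAD>', 0)] * (max_len - len(token_ids))
    input_tensor = torch.tensor([token_ids], dtype=torch.long)

    with torch.no_grad():
        output = model(input_tensor).squeeze().item()

    predicted_label = "Spam" if output > 0.5 else "Ham"
    return f"Predicted Class : {predicted_label}"

gr.Interface(
    fn=predict,
    inputs="text",
    outputs="text",
    title="Encoder Spam Classifier",
).launch()

The signature change from predict(text, model, vocab, max_len=100) to predict(text) matches how Gradio calls fn: it passes only the values of the declared input components, so model and vocab must be reachable as module-level globals and max_len is now fixed inside the function.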