import torch
import torch.nn as nn
import re
import pickle
import gradio as gr
import spaces

# Define paths
MODEL_PATH = "spam_model.pth"
VOCAB_PATH = "vocab.pkl"

class TransformerEncoder(nn.Module):
    def __init__(self, d_model=256, num_heads=1, d_ff=512, num_layers=1, vocab_size=10000, max_seq_len=100, dropout=0.1):
        super().__init__()
        
        # Embedding & Positional Encoding
        self.embedding = nn.Embedding(vocab_size, d_model)
        self.positional_encoding = nn.Parameter(torch.zeros(1, max_seq_len, d_model))

        # Transformer Encoder Layers
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=d_model,
            nhead=num_heads,
            dim_feedforward=d_ff,
            dropout=dropout,
            activation='relu',
            batch_first=True
        )
        
        self.encoder = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)

        # Classification Head
        self.fc = nn.Linear(d_model, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        x = self.embedding(x) + self.positional_encoding[:, :x.size(1), :]
        x = self.encoder(x)  # Pass through transformer
        x = x[:, 0, :]  # Take first token's output (CLS token equivalent)
        x = self.fc(x)
        return self.sigmoid(x)  # Binary classification (spam or not)
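
# A minimal shape sketch (illustrative only, using the constructor defaults):
#   m = TransformerEncoder()
#   dummy = torch.randint(0, 10000, (2, 100))  # batch of 2 sequences, 100 token ids each
#   m(dummy).shape  # -> torch.Size([2, 1]), one spam probability per sequence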

# Load vocabulary (a token -> id dict that includes '<UNK>' and '<PAD>' entries)
with open(VOCAB_PATH, "rb") as f:
    vocab = pickle.load(f)

# Load model (fall back to CPU when CUDA is unavailable)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = TransformerEncoder(d_model=256, num_heads=1, num_layers=1, vocab_size=len(vocab), max_seq_len=100).to(device)
model.load_state_dict(torch.load(MODEL_PATH, map_location=device))
model.eval()  # Set model to evaluation mode

print("✅ Model and vocabulary loaded successfully!")

def simple_tokenize(text):
    return re.findall(r"\b\w+\b", text.lower())
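# e.g. simple_tokenize("Win a FREE prize!") -> ['win', 'a', 'free', 'prize']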

@spaces.GPU
def predict(text):
    max_len = 100
    tokens = simple_tokenize(text)  # simple_tokenize already lowercases
    token_ids = [vocab.get(word, vocab['<UNK>']) for word in tokens]
    token_ids = token_ids[:max_len]  # Truncate so the positional encoding isn't exceeded
    token_ids += [vocab['<PAD>']] * (max_len - len(token_ids))  # Pad if needed
    input_tensor = torch.tensor([token_ids], dtype=torch.long).to(device)

    with torch.no_grad():
        output = model(input_tensor).squeeze().item()

    predicted_label = "Spam" if output > 0.5 else "Ham"
    return f"Predicted Class: {predicted_label}"

gr.Interface(
    fn=predict,
    inputs="text",
    outputs="text",
    title="Encoder Spam Classifier"
).launch()