import gradio as gr
import joblib
import tensorflow as tf
from tensorflow.keras.layers import Layer, Dense
from tensorflow.keras.preprocessing.sequence import pad_sequences


class BetterAttention(Layer):
    """Additive (Bahdanau-style) attention that pools a sequence into a single vector."""

    def __init__(self, units=64, return_attention=False, **kwargs):
        super(BetterAttention, self).__init__(**kwargs)
        self.units = units
        self.return_attention = return_attention
        self.W = Dense(units)
        self.V = Dense(1)

    def call(self, inputs):
        # inputs: (batch, time_steps, features)
        score = self.V(tf.nn.tanh(self.W(inputs)))              # (batch, time_steps, 1)
        attention_weights = tf.nn.softmax(score, axis=1)        # normalized over time
        context_vector = attention_weights * inputs
        context_vector = tf.reduce_sum(context_vector, axis=1)  # (batch, features)
        return (context_vector, attention_weights) if self.return_attention else context_vector
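
    # Optional addition (not in the original script): overriding get_config lets Keras
    # serialize `units` and `return_attention` if the model is saved again in the
    # .keras format; the existing file is loaded below via `custom_objects` either way.
    def get_config(self):
        config = super(BetterAttention, self).get_config()
        config.update({"units": self.units, "return_attention": self.return_attention})
        return config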


# Load the trained model; `custom_objects` maps the layer name stored in the
# .keras file back to the BetterAttention class defined above.
model = tf.keras.models.load_model(
    "sentiment_model.keras",
    custom_objects={"BetterAttention": BetterAttention},
)
tokenizer = joblib.load("tokenizer.joblib")

# Maximum sequence length for padding; this should match the length used when the
# model was trained.
max_len = 40


def predict_sentiment(text):
    # Tokenize and pad the input text to the fixed length the model expects.
    seq = tokenizer.texts_to_sequences([text])
    padded = pad_sequences(seq, maxlen=max_len, padding='post')

    # The model outputs a single sigmoid probability for the positive class.
    pred = float(model.predict(padded, verbose=0)[0][0])

    # Return both class probabilities so gr.Label (num_top_classes=2) can display them.
    return {"Positive": pred, "Negative": 1.0 - pred}
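
# Illustrative usage (values are only examples, not real model output):
#   predict_sentiment("loved every minute of it")
#   -> {"Positive": 0.93, "Negative": 0.07}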


# Wire the prediction function into a simple Gradio interface.
demo = gr.Interface(
    fn=predict_sentiment,
    inputs=gr.Textbox(lines=2, placeholder="Enter a tweet..."),
    outputs=gr.Label(num_top_classes=2),
    title="Sentiment Analysis on Tweets",
    description="Enter a tweet and get the predicted sentiment with a confidence score.",
)

demo.launch()
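# Optional (assumption): demo.launch(share=True) would also create a temporary public
# Gradio link for sharing the demo outside the local machine.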