"""Streamlit demo: Transformers & pretrained models in NLP.

Three sections: (1) an overview of the Transformer architecture,
(2) pretrained models with a live sentiment-analysis demo, and
(3) fine-tuning examples (named-entity recognition and question answering).

Run with: ``streamlit run this_file.py``
"""

import streamlit as st
import torch  # noqa: F401 — kept: transformers uses torch as its backend
from transformers import pipeline

# Page configuration must be the first Streamlit call in the script.
st.set_page_config(page_title='Transformers in NLP', layout='wide')


@st.cache_resource(show_spinner="Loading model…")
def _load_pipeline(task: str, model_name: str):
    """Build and cache a Hugging Face pipeline.

    ``st.cache_resource`` ensures the (large) model weights are downloaded
    and loaded only once per session instead of on every Streamlit rerun.
    """
    return pipeline(task, model=model_name)


def _header(text: str) -> None:
    """Render a centered section header.

    NOTE(review): the HTML originally embedded in these ``st.markdown``
    calls was lost in extraction; this reconstructs a plausible centered
    heading — confirm against the original app's markup.
    """
    st.markdown(
        f"<h2 style='text-align: center;'>{text}</h2>",
        unsafe_allow_html=True,
    )


# Page title
st.markdown(
    "<h1 style='text-align: center;'>🤖 Transformers & Pretrained Models in NLP 🚀</h1>",
    unsafe_allow_html=True,
)

# --- 1. Transformer architecture -----------------------------------------
_header('📌 1. Transformer Architecture')
st.subheader('🔎 Definition:')
st.write("""
The **Transformer architecture** revolutionized NLP by using **self-attention** to process sequences in parallel.
- **Self-attention** enables words to focus on others dynamically.
- The **encoder-decoder** structure is used in tasks like translation.

📜 Introduced in "**Attention is All You Need**" (Vaswani et al., 2017).
""")
st.subheader('🛠️ Key Components:')
st.write("""
- **Encoder**: Processes input tokens into internal representations.
- **Decoder**: Uses encoder outputs to generate predictions.
- **Multi-head Attention**: Allows diverse focus across sequences.
- **Positional Encoding**: Injects sequence order into embeddings.
""")

# --- 2. Pretrained models -------------------------------------------------
_header('📌 2. Pretrained Models')
st.subheader('🔍 Definition:')
st.write("""
Pretrained models leverage vast corpora to understand language patterns.
- **BERT**: Bi-directional context learning for diverse NLP tasks.
- **GPT**: Text generation with autoregressive modeling.
- **RoBERTa**: Optimized BERT variant.
- **T5**: Universal text-to-text learning.
- **XLNet**: Captures dependencies in all positions.
""")

# Sentiment-analysis demo.
# BUG FIX: the original loaded plain "bert-base-uncased", whose sequence-
# classification head is randomly initialized — its sentiment labels were
# meaningless. Use a checkpoint actually fine-tuned for sentiment (SST-2).
st.subheader('🎭 Pretrained Model Example: Sentiment Analysis')
text = st.text_area("📝 Enter text to analyze", "Transformers are amazing!")
if st.button('📊 Analyze Sentiment'):
    # Model loads lazily, only when the user actually asks for an analysis.
    nlp = _load_pipeline(
        "sentiment-analysis",
        "distilbert-base-uncased-finetuned-sst-2-english",
    )
    result = nlp(text)
    st.write(f"**🧐 Result:** {result}")

# --- 3. Fine-tuning pretrained models ------------------------------------
_header('📌 3. Fine-tuning Pretrained Models')
st.subheader('⚙️ Definition:')
st.write("""
Fine-tuning tailors pretrained models to specific NLP tasks:
- **Sentiment Analysis**: Classifies text sentiments.
- **Named Entity Recognition (NER)**: Detects names, locations, organizations.
- **Question Answering**: Extracts answers from given contexts.
""")

# NER example.
st.subheader('🔎 Named Entity Recognition (NER)')
text_ner = st.text_area("🔤 Enter text for NER", "Barack Obama was born in Hawaii.")
if st.button('🔬 Perform NER'):
    nlp_ner = _load_pipeline("ner", "dbmdz/bert-large-cased-finetuned-conll03-english")
    ner_results = nlp_ner(text_ner)
    st.write("**🔍 NER Results:**")
    for entity in ner_results:
        st.write(f"📝 {entity['word']} - {entity['entity']} - Confidence: {entity['score']:.2f}")

# Question-answering example.
st.subheader('🧠 Question Answering with BERT')
nlp_qa_context = st.text_area("📚 Enter context", "Transformers revolutionized NLP with parallel processing.")
question = st.text_input("❓ Ask a question", "What did transformers revolutionize?")
if st.button('🎤 Get Answer'):
    nlp_qa = _load_pipeline(
        "question-answering",
        "bert-large-uncased-whole-word-masking-finetuned-squad",
    )
    answer = nlp_qa(question=question, context=nlp_qa_context)
    st.write(f"**📝 Answer:** {answer['answer']}")

# Footer
_header('✨ Thanks for Exploring NLP! ✨')