{ "nbformat": 4, "nbformat_minor": 0, "metadata": { "colab": { "provenance": [] }, "kernelspec": { "name": "python3", "display_name": "Python 3" }, "language_info": { "name": "python" } }, "cells": [ { "cell_type": "code", "source": [ "#upload the fine_tuned_model.zip and narrative_texts.csv then run the code for evaluation\n", "\n", "import zipfile\n", "import os\n", "\n", "#if the folder doesn't exist already, then extract the model\n", "if not os.path.exists(\"fine_tuned_model\"):\n", " with zipfile.ZipFile(\"fine_tuned_model.zip\", 'r') as zip_ref:\n", " zip_ref.extractall(\"fine_tuned_model\") #extract all model files into the target folder\n", "\n", "print(\"Model extracted successfully.\") #confirmation message" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "9iMmMqqB6Hf_", "outputId": "cb0c6eb8-6650-4087-9bb7-078ec6012375" }, "execution_count": 4, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Model extracted successfully.\n" ] } ] }, { "cell_type": "code", "source": [ "import torch #for deep learning\n", "from transformers import BertTokenizer, BertForSequenceClassification #model training in bert\n", "from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score #evaulation metrics\n", "import pandas as pd\n", "import re #regex\n", "\n", "#load fine-tuned model and tokenizer\n", "model_path = \"./fine_tuned_model\"\n", "tokenizer = BertTokenizer.from_pretrained(model_path)\n", "model = BertForSequenceClassification.from_pretrained(model_path)\n", "model.eval() #set model to evaluation mode\n", "\n", "#load dataset and normalize the text\n", "df = pd.read_csv(\"narrative_texts.csv\")\n", "df['text'] = df['text'].str.lower() #convert to lowercase\n", "df['text'] = df['text'].apply(lambda x: re.sub(r'[^a-z\\s]', '', x)) #remove non-alphabetic characters\n", "df['text'] = df['text'].apply(lambda x: re.sub(r'\\s+', ' ', x).strip()) #clean extra spaces\n", "\n", "#function to swap gendered words in text\n", "def gender_swap(text):\n", " swaps = {\n", " \" he \": \" TEMP \", \" she \": \" he \", \" TEMP \": \" she \",\n", " \" his \": \" TEMP2 \", \" her \": \" his \", \" TEMP2 \": \" her \",\n", " \" him \": \" TEMP3 \", \" her \": \" him \", \" TEMP3 \": \" her \"\n", " }\n", " for key, value in swaps.items():\n", " text = text.replace(key, value)\n", " return text\n", "\n", "#generate swapped gender versions of each sentence\n", "df['text_swapped'] = df['text'].apply(lambda x: gender_swap(\" \" + x + \" \"))\n", "\n", "#create a mixed dataset of original and swapped texts\n", "df_mixed = pd.concat([df['text'], df['text_swapped']], ignore_index=True)\n", "labels_mixed = [0] * len(df) + [1] * len(df) #label 0 for original, 1 for swapped\n", "\n", "#function to evaluate model performance\n", "def evaluate_model(texts, labels):\n", " inputs = tokenizer(texts.tolist(), truncation=True, padding=True, return_tensors=\"pt\", max_length=128)\n", "\n", " with torch.no_grad():\n", " outputs = model(**inputs)\n", " logits = outputs.logits\n", " preds = torch.argmax(logits, dim=1).numpy()\n", "\n", " acc = accuracy_score(labels, preds)\n", " precision = precision_score(labels, preds)\n", " recall = recall_score(labels, preds)\n", " f1 = f1_score(labels, preds)\n", "\n", " return {\n", " \"Accuracy\": round(acc, 4),\n", " \"Precision\": round(precision, 4),\n", " \"Recall\": round(recall, 4),\n", " \"F1 Score\": round(f1, 4)\n", " }" ], "metadata": { "id": "xnCn3rmr62nN" }, "execution_count": 5, "outputs": [] }, { "cell_type": 
"code", "source": [ "#evaluating the model on both original and gender-swapped text\n", "metrics = evaluate_model(df_mixed, labels_mixed)\n", "\n", "#printing out the evaluation results\n", "print(\"Model Evaluation Results:\")\n", "for metric, value in metrics.items():\n", " print(f\"{metric}: {value}\") #prints each metric and its value one by one" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "Tyn_TmKo7USd", "outputId": "75ae6a93-a783-4357-fd13-d9441a8a7744" }, "execution_count": 7, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Model Evaluation Results:\n", "Accuracy: 0.55\n", "Precision: 0.5385\n", "Recall: 0.7\n", "F1 Score: 0.6087\n" ] } ] }, { "cell_type": "code", "source": [], "metadata": { "id": "GfvTDUPp7Wi1" }, "execution_count": null, "outputs": [] } ] }