from datasets import load_dataset
from pathlib import Path
import pandas as pd
import os
import pickle
import logging
import evaluate
import nltk
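
# Resolve all paths relative to this script's directory.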
CURRENT_PATH = Path(__file__).parent

# The log file lives under out/, which must exist before logging opens it.
os.makedirs(os.path.join(CURRENT_PATH, 'out'), exist_ok=True)

logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s',
                    datefmt='%H:%M:%S',
                    filename=os.path.join(CURRENT_PATH, 'out', 'debug_ngrams.txt'),
                    filemode='w')
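
# Fetch the NLTK resources word_tokenize depends on (punkt); stopwords are
# presumably needed by the pickled vectorizers.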
nltk.download("stopwords")
nltk.download("punkt")
def tokenizer(text):
    return nltk.tokenize.word_tokenize(text, language="portuguese")
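

# Load one pickled classifier pipeline per training domain; each is expected
# to expose a scikit-learn-style .predict().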
def load_pipelines():
    in_path = os.path.join(CURRENT_PATH, 'models', 'n_grams')
    pipelines = []
    for domain in ['politics', 'news', 'law', 'social_media', 'literature', 'web']:
        with open(os.path.join(in_path, f'{domain}.pickle'), 'rb') as f:
            logging.info(f"Loading {domain} pipeline...")
            pipelines.append({
                'pipeline': pickle.load(f),
                'train_domain': domain,
            })
    return pipelines
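

# Evaluate one pipeline against every test domain and return a DataFrame with
# one row per (train_domain, test_domain) pair.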
def benchmark(pipeline, debug=False):
    df_results = pd.DataFrame(
        columns=['train_domain', 'test_domain', 'accuracy', 'f1', 'precision', 'recall'])
    train_domain = pipeline['train_domain']
    pipeline = pipeline['pipeline']
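    # Test against every domain, including the pipeline's own training domain,
    # so in-domain and cross-domain performance can be compared.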
    for test_domain in ['politics', 'news', 'law', 'social_media', 'literature', 'web']:
        logging.info(f"Test Domain {test_domain}...")
        dataset = load_dataset(
            'arubenruben/Portuguese_Language_Identification', test_domain, split='test')
        if debug:
            logging.info("Debug mode: using only 100 samples")
            dataset = dataset.shuffle().select(range(100))
        else:
            dataset = dataset.shuffle().select(range(min(50_000, len(dataset))))
        y = pipeline.predict(dataset['text'])
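        # Metrics come from Hugging Face `evaluate`; the defaults for f1,
        # precision, and recall assume binary labels, which is presumed to
        # match this dataset's label scheme.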
        accuracy = evaluate.load('accuracy').compute(
            predictions=y, references=dataset['label'])['accuracy']
        f1 = evaluate.load('f1').compute(
            predictions=y, references=dataset['label'])['f1']
        precision = evaluate.load('precision').compute(
            predictions=y, references=dataset['label'])['precision']
        recall = evaluate.load('recall').compute(
            predictions=y, references=dataset['label'])['recall']
        logging.info(
            f"Accuracy: {accuracy} | F1: {f1} | Precision: {precision} | Recall: {recall}")
        df_results = pd.concat([df_results, pd.DataFrame(
            [[train_domain, test_domain, accuracy, f1, precision, recall]],
            columns=df_results.columns)], ignore_index=True)
    return df_results
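

# Run the full benchmark for every train-domain pipeline and save the
# aggregated results as JSON.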
def test():
    DEBUG = False
    logging.info(f"Debug mode: {DEBUG}")
    pipelines = load_pipelines()
    df_results = pd.DataFrame(
        columns=['train_domain', 'test_domain', 'accuracy', 'f1', 'precision', 'recall'])
    for pipeline in pipelines:
        logging.info(f"Train Domain {pipeline['train_domain']}...")
        df_results = pd.concat(
            [df_results, benchmark(pipeline, debug=DEBUG)], ignore_index=True)
| logging.info("Saving results...") | |
| df_results.to_json(os.path.join(CURRENT_PATH, 'out', 'n_grams.json'), | |
| orient='records', indent=4, force_ascii=False) | |
| if __name__ == "__main__": | |
| test() | |