# tweet_temporal_shift / statistics.py
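"""Compute per-split statistics (size, mean token length, and date range) for each
subset of tweettemposhift/tweet_temporal_shift, then print a LaTeX summary table."""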
import pandas as pd
from datasets import load_dataset
from transformers import AutoTokenizer
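# Token lengths are measured with the RoBERTa subword tokenizer.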
tokenizer = AutoTokenizer.from_pretrained("roberta-base")
stats = []
for i in ["emoji_temporal", "hate_temporal", "nerd_temporal", "ner_temporal", "topic_temporal", "sentiment_small_temporal"]:
for s in ["train", "validation", "test"]:
dataset = load_dataset("tweettemposhift/tweet_temporal_shift", i, split=s)
df = dataset.to_pandas()
if i != "nerd_temporal":
token_length = [len(tokenizer.tokenize(t)) for t in dataset['text']]
else:
token_length = [len(tokenizer.tokenize(f"{d['target']} {tokenizer.sep_token} {d['definition']} {tokenizer.sep_token} {d['text']}")) for d in dataset]
token_length_in = [i for i in token_length if i <= 126]
date = pd.to_datetime(df.date).sort_values().values
stats.append({
"data": i,
"split": s,
"size": len(dataset),
"size (token length < 128)": len(token_length_in),
"mean_token_length": sum(token_length)/len(token_length),
"date": f'{str(date[0]).split("T")[0]} / {str(date[-1]).split("T")[0]}',
})
df = pd.DataFrame(stats)
print(df)
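# Build a compact summary table (pretty subset names, Train/Valid/Test, sizes, date ranges) and render it as LaTeX.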
pretty_name = {
    "emoji_temporal": "Emoji",
    "hate_temporal": "Hate",
    "nerd_temporal": "NERD",
    "ner_temporal": "NER",
    "topic_temporal": "Topic",
    "sentiment_small_temporal": "Sentiment",
}
df.index = [pretty_name[i] for i in df.pop("data")]
df = df[["split", "size", "date"]]
pretty_name_split = {"train": "Train", "validation": "Valid", "test": "Test"}
df["split"] = [pretty_name_split[i] for i in df["split"]]
df.columns = [i.capitalize() for i in df.columns]
df['Size'] = df['Size'].map('{:,}'.format)
print(df.to_latex())