| import os | |
| import json | |
| import re | |
| from datasets import load_dataset | |
# Scratch directory for this dataset.
# NOTE(review): nothing in this script writes into data/tweet_topic
# (outputs go to dataset/topic/ below) — possibly leftover; confirm.
os.makedirs("data/tweet_topic", exist_ok=True)
# Multi-label tweet-topic corpus with per-year train/validation/test splits.
data = load_dataset("cardiffnlp/tweet_topic_multi")
| re_user = re.compile(r'{@[^@^}]*@}') | |
def process(tmp):
    """Flatten a tweet-topic DataFrame into JSON-serializable records.

    For each row: drop the integer ``label`` column, rewrite user mentions
    of the form ``{@Display Name@}`` to ``@Display_Name``, replace the
    dataset's ``{{USERNAME}}``/``{{URL}}`` placeholders with
    ``@user``/``{URL}``, and fold the ``label_name`` list into a
    human-readable ``condition`` string. All other columns pass through.

    Args:
        tmp: pandas DataFrame with at least ``text``, ``label`` and
            ``label_name`` columns.

    Returns:
        list[dict]: one record per row, with ``label``/``label_name``
        removed and ``condition`` added.
    """
    # Compiled locally so the function is self-contained. Class is [^@}]
    # (not the module-level [^@^}]) so names containing "^" also match.
    mention = re.compile(r"\{@[^@}]*@\}")
    # to_dict(orient="records") is the idiomatic (and faster) replacement
    # for iterrows() + per-row Series.to_dict().
    records = tmp.to_dict(orient="records")
    for row in records:
        row.pop("label")  # integer ids are redundant with label_name
        text = row["text"]
        for m in mention.findall(text):
            # {@John Smith@} -> @John_Smith
            text = text.replace(m, m.replace("{@", "@").replace("@}", "").replace(" ", "_"))
        text = text.replace("{{USERNAME}}", "@user").replace("{{URL}}", "{URL}")
        row["text"] = text
        topics = [t.replace("_", " ") for t in row.pop("label_name")]
        row["condition"] = f'Topics: {", ".join(topics)}'
    return records
# Assemble the splits: train/validation merge the 2020 and 2021 yearly
# subsets; test only exists for 2021.
train = process(data["train_2020"].to_pandas()) + process(data["train_2021"].to_pandas())
val = process(data["validation_2020"].to_pandas()) + process(data["validation_2021"].to_pandas())
test = process(data["test_2021"].to_pandas())
# Serialize each split as JSON Lines under dataset/topic/.
os.makedirs("dataset/topic", exist_ok=True)
for split_name, records in (("train", train), ("validation", val), ("test", test)):
    with open(f"dataset/topic/{split_name}.jsonl", "w") as handle:
        # newline-joined with no trailing newline, matching the original output
        handle.write("\n".join(json.dumps(record) for record in records))