Update create_split.py — commit diff for create_split.py (+14 lines, −19 lines)
|
@@ -6,10 +6,6 @@ import pandas as pd
|
|
| 6 |
from random import shuffle, seed
|
| 7 |
|
| 8 |
|
| 9 |
-
parameters_min_e_freq = [1, 2, 3, 4]
|
| 10 |
-
parameters_max_p_freq = [100, 50, 25, 10]
|
| 11 |
-
|
| 12 |
-
|
| 13 |
def get_test_predicate(_data):
|
| 14 |
tmp_df = pd.DataFrame(_data)
|
| 15 |
predicates_count = tmp_df.groupby("predicate")['text'].count().sort_values(ascending=False).to_dict()
|
|
@@ -49,18 +45,17 @@ with open("data/t_rex.filter_unified.test.jsonl") as f:
|
|
| 49 |
|
| 50 |
|
| 51 |
seed(42)
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
f.write('\n'.join([json.dumps(i) for i in data_valid]))
|
|
|
|
| 6 |
from random import shuffle, seed
|
| 7 |
|
| 8 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 9 |
def get_test_predicate(_data):
|
| 10 |
tmp_df = pd.DataFrame(_data)
|
| 11 |
predicates_count = tmp_df.groupby("predicate")['text'].count().sort_values(ascending=False).to_dict()
|
|
|
|
| 45 |
|
| 46 |
|
| 47 |
# Build reproducible train/validation splits of the unified T-REx data.
#
# Reads data/t_rex.filter_unified.jsonl (one JSON object per line), renames
# the triple fields to the relation/head/tail convention, drops every record
# whose relation is reserved for the test split (``test_predicate`` is
# defined earlier in this file), shuffles with a fixed seed, and writes an
# 80/20 train/validation split back out as jsonl.
seed(42)  # fixed seed so the shuffle (and thus the split) is reproducible
with open("data/t_rex.filter_unified.jsonl") as f:
    # Parse each non-empty line as one JSON record.
    data = [json.loads(line) for line in f.read().split('\n') if line]
for record in data:
    # Rename raw T-REx field names to the relation/head/tail convention.
    record['relation'] = record.pop('predicate')
    record['head'] = record.pop('subject')
    record['tail'] = record.pop('object')
# Hold out the relations reserved for the test split.
data = [record for record in data if record['relation'] not in test_predicate]
shuffle(data)
split_index = int(len(data) * 0.8)  # 80% train / 20% validation
data_train = data[:split_index]
data_valid = data[split_index:]
with open("data/t_rex.filter_unified.train.jsonl", "w") as f:
    f.write('\n'.join([json.dumps(record) for record in data_train]))
with open("data/t_rex.filter_unified.validation.jsonl", "w") as f:
    f.write('\n'.join([json.dumps(record) for record in data_valid]))
|
|
|