Datasets:

Modalities:
Text
Languages:
English
Libraries:
Datasets
License:
File size: 1,512 Bytes
c41faeb
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
from itertools import product
import pandas as pd
from datasets import load_dataset


def get_stats(name):
    """Summarize one configuration of the ``relbert/t_rex`` dataset.

    Parameters
    ----------
    name : str
        Configuration name passed straight to ``load_dataset``.

    Returns
    -------
    tuple[pd.DataFrame, pd.DataFrame]
        ``(relation, size)``: ``relation`` has one row per relation type and
        one column per split holding the pair count for that split (missing
        relations filled with 0); ``size`` has the overall per-split counts
        (total pairs and unique relation types), splits as columns.
    """
    dataset = load_dataset("relbert/t_rex", name)
    split_names = list(dataset.keys())
    relation_counts = []
    split_stats = []
    for split_name in split_names:
        frame = dataset[split_name].to_pandas()
        split_stats.append({
            "number of pairs": len(frame),
            "number of unique relation types": len(frame["relation"].unique()),
        })
        # Per-relation pair count for this split, keyed by relation name.
        relation_counts.append(frame.groupby("relation")["head"].count().to_dict())
    relation = (
        pd.DataFrame(
            relation_counts,
            index=[f"number of pairs ({s})" for s in split_names],
        )
        .T
        .fillna(0)  # a relation absent from a split simply has zero pairs
        .astype(int)
    )
    size = pd.DataFrame(split_stats, index=split_names).T
    return relation, size

# Print markdown tables for the default (most aggressively filtered) config.
main_config = "filter_unified.min_entity_4_max_predicate_10"
df_relation, df_size = get_stats(main_config)
print(f"\n- Number of instances (`{main_config}`) \n\n {df_size.to_markdown()}")
print(f"\n- Number of pairs in each relation type (`{main_config}`) \n\n {df_relation.to_markdown()}")


# Sweep every (min entity frequency, max predicate frequency) combination
# and stack their per-split sizes into one comparison table.
parameters_min_e_freq = [1, 2, 3, 4]
parameters_max_p_freq = [100, 50, 25, 10]
summaries = []
for min_e, max_p in product(parameters_min_e_freq, parameters_max_p_freq):
    _, df_size = get_stats(f"filter_unified.min_entity_{min_e}_max_predicate_{max_p}")
    df_size.pop("test")  # the test split is excluded from the comparison
    # Prefix each split column with the config name so rows stay identifiable
    # after all configs are concatenated together.
    df_size.columns = [f"min_entity_{min_e}_max_predicate_{max_p} ({c})" for c in df_size.columns]
    summaries.append(df_size.T)
df_size_list = pd.concat(summaries)
print(df_size_list.to_markdown())