# -*- coding: utf-8 -*-
"""
@Project    : indexing
@File       : SciGraph
@Email      : [email protected]
@Author     : Yan Yuchen
@Time       : 2023/3/9 12:53
"""
import json
import datasets
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split


_CITATION = """\
@InProceedings{yan-EtAl:2022:Poster,
  author    = {Yuchen Yan and Chong Chen},
  title     = {SciGraph: A Knowledge Graph Constructed by Function and Topic Annotation of Scientific Papers},
  booktitle = {3rd Workshop on Extraction and Evaluation of Knowledge Entities from Scientific Documents (EEKE2022), June 20-24, 2022, Cologne, Germany and Online},
  month     = {June},
  year      = {2022},
  address   = {Beijing, China},
  url       = {https://ceur-ws.org/Vol-3210/paper16.pdf}
}
"""

_DESCRIPTION = """\
"""

_HOMEPAGE = ""

# No explicit license is provided with the dataset.
_LICENSE = ""

_URLS = {
    'classes': 'class.json',
    'function': 'assign.json',
    'topic': 'paper_new.json'
}
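
# Expected shape of the input files (inferred from the parsing code below, not
# from an official schema):
#   class.json     - JSON object whose keys are the function-class names
#   assign.json    - records with '_id', 'abstract', and one 0/1 column per function class
#   paper_new.json - records with '_id', 'abstract', and a '#%#'-joined 'keywords' string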

# TODO: Add link to the official dataset URLs here


class SciGraph(datasets.GeneratorBasedBuilder):
    """SciGraph: function and topic annotations of scientific paper abstracts."""

    VERSION = datasets.Version("0.0.1")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="function", version=VERSION,
                               description="Single-label classification of abstracts into six function classes"),
        datasets.BuilderConfig(name="topic", version=VERSION,
                               description="Keyword (topic) annotation of abstracts")
    ]
    
    DEFAULT_CONFIG_NAME = "function"

    def _info(self):
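        # Class labels are the original Chinese strings, since they must match the
        # data files. Rough English glosses: survey & progress, argumentation &
        # comparison, reflection & discussion, principles & calculation,
        # technology & methods, design & application.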
        classes = ['综述与进展', '论证与对比', '思考与探讨', '原理与计算', '技术与方法', '设计与应用']
        if self.config.name == "function":  # This is the name of the configuration selected in BUILDER_CONFIGS above
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "abstract": datasets.Value("string"),
                    "label": datasets.features.ClassLabel(names=classes, num_classes=len(classes))
                }
            )
        else:
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "abstract": datasets.Value("string"),
                    "keywords": datasets.features.Sequence(datasets.Value("string"))
                }
            )

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        data_dir = dl_manager.download_and_extract(_URLS)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "split": "train",
                    "classes": data_dir['classes'],
                    "function": data_dir['function'],
                    "topic": data_dir['topic']
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "split": "test",
                    "classes": data_dir['classes'],
                    "function": data_dir['function'],
                    "topic": data_dir['topic']
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "split": "valid",
                    "classes": data_dir['classes'],
                    "function": data_dir['function'],
                    "topic": data_dir['topic']
                },
            )
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, split, classes, function, topic):
        if self.config.name == 'function':
            # class.json keys give the function-class names (the file is read as
            # UTF-8 since the labels are Chinese).
            with open(classes, 'r', encoding='utf-8') as f:
                functions = list(json.load(f).keys())
            # Keep only papers annotated with exactly one function class, then
            # convert the one-hot columns into a single class-name label.
            data = pd.read_json(function)
            data = data.loc[data[functions].sum(axis=1) == 1]
            data['label'] = [functions[row.tolist().index(1)] for _, row in data[functions].iterrows()]
            data = data[['_id', 'abstract', 'label']]

            # 90/10 train/validation split; papers with no function annotation
            # form the unlabeled test split (label -1).
            train_data, valid_data = train_test_split(data, test_size=0.1, random_state=42)
            test_data = pd.read_json(function)
            test_data = test_data.loc[test_data[functions].sum(axis=1) == 0]
            if split == 'train':
                for idx, row in train_data.iterrows():
                    yield idx, {
                        "id": row._id,
                        "abstract": row.abstract,
                        "label": row.label
                    }
            elif split == 'valid':
                for idx, row in valid_data.iterrows():
                    yield idx, {
                        "id": row._id,
                        "abstract": row.abstract,
                        "label": row.label
                    }
            elif split == 'test':
                for idx, row in test_data.iterrows():
                    yield idx, {
                        "id": row._id,
                        "abstract": row.abstract,
                        "label": -1
                    }

        elif self.config.name == 'topic':
            # Drop papers whose 'keywords' field is empty or whitespace-only.
            data = pd.read_json(topic)
            data = data.replace(to_replace=r'^\s*$', value=np.nan, regex=True).dropna(subset=['keywords'], axis=0)

            # 90/10 train/validation split; the test split iterates over the full
            # file, including items that were dropped from train/valid above.
            train_data, valid_data = train_test_split(data, test_size=0.1, random_state=42)
            test_data = pd.read_json(topic)
            if split == 'train':
                for idx, row in train_data.iterrows():
                    yield idx, {
                        "id": row._id,
                        "abstract": row.abstract,
                        "keywords": row.keywords.split('#%#')
                    }
            elif split == 'valid':
                for idx, row in valid_data.iterrows():
                    yield idx, {
                        "id": row._id,
                        "abstract": row.abstract,
                        "keywords": row.keywords.split('#%#')
                    }
            elif split == 'test':
                for idx, row in test_data.iterrows():
                    yield idx, {
                        "id": row._id,
                        "abstract": row.abstract,
                        # The unfiltered file can contain rows without keywords.
                        "keywords": row.keywords.split('#%#') if isinstance(row.keywords, str) else []
                    }
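

if __name__ == "__main__":
    # Minimal usage sketch, not part of the loading script proper. It assumes
    # this file and the three JSON files above sit in the working directory;
    # the local path is illustrative.
    from datasets import load_dataset

    scigraph = load_dataset("./SciGraph.py", name="function")
    print(scigraph)
    print(scigraph["train"][0])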