|
|
|
""" |
|
@Project : indexing |
|
@File : SciGraph |
|
@Email : [email protected] |
|
@Author : Yan Yuchen |
|
@Time : 2023/3/9 12:53 |
|
""" |
|
import json |
|
import datasets |
|
import pandas as pd |
|
import numpy as np |
|
from sklearn.model_selection import train_test_split |
|
|
|
|
|
# BibTeX entry users should cite when publishing results on this dataset
# (EEKE 2022 workshop poster).
_CITATION = """\
@InProceedings{yan-EtAl:2022:Poster,
author = {Yuchen Yan and Chong Chen},
title = {SciGraph: A Knowledge Graph Constructed by Function and Topic Annotation of Scientific Papers},
booktitle = {3rd Workshop on Extraction and Evaluation of Knowledge Entities from Scientific Documents (EEKE2022), June 20-24, 2022, Cologne, Germany and Online},
month = {June},
year = {2022},
address = {Beijing, China},
url = {https://ceur-ws.org/Vol-3210/paper16.pdf}
}
"""

# TODO: short human-readable description of the dataset (left empty by the author).
_DESCRIPTION = """\
"""

# TODO: dataset homepage URL (left empty by the author).
_HOMEPAGE = ""

# TODO: license identifier (left empty by the author).
_LICENSE = ""

# Relative paths of the three data files resolved by dl_manager in
# SciGraph._split_generators:
#   classes  -> JSON object whose keys are the function label names
#   function -> per-paper table with one indicator column per function label
#   topic    -> papers with '#%#'-separated keyword strings
_URLS = {
    'classes': 'class.json',
    'function': 'assign.json',
    'topic': 'paper_new.json'
}
|
|
|
|
|
|
|
|
|
|
|
class SciGraph(datasets.GeneratorBasedBuilder):
    """Loader for the SciGraph dataset (function and topic annotation of papers).

    Two configurations are exposed:

    * ``function`` -- single-label classification: each abstract is assigned
      one of six function categories (Chinese label names, the keys of
      ``class.json``).
    * ``topic``    -- keyword annotation: each abstract is paired with a list
      of keywords (stored ``'#%#'``-separated in ``paper_new.json``).
    """

    VERSION = datasets.Version("0.0.1")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="function", version=VERSION,
                               description="This part of my dataset covers extraction"),
        datasets.BuilderConfig(name="topic", version=VERSION,
                               description="This part of my dataset covers generation")
    ]

    DEFAULT_CONFIG_NAME = "function"

    def _info(self):
        """Declare the feature schema for the active configuration."""
        # Fixed label inventory for the "function" task; must stay in sync
        # with the keys of class.json read in _generate_examples.
        classes = ['综述与进展', '论证与对比', '思考与探讨', '原理与计算', '技术与方法', '设计与应用']
        if self.config.name == "function":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "abstract": datasets.Value("string"),
                    "label": datasets.features.ClassLabel(names=classes, num_classes=len(classes))
                }
            )
        else:
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "abstract": datasets.Value("string"),
                    "keywords": datasets.features.Sequence(datasets.Value("string"))
                }
            )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Resolve the three data files and declare train/test/validation splits.

        Every split receives all three file paths; _generate_examples picks
        the ones relevant to the active configuration.
        """
        data_dir = dl_manager.download_and_extract(_URLS)

        # The three generators differ only in the split name/tag.
        split_tags = [
            (datasets.Split.TRAIN, "train"),
            (datasets.Split.TEST, "test"),
            (datasets.Split.VALIDATION, "valid"),
        ]
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "split": tag,
                    "classes": data_dir['classes'],
                    "function": data_dir['function'],
                    "topic": data_dir['topic'],
                },
            )
            for split_name, tag in split_tags
        ]

    def _generate_examples(self, split, classes, function, topic):
        """Yield ``(key, example)`` pairs for one split of the active config.

        Args:
            split: one of ``"train"``, ``"valid"``, ``"test"``.
            classes: path to class.json; its keys are the function label names.
            function: path to assign.json (one indicator column per label).
            topic: path to paper_new.json ('#%#'-separated keyword strings).
        """
        if self.config.name == 'function':
            # Force utf-8: the label keys are Chinese, and the platform
            # default encoding (e.g. a legacy Windows code page) may fail.
            with open(classes, 'r', encoding='utf-8') as f:
                functions = list(json.load(f).keys())

            data = pd.read_json(function)
            # Keep only unambiguous rows (exactly one label set); .copy()
            # detaches the slice so the column assignment below does not
            # trigger pandas' SettingWithCopyWarning.
            data = data.loc[data[functions].sum(axis=1) == 1].copy()
            # With exactly one 1 per row, idxmax(axis=1) yields the name of
            # the column holding it -- same result as scanning each row for
            # the 1, but vectorized.
            data['label'] = data[functions].idxmax(axis=1)
            data = data[['_id', 'abstract', 'label']]

            train_data, valid_data = train_test_split(data, test_size=0.1, random_state=42)

            # Unlabelled papers (no indicator column set) form the test
            # split; they are emitted with the sentinel label -1.
            test_data = pd.read_json(function)
            test_data = test_data.loc[test_data[functions].sum(axis=1) == 0]

            if split == 'train':
                for idx, row in train_data.iterrows():
                    yield idx, {
                        "id": row._id,
                        "abstract": row.abstract,
                        "label": row.label
                    }
            elif split == 'valid':
                for idx, row in valid_data.iterrows():
                    yield idx, {
                        "id": row._id,
                        "abstract": row.abstract,
                        "label": row.label
                    }
            elif split == 'test':
                for idx, row in test_data.iterrows():
                    yield idx, {
                        "id": row._id,
                        "abstract": row.abstract,
                        "label": -1
                    }

        if self.config.name == 'topic':
            data = pd.read_json(topic)
            # Blank keyword strings become NaN and are dropped, so train and
            # valid only contain rows with at least one keyword.
            data = data.replace(to_replace=r'^\s*$', value=np.nan, regex=True).dropna(subset=['keywords'], axis=0)

            train_data, valid_data = train_test_split(data, test_size=0.1, random_state=42)
            # NOTE(review): the test split re-reads the whole file, so it
            # also contains the train/valid rows -- presumably intentional
            # (predict over everything); confirm with the dataset author.
            test_data = pd.read_json(topic)
            if split == 'train':
                for idx, row in train_data.iterrows():
                    yield idx, {
                        "id": row._id,
                        "abstract": row.abstract,
                        "keywords": row.keywords.split('#%#')
                    }
            elif split == 'valid':
                for idx, row in valid_data.iterrows():
                    yield idx, {
                        "id": row._id,
                        "abstract": row.abstract,
                        "keywords": row.keywords.split('#%#')
                    }
            elif split == 'test':
                # NOTE(review): assumes every 'keywords' cell here is a
                # string (blank ones become [''] after split); a JSON null
                # would surface as NaN and raise -- TODO confirm data.
                for idx, row in test_data.iterrows():
                    yield idx, {
                        "id": row._id,
                        "abstract": row.abstract,
                        "keywords": row.keywords.split('#%#')
                    }
|
|