# -*- coding: utf-8 -*-
"""
@Project : indexing
@File : SciGraph
@Email : [email protected]
@Author : Yan Yuchen
@Time : 2023/3/9 12:53
"""
import json
import datasets
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
_CITATION = """\
@InProceedings{yan-EtAl:2022:Poster,
author = {Yuchen Yan and Chong Chen},
title = {SciGraph: A Knowledge Graph Constructed by Function and Topic Annotation of Scientific Papers},
booktitle = {3rd Workshop on Extraction and Evaluation of Knowledge Entities from Scientific Documents (EEKE2022), June 20-24, 2022, Cologne, Germany and Online},
month = {June},
year = {2022},
address = {Beijing, China},
url = {https://ceur-ws.org/Vol-3210/paper16.pdf}
}
"""
_DESCRIPTION = """\
"""
_HOMEPAGE = ""
_LICENSE = ""
_URLS = {
'classes': 'class.json',
'function': 'assign.json',
'topic': 'paper_new.json'
}
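# The values above are repo-relative file names: the loader resolves them
# against the location of this script, so the three JSON files are expected
# to sit alongside it in the dataset repository.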
class SciGraph(datasets.GeneratorBasedBuilder):
"""TODO: Short description of my dataset."""
VERSION = datasets.Version("0.0.1")
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="function", version=VERSION,
description="This part of my dataset covers extraction"),
datasets.BuilderConfig(name="topic", version=VERSION,
description="This part of my dataset covers generation")
]
DEFAULT_CONFIG_NAME = "function"
def _info(self):
classes = ['综述与进展', '论证与对比', '思考与探讨', '原理与计算', '技术与方法', '设计与应用']
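        # Approximate English glosses, in order: Survey & Progress,
        # Argumentation & Comparison, Reflection & Discussion,
        # Principle & Calculation, Technique & Method, Design & Application.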
if self.config.name == "function": # This is the name of the configuration selected in BUILDER_CONFIGS above
features = datasets.Features(
{
"id": datasets.Value("string"),
"abstract": datasets.Value("string"),
"label": datasets.features.ClassLabel(names=classes, num_classes=len(classes))
}
)
else:
features = datasets.Features(
{
"id": datasets.Value("string"),
"abstract": datasets.Value("string"),
"keywords": datasets.features.Sequence(datasets.Value("string"))
}
)
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# This defines the different columns of the dataset and their types
features=features,
homepage=_HOMEPAGE,
# License for the dataset if available
license=_LICENSE,
# Citation for the dataset
citation=_CITATION,
)
def _split_generators(self, dl_manager):
data_dir = dl_manager.download_and_extract(_URLS)
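        # download_and_extract on a dict returns a matching dict of local paths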
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"split": "train",
"classes": data_dir['classes'],
"function": data_dir['function'],
"topic": data_dir['topic']
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"split": "test",
"classes": data_dir['classes'],
"function": data_dir['function'],
"topic": data_dir['topic']
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"split": "valid",
"classes": data_dir['classes'],
"function": data_dir['function'],
"topic": data_dir['topic']
},
)
]
# method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
def _generate_examples(self, split, classes, function, topic):
if self.config.name == 'function':
            # only the keys of class.json (the six function names) are used here
            with open(classes, 'r', encoding='utf-8') as f:
                functions = list(json.load(f).keys())
            data = pd.read_json(function)
            # keep only papers annotated with exactly one function class
            data = data.loc[data[functions].sum(axis=1) == 1].copy()
            data['label'] = [functions[row.tolist().index(1)] for _, row in data[functions].iterrows()]
            data = data[['_id', 'abstract', 'label']]
train_data, valid_data = train_test_split(data, test_size=0.1, random_state=42)
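            # papers with no function annotation become the unlabeled test split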
test_data = pd.read_json(function)
test_data = test_data.loc[test_data[functions].sum(axis=1) == 0]
if split == 'train':
for idx, row in train_data.iterrows():
yield idx, {
"id": row._id,
"abstract": row.abstract,
"label": row.label
}
elif split == 'valid':
for idx, row in valid_data.iterrows():
yield idx, {
"id": row._id,
"abstract": row.abstract,
"label": row.label
}
elif split == 'test':
for idx, row in test_data.iterrows():
yield idx, {
"id": row._id,
"abstract": row.abstract,
"label": -1
}
        elif self.config.name == 'topic':
data = pd.read_json(topic)
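            # treat blank/whitespace-only keyword fields as missing and drop those rows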
data = data.replace(to_replace=r'^\s*$', value=np.nan, regex=True).dropna(subset=['keywords'], axis=0)
train_data, valid_data = train_test_split(data, test_size=0.1, random_state=42)
test_data = pd.read_json(topic)
if split == 'train':
for idx, row in train_data.iterrows():
yield idx, {
"id": row._id,
"abstract": row.abstract,
"keywords": row.keywords.split('#%#')
}
elif split == 'valid':
for idx, row in valid_data.iterrows():
yield idx, {
"id": row._id,
"abstract": row.abstract,
"keywords": row.keywords.split('#%#')
}
            elif split == 'test':
                # the test split re-reads the full file, so the keywords field
                # may be blank or missing here; map those cases to an empty
                # list instead of [''] or an AttributeError on NaN
                for idx, row in test_data.iterrows():
                    yield idx, {
                        "id": row._id,
                        "abstract": row.abstract,
                        "keywords": row.keywords.split('#%#') if isinstance(row.keywords, str) and row.keywords.strip() else []
                    }
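
# Usage sketch (illustrative; "path/to/SciGraph" stands in for wherever this
# script and the three JSON files actually live):
#
#   from datasets import load_dataset
#
#   function_ds = load_dataset("path/to/SciGraph", name="function")
#   topic_ds = load_dataset("path/to/SciGraph", name="topic")
#   print(function_ds["train"][0])  # {'id': ..., 'abstract': ..., 'label': ...}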