"""PMC Open Access Subset sections parsed (plain text)""" |
|
|
|
import datetime |
|
import pandas as pd |
|
import numpy as np |
|
from itertools import compress, chain |
|
from collections import defaultdict |
|
import re |
|
from lxml import etree |
|
import json |
|
import html |
|
import unicodedata |
|
|
|
import datasets |
|
from datasets.tasks import LanguageModeling |
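
# Usage sketch (an assumption, not part of the original script): if this loading script
# is saved locally as, e.g., "pmc_open_access_xml.py", it can be loaded with one of the
# config names built from _SUBSETS ("all", "commercial", "non_commercial", "other"):
#
#     from datasets import load_dataset
#     ds = load_dataset("pmc_open_access_xml.py", "commercial", split="train")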


_CITATION = ""

_DESCRIPTION = """\
The PMC Open Access Subset includes more than 3.4 million journal articles and preprints that are made available under
license terms that allow reuse.
Not all articles in PMC are available for text mining and other reuse; many have copyright protection. However, articles
in the PMC Open Access Subset are made available under Creative Commons or similar licenses that generally allow more
liberal redistribution and reuse than a traditional copyrighted work.
The PMC Open Access Subset is one part of the PMC Article Datasets.

This version uses the XML files as source, benefiting from the structured text
to split the articles into sections, namely introduction, methods, results,
discussion and conclusion, plus front, body and back matter. The XML markup is
then stripped and the content formatted as plain text.
"""

_HOMEPAGE = "https://www.ncbi.nlm.nih.gov/pmc/tools/openftlist/"

_LICENSE = """
https://www.ncbi.nlm.nih.gov/pmc/about/copyright/

Within the PMC Open Access Subset, there are three groupings:

Commercial Use Allowed - CC0, CC BY, CC BY-SA, CC BY-ND licenses
Non-Commercial Use Only - CC BY-NC, CC BY-NC-SA, CC BY-NC-ND licenses; and
Other - no machine-readable Creative Commons license, no license, or a custom license.
"""

_URL_ROOT = "https://ftp.ncbi.nlm.nih.gov/pub/pmc/"
_URL = _URL_ROOT + "oa_bulk/{subset}/xml/"

_SUBSETS = {
    "commercial": "oa_comm",
    "non_commercial": "oa_noncomm",
    "other": "oa_other",
}
_BASELINE_DATE = "2023-12-18"
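

# begin_doc_rgx locates the <!DOCTYPE ...> declaration; clean_raw() uses it to drop
# anything that precedes the declaration before the XML is parsed.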
begin_doc_rgx = re.compile("""<!DOCTYPE.*""")


def clean_raw(xml_text):
    """
    Fixes the formatting of the raw XML text of a file and returns it.
    Some files have bad formatting, but they can be fixed/improved.
    """
    begin_doc = begin_doc_rgx.search(xml_text)
    if begin_doc is not None:
        xml_text = xml_text[begin_doc.start():]

    xml_text = re.sub(r"\s+", " ", xml_text)
    return xml_text


def construct_datadict(article_tree):
    """
    Where the magic happens. A long routine that:
    - Removes the reference list (and the elements that are referenced, e.g. figures and tables) from the text
    - Extracts paragraphs and titles together with their path in the document
    - Uses the titles to identify the "introduction", "methods", "results", "discussion" and "conclusion" sections
    - Uses the paths to group paragraphs and titles into the corresponding content
    - Puts the remaining p and title elements into three other sections: front, body, back

    Returns:
        - res_content_d: Dictionary with the content result

    Useful information about the tags can be found here: https://jats.nlm.nih.gov/archiving/tag-library/1.3/
    """
    res_content_d = {}

    refs_el = article_tree.find(".//ref-list")
    if refs_el is not None:
        refs_el.getparent().remove(refs_el)
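
    # Non-prose elements (figures, tables, formulas, media, footnotes, etc.) are
    # replaced with empty <xref> placeholders so that their tail text is preserved
    # while their own content is dropped from the plain-text output.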
    ref_el_l = article_tree.xpath(
        ".//fig|.//table-wrap|.//array|.//supplementary-material"
        "|.//inline-supplementary-material|.//disp-formula"
        "|.//inline-formula|.//graphic|.//inline-graphic"
        "|.//media|.//inline-media|.//boxed-text"
        "|.//table-wrap-foot|.//fn-group|.//chem-struct-wrap"
        "|.//code|.//disp-quote|.//speech"
    )
    for el in ref_el_l[::-1]:
        repl_xref = etree.Element("xref")
        repl_xref.tail = el.tail
        el.addprevious(repl_xref)
        el.getparent().remove(el)
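
    # Collect every top-level <p> and <title> element of the front, body and back
    # parts, together with its path in the tree; body titles are also kept
    # lower-cased to locate the canonical sections below.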
    path_l, text_l = [], []
    t_paths, t_texts_lowcase = [], []
    for part in ["front", "body", "back"]:
        tmp_path_l, tmp_text_l = [], []
        tmp_t_paths, tmp_t_texts_lowcase = [], []
        part_el = article_tree.find(".//" + part)
        if part_el is None:
            res_content_d[part] = []
            continue

        for el in part_el.xpath(".//p[not(ancestor::p) and not(ancestor::title)]| .//title[not(ancestor::p) and not(ancestor::title)]"):
            new_text = " ".join(el.itertext())
            new_text = unicodedata.normalize("NFKD", html.unescape(new_text))
            tmp_path_l.append(article_tree.getelementpath(el))
            tmp_text_l.append(new_text)
            if el.tag == "title":
                tmp_t_paths.append(tmp_path_l[-1])
                tmp_t_texts_lowcase.append(new_text.lower())
        if part == "body":
            path_l, text_l = tmp_path_l, tmp_text_l
            t_paths, t_texts_lowcase = tmp_t_paths, tmp_t_texts_lowcase
        else:
            res_content_d[part] = tmp_text_l
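
    # Section titles are matched by keyword; for each match, the shallowest title's
    # parent path is taken as the section root, and every body paragraph/title whose
    # path starts with that root is assigned to the section.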
    mask_intro = np.array(["introduction" in t_text or "background" in t_text for t_text in t_texts_lowcase]).astype(bool)
    mask_metho = np.array(["method" in t_text for t_text in t_texts_lowcase]).astype(bool)
    mask_resul = np.array(["result" in t_text for t_text in t_texts_lowcase]).astype(bool)
    mask_discu = np.array(["discussion" in t_text for t_text in t_texts_lowcase]).astype(bool)
    mask_concl = np.array(["conclusion" in t_text for t_text in t_texts_lowcase]).astype(bool)
    processed_mask = np.zeros(len(text_l), dtype="bool")
    for mask, name_section in zip(
        [mask_intro, mask_metho, mask_resul, mask_discu, mask_concl],
        ["introduction", "methods", "results", "discussion", "conclusion"],
    ):
        if not np.any(mask):
            res_content_d[name_section] = []
            continue

        filtered_path_l = list(compress(t_paths, mask))
        levels = np.array([len(path.split("/")) for path in filtered_path_l])
        root_path = filtered_path_l[np.argmin(levels)]
        root_path = root_path[:root_path.rindex("/")]
        mask_contents = np.array([path.startswith(root_path) for path in path_l]).astype(bool)
        processed_mask |= mask_contents
        res_content_d[name_section] = list(compress(text_l, mask_contents))
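
    # Everything in the body that was not claimed by one of the named sections above
    # is kept under the generic "body" key.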
    processed_mask = ~processed_mask
    res_content_d["body"] = list(compress(text_l, processed_mask))

    return res_content_d


class OpenAccessXMLConfig(datasets.BuilderConfig):
    """BuilderConfig for the PMC Open Access Subset."""

    def __init__(self, subsets=None, **kwargs):
        """BuilderConfig for the PMC Open Access Subset.

        Args:
            subsets (:obj:`List[str]`): List of subsets/groups to load.
            **kwargs: Keyword arguments forwarded to super.
        """
        subsets = [subsets] if isinstance(subsets, str) else subsets
        super().__init__(
            name="+".join(subsets), **kwargs,
        )
        self.subsets = subsets if self.name != "all" else list(_SUBSETS.keys())


class OpenAccessXML(datasets.GeneratorBasedBuilder):
    """PMC Open Access Subset enriched from XML files."""

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIG_CLASS = OpenAccessXMLConfig
    BUILDER_CONFIGS = [OpenAccessXMLConfig(subsets="all")] + [OpenAccessXMLConfig(subsets=subset) for subset in _SUBSETS]
    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "accession_id": datasets.Value("string"),
                    "pmid": datasets.Value("string"),

                    "introduction": datasets.Value("string"),
                    "methods": datasets.Value("string"),
                    "results": datasets.Value("string"),
                    "discussion": datasets.Value("string"),
                    "conclusion": datasets.Value("string"),

                    "front": datasets.Value("string"),
                    "body": datasets.Value("string"),
                    "back": datasets.Value("string"),

                    "license": datasets.Value("string"),
                    "retracted": datasets.Value("string"),
                    "last_updated": datasets.Value("string"),
                    "citation": datasets.Value("string"),
                    "package_file": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            task_templates=[LanguageModeling(text_column="content")],
        )

    def _split_generators(self, dl_manager):

        incremental_paths = {
            "incremental_file_lists": [],
            "incremental_archives": [],
        }

        baseline_package_list = dl_manager.download(f"{_URL_ROOT}oa_file_list.csv")

        baseline_file_lists = []
        baseline_archives = []
        for subset in self.config.subsets:
            url = _URL.format(subset=_SUBSETS[subset])
            basename = f"{_SUBSETS[subset]}_xml."
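
            # Baseline packages are split by PMCID range (PMC000xxxxxx ... PMC009xxxxxx);
            # the PMC000xxxxxx package is skipped for the non-commercial subset.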
            baselines = [f"PMC00{i}xxxxxx.baseline.{_BASELINE_DATE}" for i in range(10) if (subset != "non_commercial" or i > 0)]

            for baseline in baselines:
                baseline_file_list_url = f"{url}{basename}{baseline}.filelist.csv"
                baseline_archive_url = f"{url}{basename}{baseline}.tar.gz"
                baseline_file_list = dl_manager.download(baseline_file_list_url)
                baseline_archive = dl_manager.download(baseline_archive_url)

                baseline_file_lists.append(baseline_file_list)
                baseline_archives.append(baseline_archive)
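
            # One incremental package per day is expected after the baseline date; every
            # candidate date up to today is tried and dates with no package are skipped.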
            date_delta = datetime.date.today() - datetime.date.fromisoformat(_BASELINE_DATE)
            incremental_dates = [
                (datetime.date.fromisoformat(_BASELINE_DATE) + datetime.timedelta(days=i + 1)).isoformat()
                for i in range(date_delta.days)
            ]
            incrementals = [f"incr.{date}" for date in incremental_dates]
            for incremental in incrementals:
                incremental_file_list_url = f"{url}{basename}{incremental}.filelist.csv"
                incremental_archive_url = f"{url}{basename}{incremental}.tar.gz"
                try:
                    incremental_file_list = dl_manager.download(incremental_file_list_url)
                    incremental_archive = dl_manager.download(incremental_archive_url)
                except FileNotFoundError:
                    continue
                incremental_paths["incremental_file_lists"].append(incremental_file_list)
                incremental_paths["incremental_archives"].append(incremental_archive)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "baseline_file_lists": baseline_file_lists,
                    "baseline_archives": [dl_manager.iter_archive(archive) for archive in baseline_archives],
                    "baseline_package_list": baseline_package_list,
                    "incremental_file_lists": incremental_paths["incremental_file_lists"],
                    "incremental_archives": [dl_manager.iter_archive(archive) for archive in incremental_paths["incremental_archives"]],
                },
            ),
        ]

    def _generate_examples(self, baseline_file_lists, baseline_archives, baseline_package_list, incremental_file_lists, incremental_archives):
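
        # oa_file_list.csv maps every accession ID to its package file path ("File");
        # it is joined with each file list below to fill the "package_file" field.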
        oa_package_list = pd.read_csv(baseline_package_list, index_col="Accession ID")
        oa_package_list = oa_package_list[["File"]]
        oa_package_list.sort_index(inplace=True)
        processed_ids = set()
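
        # Incremental file lists are processed first, newest to oldest, so that the most
        # recently updated version of an article is the one that gets yielded; older
        # copies (and the baseline copies) of the same PMCID are skipped via processed_ids.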
        if incremental_file_lists:
            for incremental_file_list, incremental_archive in zip(incremental_file_lists[::-1], incremental_archives[::-1]):
                try:
                    incrementals = pd.read_csv(incremental_file_list, index_col="AccessionID")
                except FileNotFoundError:
                    continue
                incrementals = incrementals.join(oa_package_list).reset_index().set_index("Article File")
                incrementals.File = incrementals.File.fillna('')
                incrementals = incrementals.to_dict(orient="index")

                for path, file in incremental_archive:
                    data = incrementals.pop(path)
                    pmcid = data["AccessionID"]
                    if pmcid in processed_ids:
                        continue
                    content = file.read()
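                    # Fall back to Latin-1 for files that are not valid UTF-8.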
                    try:
                        text = content.decode("utf-8").strip()
                    except UnicodeDecodeError:
                        text = content.decode("latin-1").strip()
                    text = clean_raw(text)
                    try:
                        article_tree = etree.ElementTree(etree.fromstring(text))
                    except etree.XMLSyntaxError:
                        continue
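
                    # Articles whose XML cannot be parsed are skipped; for the rest, each
                    # parsed section is joined into a single newline-separated string.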
                    content_d = construct_datadict(article_tree)
                    data = {
                        "introduction": "\n".join(content_d["introduction"]),
                        "methods": "\n".join(content_d["methods"]),
                        "results": "\n".join(content_d["results"]),
                        "discussion": "\n".join(content_d["discussion"]),
                        "conclusion": "\n".join(content_d["conclusion"]),
                        "front": "\n".join(content_d["front"]),
                        "body": "\n".join(content_d["body"]),
                        "back": "\n".join(content_d["back"]),
                        "pmid": data["PMID"],
                        "accession_id": pmcid,
                        "license": data["License"],
                        "last_updated": data["LastUpdated (YYYY-MM-DD HH:MM:SS)"],
                        "retracted": data["Retracted"],
                        "citation": data["Article Citation"],
                        "package_file": data["File"],
                    }
                    processed_ids.add(pmcid)
                    yield pmcid, data
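
        # Baseline archives are processed after the incrementals; any article already
        # updated by an incremental package is skipped via processed_ids.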
        for baseline_file_list, baseline_archive in zip(baseline_file_lists, baseline_archives):
            baselines = pd.read_csv(baseline_file_list, index_col="AccessionID")
            baselines = baselines.join(oa_package_list).reset_index().set_index("Article File")
            baselines.File = baselines.File.fillna('')
            baselines = baselines.to_dict(orient="index")

            for path, file in baseline_archive:
                data = baselines.pop(path)
                pmcid = data["AccessionID"]
                if pmcid in processed_ids:
                    continue
                content = file.read()
                try:
                    text = content.decode("utf-8").strip()
                except UnicodeDecodeError:
                    text = content.decode("latin-1").strip()
                text = clean_raw(text)
                try:
                    article_tree = etree.ElementTree(etree.fromstring(text))
                except etree.XMLSyntaxError:
                    continue

                content_d = construct_datadict(article_tree)
                data = {
                    "introduction": "\n".join(content_d["introduction"]),
                    "methods": "\n".join(content_d["methods"]),
                    "results": "\n".join(content_d["results"]),
                    "discussion": "\n".join(content_d["discussion"]),
                    "conclusion": "\n".join(content_d["conclusion"]),
                    "front": "\n".join(content_d["front"]),
                    "body": "\n".join(content_d["body"]),
                    "back": "\n".join(content_d["back"]),
                    "pmid": data["PMID"],
                    "accession_id": pmcid,
                    "license": data["License"],
                    "last_updated": data["LastUpdated (YYYY-MM-DD HH:MM:SS)"],
                    "retracted": data["Retracted"],
                    "citation": data["Article Citation"],
                    "package_file": data["File"],
                }
                processed_ids.add(pmcid)
                yield pmcid, data