# jolma_split.py
import os.path
import re
import pandas as pd
import datasets
from functools import cached_property, cache
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@article{jolma2010multiplexed,
title={Multiplexed massively parallel SELEX for characterization of human transcription factor binding specificities},
author={Jolma, Arttu and Kivioja, Teemu and Toivonen, Jarkko and Cheng, Lu and Wei, Gonghong and Enge, Martin and \
Taipale, Mikko and Vaquerizas, Juan M and Yan, Jian and Sillanp{\"a}{\"a}, Mikko J and others},
journal={Genome research},
volume={20},
number={6},
pages={861--873},
year={2010},
publisher={Cold Spring Harbor Lab}
}
"""
_DESCRIPTION = """\
PRJEB3289
https://www.ebi.ac.uk/ena/browser/view/PRJEB3289
Data that was generated by HT-SELEX experiments (see Jolma et al. 2010, PMID: 20378718, for a description of the \
method) and has since been used to generate transcription factor binding specificity models for most of the high \
confidence human transcription factors. Sequence data is composed of reads generated with Illumina Genome Analyzer \
IIx and HiSeq 2000 instruments. Samples consist of single-read sequencing of synthetic DNA fragments with a fixed \
length randomized region, or of samples derived from such an initial library by selection with a sequence-specific \
DNA binding protein. Originally, multiple samples with different "barcode" tag sequences were run on the same \
Illumina sequencing lane, but the released files have already been de-multiplexed, and the constant regions and \
"barcodes" of each sequence have been cut out of the sequencing reads to facilitate use of the data. Some of the \
files contain reads from multiple sequencing lanes, so the name of each individual read has been edited to show the \
flowcell and lane used to generate it. Barcodes and oligonucleotide designs are indicated in the names of individual \
entries. Depending on the selection ligand design, the sequences in each of these FASTQ files are either 14, 20, 30 \
or 40 bases long and had different flanking regions on both sides of the sequence. Each run entry is named in one of \
the following ways. Example 1: "BCL6B_DBD_AC_TGCGGG20NGA_1", where the name is composed of the fields \
ProteinName_CloneType_Batch_BarcodeDesign_SelectionCycle. This experiment used barcode ligand TGCGGG20NGA, where \
both of the constant regions flanking the variable region are indicated as they were on the original sequence reads. \
The ligand was selected for one round of HT-SELEX using a recombinant protein that contained the DNA binding domain \
of the human transcription factor BCL6B, and the experiment was performed in the batch of experiments named "AC". \
Example 2: "0_TGCGGG20NGA_0", where the name is composed of (zero)_BarcodeDesign_(zero). These sequences were \
generated by sequencing the initial, non-selected pool. The same initial pools were used in multiple experiments run \
in different batches; this background sequence pool, for example, is the shared background for all of the following \
samples: BCL6B_DBD_AC_TGCGGG20NGA_1, ZNF784_full_AE_TGCGGG20NGA_3, DLX6_DBD_Y_TGCGGG20NGA_4 and MSX2_DBD_W_TGCGGG20NGA_2.
"""
_DOWNLOAD_MANAGER = datasets.DownloadManager()
_RESOURCE_URL = "https://huggingface.co/datasets/thewall/DeepBindWeight/resolve/main"
SELEX_INFO_FILE = _DOWNLOAD_MANAGER.download(f"{_RESOURCE_URL}/ERP001824-deepbind.xlsx")
PROTEIN_INFO_FILE = _DOWNLOAD_MANAGER.download(f"{_RESOURCE_URL}/ERP001824-UniprotKB.xlsx")
pattern = re.compile(r"(\d+)")
URL = "https://huggingface.co/datasets/thewall/jolma_split/resolve/main"
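# Pre-split train/test CSVs, one "{config_name}_train.csv.gz" / "{config_name}_test.csv.gz"
# pair per config, are downloaded from this repository in _split_generators unless a local
# data_dir is supplied through the config.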
"""
p70c10s61087t100 p70:70种蛋白,c10:count>10的序列,s61087:总共61087条序列,t100:小于100条序列的蛋白质划分至训练集
"""
class JolmaSplitConfig(datasets.BuilderConfig):
def __init__(self, protein_prefix="", protein_suffix="", max_length=1000, max_gene_num=1,
aptamer_prefix="", aptamer_suffix="", **kwargs):
super(JolmaSplitConfig, self).__init__(**kwargs)
self.data_dir = kwargs.get("data_dir")
self.protein_prefix = protein_prefix
self.protein_suffix = protein_suffix
self.aptamer_prefix = aptamer_prefix
self.aptamer_suffix = aptamer_suffix
self.max_length = max_length
self.max_gene_num = max_gene_num
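
# Example with hypothetical values: a config that wraps both sequences in extra tokens
# and drops long proteins, e.g.
#   JolmaSplitConfig(name="p70c10s61087t100", protein_prefix="[CLS]", max_length=512)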
class JolmaSubset(datasets.GeneratorBasedBuilder):
SELEX_INFO = pd.read_excel(SELEX_INFO_FILE, index_col=0)
PROTEIN_INFO = pd.read_excel(PROTEIN_INFO_FILE, index_col=0)
BUILDER_CONFIGS = [
JolmaSplitConfig(name=key) for key in ["p70c10s61087t100", "p99c3s325414t1000", "p70c3s312303t100"]
]
DEFAULT_CONFIG_NAME = "p70c10s61087t100"
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("int32"),
"identifier": datasets.Value("string"),
"seq": datasets.Value("string"),
"quality": datasets.Value("string"),
"count": datasets.Value("int32"),
"protein": datasets.Value("string"),
"protein_id": datasets.Value("string"),
}
),
homepage="https://www.ebi.ac.uk/ena/browser/view/PRJEB3289",
citation=_CITATION,
)
@cached_property
def selex_info(self):
return self.SELEX_INFO.loc[self.config.name]
@cached_property
def protein_info(self):
return self.PROTEIN_INFO.loc[self.config.name]
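    # The "Ligand" column holds the barcode/oligo design (e.g. "TGCGGG20NGA");
    # the embedded number is the length of the randomized region.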
def design_length(self):
return int(pattern.search(self.protein_info["Ligand"]).group(0))
def get_selex_info(self, sra_id):
return self.SELEX_INFO.loc[sra_id]
def get_protein_info(self, sra_id):
return self.PROTEIN_INFO.loc[sra_id]
@cache
def get_design_length(self, sra_id):
return int(pattern.search(self.get_protein_info(sra_id)["Ligand"]).group(0))
def _split_generators(self, dl_manager):
if self.config.data_dir is not None and os.path.exists(self.config.data_dir):
train_file = os.path.join(self.config.data_dir, f"{self.config.name}_train.csv.gz")
test_file = os.path.join(self.config.data_dir, f"{self.config.name}_test.csv.gz")
else:
train_file = dl_manager.download(f"{URL}/{self.config.name}_train.csv.gz")
test_file = dl_manager.download(f"{URL}/{self.config.name}_test.csv.gz")
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_file}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_file}),
        ]
def _generate_examples(self, filepath):
"""This function returns the examples in the raw (text) form."""
logger.info("generating examples from = %s", filepath)
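        # Each pre-split CSV row carries at least "identifier", "seq", "count" and "quality";
        # the identifier is prefixed with the SRA run accession, which is used to look up the
        # corresponding protein record.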
data = pd.read_csv(filepath)
for key, row in data.iterrows():
sra_id = row["identifier"].split(":")[0]
protein_info = self.get_protein_info(sra_id)
proteins = protein_info["Sequence"]
gene_num = protein_info["Unique Gene"]
protein_id = protein_info["Entry"]
protein_seq = f"{self.config.protein_prefix}{proteins}{self.config.protein_suffix}"
aptamer_seq = f'{self.config.aptamer_prefix}{row["seq"]}{self.config.aptamer_suffix}'
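            # Skip rows whose protein is too long, maps to more than max_gene_num
            # unique genes, or has no usable sequence in the protein table.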
            if len(protein_seq) > self.config.max_length:
                continue
            if gene_num > self.config.max_gene_num:
                continue
            if pd.isna(proteins) or len(str(proteins)) == 0:
                continue
            ans = {
                "id": key,
                "protein": protein_seq,
                "protein_id": protein_id,
                "seq": aptamer_seq,
                "identifier": row["identifier"],
                "count": int(row["count"]),
                "quality": row["quality"],
            }
            yield key, ans
if __name__ == "__main__":
from datasets import load_dataset
dataset = load_dataset("jolma_split.py", split="all")
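    # A specific config and split can be requested instead, with name taken from the
    # BUILDER_CONFIGS above, e.g.:
    # dataset = load_dataset("jolma_split.py", name="p99c3s325414t1000", split="train")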