import gzip
import logging
import os
import re
import shutil
import urllib.request
from abc import ABC, abstractmethod
from pathlib import Path
from typing import List

import datasets
import pandas as pd
from datasets import DatasetInfo
from pyfaidx import Fasta
from tqdm import tqdm

"""
--------------------------------------------------------------------------------------------
Reference Genome URLs:
--------------------------------------------------------------------------------------------
"""
H38_REFERENCE_GENOME_URL = (
    "https://hgdownload.soe.ucsc.edu/goldenPath/hg38/bigZips/hg38.fa.gz"
)

"""
--------------------------------------------------------------------------------------------
Task Specific Handlers:
--------------------------------------------------------------------------------------------
"""

logger = logging.getLogger("multi_omics_transcript_expression")
logger.setLevel("INFO")

LABELS_V1 = [
    "Adipose Tissue",
    "Adrenal Gland",
    "Bladder",
    "Blood",
    "Blood Vessel",
    "Brain",
    "Breast",
    "Cervix Uteri",
    "Colon",
    "Esophagus",
    "Fallopian Tube",
    "Heart",
    "Kidney",
    "Liver",
    "Lung",
    "Muscle",
    "Nerve",
    "Ovary",
    "Pancreas",
    "Pituitary",
    "Prostate",
    "Salivary Gland",
    "Skin",
    "Small Intestine",
    "Spleen",
    "Stomach",
    "Testis",
    "Thyroid",
    "Uterus",
    "Vagina",
]

LABELS_V2 = [
    "Adipose_Subcutaneous",
    "Adipose_Visceral (Omentum)",
    "Adrenal Gland",
    "Artery_Aorta",
    "Artery_Coronary",
    "Artery_Tibial",
    "Bladder",
    "Brain_Amygdala",
    "Brain_Anterior cingulate cortex (BA24)",
    "Brain_Caudate (basal ganglia)",
    "Brain_Cerebellar Hemisphere",
    "Brain_Cerebellum",
    "Brain_Cortex",
    "Brain_Frontal Cortex (BA9)",
    "Brain_Hippocampus",
    "Brain_Hypothalamus",
    "Brain_Nucleus accumbens (basal ganglia)",
    "Brain_Putamen (basal ganglia)",
    "Brain_Spinal cord (cervical c-1)",
    "Brain_Substantia nigra",
    "Breast_Mammary Tissue",
    "Cells_Cultured fibroblasts",
    "Cells_EBV-transformed lymphocytes",
    "Cervix_Ectocervix",
    "Cervix_Endocervix",
    "Colon_Sigmoid",
    "Colon_Transverse",
    "Esophagus_Gastroesophageal Junction",
    "Esophagus_Mucosa",
    "Esophagus_Muscularis",
    "Fallopian Tube",
    "Heart_Atrial Appendage",
    "Heart_Left Ventricle",
    "Kidney_Cortex",
    "Kidney_Medulla",
    "Liver",
    "Lung",
    "Minor Salivary Gland",
    "Muscle_Skeletal",
    "Nerve_Tibial",
    "Ovary",
    "Pancreas",
    "Pituitary",
    "Prostate",
    "Skin_Not Sun Exposed (Suprapubic)",
    "Skin_Sun Exposed (Lower leg)",
    "Small Intestine_Terminal Ileum",
    "Spleen",
    "Stomach",
    "Testis",
    "Thyroid",
    "Uterus",
    "Vagina",
    "Whole Blood",
]
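# Note: generate_examples below uses the coarse LABELS_V1 tissue names; LABELS_V2
# lists the finer-grained GTEx tissue/subregion names and is not used by it.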


class GenomicLRATaskHandler(ABC):
    """
    Abstract base class for the Genomic LRA task handlers. Each handler is
    responsible for providing dataset info, split generators, and example
    generation for its task.
    """

    @abstractmethod
    def __init__(self, **kwargs):
        pass

    @abstractmethod
    def get_info(self, description: str) -> DatasetInfo:
        """
        Returns the DatasetInfo for the task.
        """
        pass

    def split_generators(
        self, dl_manager, cache_dir_root
    ) -> List[datasets.SplitGenerator]:
        """
        Downloads required files using dl_manager and separates them by split.
        """
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"handler": self, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"handler": self, "split": "test"}
            ),
        ]
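    # The gen_kwargs above are passed through by the datasets library to
    # GenomicsLRATasks._generate_examples(handler, split) defined further below.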

    @abstractmethod
    def generate_examples(self, split):
        """
        A generator that yields examples for the specified split.
        """
        pass

    @staticmethod
    def hook(t):
        last_b = [0]

        def inner(b=1, bsize=1, tsize=None):
            """
            b  : int, optional
                Number of blocks just transferred [default: 1].
            bsize  : int, optional
                Size of each block (in tqdm units) [default: 1].
            tsize  : int, optional
                Total size (in tqdm units). If [default: None] remains unchanged.
            """
            if tsize is not None:
                t.total = tsize
            t.update((b - last_b[0]) * bsize)
            last_b[0] = b

        return inner
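    # `hook` adapts a tqdm progress bar to the (block_count, block_size, total_size)
    # reporthook callback signature expected by urllib.request.urlretrieve; see
    # download_and_extract_gz below for how the two are wired together.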

    def download_and_extract_gz(self, file_url, cache_dir_root):
        """
        Downloads and extracts a gz file into the given cache directory. Returns the
        full file path of the extracted gz file.
        Args:
            file_url: url of the gz file to be downloaded and extracted.
            cache_dir_root: Directory to extract file into.
        """
        file_fname = Path(file_url).stem
        file_complete_path = os.path.join(cache_dir_root, "downloads", file_fname)

        if not os.path.exists(file_complete_path):
            if not os.path.exists(file_complete_path + ".gz"):
                with tqdm(
                    unit="B",
                    unit_scale=True,
                    unit_divisor=1024,
                    miniters=1,
                    desc=file_url.split("/")[-1],
                ) as t:
                    urllib.request.urlretrieve(
                        file_url, file_complete_path + ".gz", reporthook=self.hook(t)
                    )
            with gzip.open(file_complete_path + ".gz", "rb") as file_in:
                with open(file_complete_path, "wb") as file_out:
                    shutil.copyfileobj(file_in, file_out)
        return file_complete_path
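    # Illustrative sketch (hypothetical cache directory): calling
    # download_and_extract_gz(H38_REFERENCE_GENOME_URL, "/tmp/cache") downloads
    # "hg38.fa.gz" into "/tmp/cache/downloads/" with a tqdm progress bar,
    # decompresses it, and returns "/tmp/cache/downloads/hg38.fa"
    # (Path("hg38.fa.gz").stem == "hg38.fa").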


class TranscriptExpressionHandler(GenomicLRATaskHandler):
    """
    Handler for the Transcript Expression task.
    """

    DEFAULT_LENGTH = 200000
    DEFAULT_FILTER_OUT_LENGTH = 196608

    def __init__(
        self,
        sequence_length: int = DEFAULT_LENGTH,
        filter_out_sequence_length: int = DEFAULT_FILTER_OUT_LENGTH,
        **kwargs,
    ):
        """
        Creates a new handler for the Transcript Expression Prediction Task.
        Args:
            sequence_length: Length of the sequence around the TSS_CAGE start site.
            filter_out_sequence_length: If set, examples whose window of this length
                falls outside the chromosome are filtered out.
        Instance Vars:
            reference_genome: The Fasta extracted reference genome.
            coordinate_csv_file: The csv file that stores the coordinates and filename
                of the target labels.
            labels_csv_file: The csv file that stores the labels with one sample per row.
            sequence_length: Sequence length for this handler.
        """
        self.reference_genome = None
        self.coordinate_csv_file = None
        self.labels_csv_file = None
        self.sequence_length = sequence_length
        self.filter_out_sequence_length = filter_out_sequence_length

        if filter_out_sequence_length is not None:
            assert isinstance(filter_out_sequence_length, int)
            assert (
                sequence_length <= filter_out_sequence_length
            ), f"{sequence_length=} > {filter_out_sequence_length=}"
        assert isinstance(sequence_length, int)

    def get_info(self, description: str) -> DatasetInfo:
        """
        Returns the DatasetInfo for the Transcript Expression dataset. Each example
        includes a genomic sequence and a list of label values.
        """
        features = datasets.Features(
            {
                "DNA": datasets.Value("string"),
                "labels": datasets.Sequence(datasets.Value("float32")),
                "labels_name": datasets.Sequence(datasets.Value("string")),
                "chromosome": datasets.Value(dtype="string"),
                "RNA": datasets.Value("string"),
                "Protein": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=description,
            features=features,
        )

    def split_generators(self, dl_manager, cache_dir_root):
        """
        Separates files by split and stores filenames in instance variables.
        The Transcript Expression dataset requires the reference hg38 genome and the
        GTEx dataframe csv file to be saved.
        """
        reference_genome_file = self.download_and_extract_gz(
            H38_REFERENCE_GENOME_URL, cache_dir_root
        )
        self.reference_genome = Fasta(reference_genome_file, one_based_attributes=False)

        self.df_csv_file = dl_manager.download_and_extract(
            "transcript_expression/GTEx_df_v1.csv"
        )

        return super().split_generators(dl_manager, cache_dir_root)

    def generate_examples(self, split):
        """
        A generator which produces examples for the given split, each with a sequence
        and the corresponding labels. The sequences are padded to the correct sequence
        length and standardized before returning.
        """
        df = pd.read_csv(self.df_csv_file)
        df = df.loc[df["chr"] != "chrMT"]
        labels_name = LABELS_V1

        split_df = df.loc[df["split"] == split]

        key = 0
        for idx, coordinates_row in split_df.iterrows():
            start = coordinates_row["position"] - 1

            chromosome = coordinates_row["chr"]
            labels_row = coordinates_row[LABELS_V1]
            padded_sequence = pad_sequence(
                chromosome=self.reference_genome[chromosome],
                start=start,
                sequence_length=self.sequence_length,
                negative_strand=coordinates_row["strand"] == "-",
                filter_out_sequence_length=self.filter_out_sequence_length,
            )
            if padded_sequence:
                yield key, {
                    "labels_name": labels_name,
                    "labels": labels_row.to_numpy(),
                    "DNA": standardize_sequence(padded_sequence),
                    "chromosome": re.sub("chr", "", chromosome),
                    "RNA": coordinates_row["RNA"],
                    "Protein": coordinates_row["Protein"],
                }
                key += 1
        logger.info(
            f"filtering out {len(split_df) - key} elements from the dataset"
        )
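    # For reference, each yielded example is a dict of the form (values illustrative):
    # {"DNA": "ACGT...", "labels": [<one float per tissue in labels_name>],
    #  "labels_name": LABELS_V1, "chromosome": "1", "RNA": "...", "Protein": "..."}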


"""
--------------------------------------------------------------------------------------------
Dataset loader:
--------------------------------------------------------------------------------------------
"""

_DESCRIPTION = """
Dataset for benchmarking genomic deep learning models.
"""


class GenomicsLRAConfig(datasets.BuilderConfig):
    """
    BuilderConfig.
    """

    def __init__(self, *args, **kwargs):
        """BuilderConfig for the genomics LRA tasks dataset.
        Args:
            **kwargs: keyword arguments forwarded to the task handler.
        """
        super().__init__()
        self.handler = TranscriptExpressionHandler(**kwargs)


class GenomicsLRATasks(datasets.GeneratorBasedBuilder):
    """
    Tasks to annotate the human genome.
    """

    VERSION = datasets.Version("1.1.0")
    BUILDER_CONFIG_CLASS = GenomicsLRAConfig

    def _info(self) -> DatasetInfo:
        return self.config.handler.get_info(description=_DESCRIPTION)

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> List[datasets.SplitGenerator]:
        """
        Downloads data files and organizes them into train/test splits.
        """
        return self.config.handler.split_generators(dl_manager, self._cache_dir_root)

    def _generate_examples(self, handler, split):
        """
        Reads data files and yields examples.
        Args:
            handler: The handler for the current task.
            split: A string in ['train', 'test'].
        """
        yield from handler.generate_examples(split)


"""
--------------------------------------------------------------------------------------------
Global Utils:
--------------------------------------------------------------------------------------------
"""


def standardize_sequence(sequence: str):
    """
    Standardizes the sequence by replacing all unknown characters with N and
    converting to all uppercase.
    Args:
        sequence: genomic sequence to standardize
    """
    pattern = "[^ATCG]"

    sequence = sequence.upper()

    sequence = re.sub(pattern, "N", sequence)
    return sequence
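# For example (sketch): standardize_sequence("acgtxn") returns "ACGTNN".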


def pad_sequence(
    chromosome,
    start,
    sequence_length,
    negative_strand=False,
    filter_out_sequence_length=None,
):
    """
    Extends a given sequence to length sequence_length, centered around the start
    index. If padding to the given length falls outside the chromosome boundaries,
    returns None.
    Args:
        chromosome: Chromosome from pyfaidx extracted Fasta.
        start: Start index of original sequence.
        sequence_length: Desired sequence length. If sequence length is odd, the
            remainder is added to the end of the sequence.
        negative_strand: If negative_strand, returns the reverse complement of the
            sequence.
        filter_out_sequence_length: If set, also returns None when a window of this
            length does not fit inside the chromosome.
    """
    pad = sequence_length // 2
    end = start + pad + (sequence_length % 2)
    start = start - pad

    if filter_out_sequence_length is not None:
        filter_out_pad = filter_out_sequence_length // 2
        filter_out_end = start + filter_out_pad + (filter_out_sequence_length % 2)
        filter_out_start = start - filter_out_pad

        if filter_out_start < 0 or filter_out_end >= len(chromosome):
            return

    if start < 0 or end >= len(chromosome):
        return

    if negative_strand:
        return chromosome[start:end].reverse.complement.seq
    return chromosome[start:end].seq
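

# Minimal usage sketch (assumptions: this script is saved locally as
# "multi_omics_transcript_expression.py", the accompanying
# "transcript_expression/GTEx_df_v1.csv" file is available next to it, and the
# hg38 reference genome download succeeds; the short sequence lengths below are
# only for illustration and differ from the class defaults).
if __name__ == "__main__":
    dataset = datasets.load_dataset(
        "multi_omics_transcript_expression.py",
        sequence_length=2048,
        filter_out_sequence_length=4096,
        trust_remote_code=True,
    )
    example = next(iter(dataset["train"]))
    print(example["chromosome"], len(example["DNA"]), len(example["labels"]))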