import logging
import os
import tempfile
from collections.abc import Callable
from concurrent.futures import ThreadPoolExecutor, as_completed

import pandas as pd
import pooch
import requests
from rdkit import Chem
from tqdm import tqdm

from lobster.data import upload_to_s3

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

S3_BASE_URI = os.environ["S3_BASE_URI"]
S3_RAW = f"{S3_BASE_URI}/raw"
S3_PROCESSED = f"{S3_BASE_URI}/pre-processed"

MAX_WORKERS = 16

# Map raw molecule-type labels onto the coarse modalities used downstream.
MODALITY_MAPPING = {
    "polypeptide(L)": "amino_acid",
    "polynucleotide": "nucleotide",
    "polyribonucleotide": "nucleotide",
    "polynucleotide (RNA)": "nucleotide",
    "polynucleotide (DNA)": "nucleotide",
    "polydeoxyribonucleotide": "nucleotide",
    "polydeoxyribonucleotide/polyribonucleotide hybrid ": "nucleotide",  # trailing space kept deliberately
    "smiles": "smiles",
}

IDENTIFIERS = {
    "protein-peptide": "11033993",
    "protein-rna": "11033983",
    "protein-protein": "11033984",
    "rna-small_molecule": "11033985",
    "protein-small_molecule": "11033996",
    "protein-ion": "11033989",
    "protein-dna": "11033982",
    "small_molecule-small_molecule": "11033997",
}
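# Each value is a Harvard Dataverse datafile ID, resolved below via
# https://dataverse.harvard.edu/api/access/datafile/<id>.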


def upload_raw_datasets_to_s3() -> None:
    """Download each raw dataset from the Harvard Dataverse and mirror it to S3."""
    for fname, identifier in IDENTIFIERS.items():
        with tempfile.TemporaryDirectory() as tmpdir:
            local_path = pooch.retrieve(
                f"https://dataverse.harvard.edu/api/access/datafile/{identifier}",
                fname=f"{fname}.csv",
                known_hash=None,
                path=tmpdir,
                progressbar=True,
            )
            upload_to_s3(
                s3_uri=f"{S3_RAW}/{fname}.csv",
                local_filepath=local_path,
            )


def _canonicalize_smiles(smiles: str) -> str | None:
    """Canonicalize a SMILES string, returning None if it cannot be parsed."""
    mol = Chem.MolFromSmiles(smiles)
    return Chem.MolToSmiles(mol, canonical=True) if mol else None
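# Example: _canonicalize_smiles("C1=CC=CC=C1") returns "c1ccccc1", while an
# unparseable string such as "not-a-smiles" returns None.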


def _fetch_pdb_data(pdb_id: str) -> list[dict]:
    """Fetch molecular data from the PDBe API for a given PDB ID."""
    res = requests.get(f"https://www.ebi.ac.uk/pdbe/api/pdb/entry/molecules/{pdb_id}", timeout=60)

    if res.status_code != 200:
        raise Exception(f"Failed to fetch data for {pdb_id}: {res.status_code}")

    # The response is keyed by the lowercase PDB ID.
    return res.json().get(pdb_id.lower(), [])
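# Each returned record describes one molecule in the entry, with fields used
# below such as "sequence", "molecule_type", and "in_chains".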


def _fetch_smiles_data(ligand_id: str) -> str | None:
    """Fetch the SMILES string for a ligand from PDBe."""
    try:
        res = requests.get(
            f"https://www.ebi.ac.uk/pdbe/api/pdb/compound/summary/{ligand_id.lower()}", timeout=60
        )

        if res.status_code != 200:
            raise Exception(f"Failed to fetch data for {ligand_id}: {res.status_code}")

        ligand_data = res.json()[ligand_id][0]

        # Return the first SMILES entry that parses and canonicalizes cleanly.
        for entry in ligand_data.get("smiles", []):
            try:
                canonical = _canonicalize_smiles(entry["name"])
            except Exception:
                continue
            if canonical:
                return canonical
        return None
    except Exception as e:
        raise Exception(f"Failed to fetch SMILES for {ligand_id}") from e
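# As accessed above, the compound summary payload is expected to look like
# {"<LIGAND_ID>": [{"smiles": [{"name": "<SMILES>"}, ...], ...}]}.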


def _safe_extract(row_id: str, sequence_extractor: Callable) -> dict | None:
    """Safely extract sequences and modalities given a row ID and extractor."""
    try:
        sequences, modalities = sequence_extractor(row_id)
    except Exception as e:
        logger.warning(f"Error processing {row_id}: {e}")
        return None

    # Sort so protein chains come first, then pad/truncate to exactly 5 slots.
    combined = list(zip(sequences, modalities))
    combined.sort(key=lambda x: x[1] != "polypeptide(L)")
    sequences, modalities = zip(*combined) if combined else ([], [])

    sequences = (list(sequences) + [None] * 5)[:5]
    modalities = (list(modalities) + [None] * 5)[:5]

    return {
        "id": row_id,
        **{f"sequence{i + 1}": sequences[i] for i in range(5)},
        **{f"modality{i + 1}": modalities[i] for i in range(5)},
    }
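# Every extracted row therefore shares a fixed schema: id, sequence1..sequence5,
# modality1..modality5, with unused slots left as None.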


def process_rows(df: pd.DataFrame, sequence_extractor: Callable, debug: bool = False) -> pd.DataFrame:
    """Process rows in the dataframe to extract sequence and modality information.

    Parameters
    ----------
    df : pd.DataFrame
        Input dataframe with an 'id' column.
    sequence_extractor : Callable
        Function to extract sequences and modalities given a row id.
    debug : bool, optional
        If True, only processes the first 10 rows.

    Returns
    -------
    pd.DataFrame
        Dataframe with sequence and modality columns.
    """
    row_ids = df["id"].tolist()[:10] if debug else df["id"].tolist()
    results = []
    with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
        futures = {executor.submit(_safe_extract, rid, sequence_extractor): rid for rid in row_ids}
        for future in tqdm(as_completed(futures), total=len(futures)):
            result = future.result()
            if result:
                results.append(result)
    return pd.DataFrame(results).replace(MODALITY_MAPPING)
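# Example: process_rows(df, extract_protein_nucleotide, debug=True) processes only
# the first 10 ids, which is useful as a dry run before a full pass.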


def extract_protein_nucleotide(row_id: str) -> tuple[list[str], list[str]]:
    """Example ID: 3adi_1.pdb_3adi_1_RNA_D&E.pdb"""
    pdb_id = row_id.split("_")[0]
    pdb_data = _fetch_pdb_data(pdb_id)
    pairs = [(m["sequence"], m["molecule_type"]) for m in pdb_data if "sequence" in m]
    sequences, types = zip(*pairs) if pairs else ([], [])
    return list(sequences), list(types)


def extract_protein_small_molecule(row_id: str) -> tuple[list[str], list[str]]:
    """Example ID: 4h4e_1.pdb_4h4e_1_10G_D.pdb"""
    parts = row_id.split("_")
    pdb_id, ligand_id = parts[2], parts[4]
    pdb_data = _fetch_pdb_data(pdb_id)
    pairs = [(m["sequence"], m["molecule_type"]) for m in pdb_data if "sequence" in m]
    sequences = [seq for seq, _ in pairs]
    types = [mol_type for _, mol_type in pairs]
    ligand = _fetch_smiles_data(ligand_id)
    if ligand:
        sequences.append(ligand)
        types.append("smiles")
    return sequences, types
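# For the example id above, parts[2] == "4h4e" (the PDB id) and
# parts[4] == "10G" (the ligand code).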


def extract_interacting_chains(row_id: str) -> tuple[list[str], list[str]]:
    """Example ID: 2uxq_2_A_B"""
    pdb_id, _, chain_a, chain_b = row_id.split("_")
    chains_of_interest = {chain_a, chain_b}
    molecules = _fetch_pdb_data(pdb_id)
    chain_map = {}
    for m in molecules:
        if "sequence" not in m or "in_chains" not in m:
            continue
        for chain in m["in_chains"]:
            if chain in chains_of_interest:
                chain_map[chain] = (m["sequence"], m["molecule_type"])
    sequences, types = [], []
    for chain in [chain_a, chain_b]:
        if chain in chain_map:
            seq, mol_type = chain_map[chain]
            sequences.append(seq)
            types.append(mol_type)
        else:
            sequences.append(None)
            types.append(None)
    return sequences, types
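# Chains absent from the entry are recorded as None placeholders so the
# (sequence, modality) pairs stay aligned with chain_a and chain_b.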


def process_protein_nucleotide(debug: bool = False) -> None:
    # The protein-DNA and protein-RNA datasets share the same extraction logic.
    for name in ("protein-dna", "protein-rna"):
        df = pd.read_csv(f"{S3_RAW}/{name}.csv", sep="\t")
        df_out = process_rows(df, extract_protein_nucleotide, debug=debug)
        df_out = df_out.drop_duplicates().reset_index(drop=True)
        df_out.to_parquet(f"{S3_PROCESSED}/{name}.parquet", index=False)


def process_protein_protein(debug: bool = False) -> None:
    df = pd.read_csv(f"{S3_RAW}/protein-protein.csv", sep="\t")
    df_out = process_rows(df, extract_interacting_chains, debug=debug)
    df_out = df_out.drop_duplicates().reset_index(drop=True)
    df_out.to_parquet(f"{S3_PROCESSED}/protein-protein.parquet", index=False)


def process_protein_small_molecule(debug: bool = False) -> None:
    df = pd.read_csv(f"{S3_RAW}/protein-small_molecule.csv", sep="\t")
    df_out = process_rows(df, extract_protein_small_molecule, debug=debug)
    df_out = df_out.dropna(how="all", axis=1).drop_duplicates().reset_index(drop=True)
    df_out.to_parquet(f"{S3_PROCESSED}/protein-small_molecule.parquet", index=False)


def process_smiles_smiles() -> None:
    df = pd.read_csv(f"{S3_RAW}/small_molecule-small_molecule.csv")
    df = df["id"].str.split("_", expand=True)
    df.columns = ["id", "sequence1", "num_1", "sequence2", "num_2"]
    df = df[df["sequence1"] != df["sequence2"]][["sequence1", "sequence2"]]
    df.insert(1, "modality1", "smiles")
    df.insert(3, "modality2", "smiles")
    df = df.drop_duplicates().reset_index(drop=True)
    df.to_parquet(f"{S3_PROCESSED}/small_molecule-small_molecule.parquet", index=False)
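# Assumes ids of the form "<id>_<smiles1>_<n>_<smiles2>_<n>"; self-pairs
# (identical SMILES on both sides) are dropped before writing.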


if __name__ == "__main__":
    upload_raw_datasets_to_s3()
    process_protein_nucleotide(debug=False)
    process_protein_protein(debug=False)
    process_protein_small_molecule(debug=False)
    process_smiles_smiles()