Tasks: Text2Text Generation
Modalities: Text
Formats: parquet
Size: 10M - 100M
Tags: biology
License:
Upload preprocess.py with huggingface_hub
preprocess.py (ADDED, +31 -0)
@@ -0,0 +1,31 @@
+from transformers import T5Tokenizer
+import polars as pl
+from tqdm import tqdm
+from functools import partial
+
+
+def detokenize(seq: list[int], tokenizer: T5Tokenizer):
+    output = tokenizer.decode(seq)
+    output = output.replace(" ", "")
+    output = output.replace("</s>", "")
+
+    assert len(output) == len(seq[:-1])
+    return output
+
+
+def main():
+    tokenizer = T5Tokenizer.from_pretrained('Rostlab/ProstT5', do_lower_case=False)  # noqa
+    splits = {'test': 'data/test-00000-of-00001-b109fa020c25190c.parquet', 'valid': 'data/valid-00000-of-00001-6442282fee0bc004.parquet', 'train': 'data/train-*-of-*.parquet'}  # noqa
+
+    detokenize_func = partial(detokenize, tokenizer=tokenizer)
+
+    for k, v in tqdm(splits.items()):
+        df = pl.scan_parquet('hf://datasets/Rostlab/ProstT5Dataset/' + v)  # noqa
+        df = df.with_columns(pl.col("input_id_x").map_elements(detokenize_func, return_dtype=pl.String).alias("3di"))  # noqa
+        df = df.with_columns(pl.col("input_id_y").map_elements(detokenize_func, return_dtype=pl.String).alias("protein"))  # noqa
+        df = df.drop("input_id_x").drop("input_id_y")
+        df.sink_parquet(k + ".parquet")
+
+
+if __name__ == "__main__":
+    main()
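
For a quick sanity check after running the script, the emitted splits can be read back with polars. The following is a minimal sketch, not part of the commit: it assumes the script has been run in the working directory (so test.parquet exists) and a recent polars release that provides str.len_chars. Since 3Di assigns one structural token per residue, each decoded 3Di string is expected to be exactly as long as its paired protein sequence:

import polars as pl

# Assumes preprocess.py has already been run here, producing test.parquet.
df = pl.read_parquet("test.parquet")

# Every 3Di string should match the length of its paired protein sequence.
assert df.select(
    (pl.col("3di").str.len_chars() == pl.col("protein").str.len_chars()).all()
).item()

print(df.head())

The script itself pairs scan_parquet with sink_parquet rather than read_parquet/write_parquet, so polars can plan the query lazily and, where the plan supports streaming, write each split to disk without collecting the full 10M+ row dataset into memory at once.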