Dataset card metadata:
  Tasks: Text Generation
  Sub-tasks: language-modeling
  Formats: parquet
  Languages: Danish
  Size: 10M - 100M
  ArXiv: (none listed)
  DOI: (none listed)
  License: (none listed)
  File size: 1,194 Bytes
65faa6e 566156e 65faa6e 566156e 65faa6e 566156e 65faa6e 566156e 65faa6e 566156e 65faa6e |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 |
import logging
from pathlib import Path
from typing import cast
from datasets import Dataset, load_dataset
from dynaword.datasheet import DataSheet
from dynaword.process_dataset import (
ensure_column_order,
# add_token_count,
# remove_duplicate_text,
# remove_empty_texts,
)
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)

# Emit timestamped, INFO-level records for the whole process.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
root = Path(__file__).parent
data_path = root / "data"

# The datasheet's frontmatter lists one entry per dataset config.
sheet = DataSheet.load_from_path(root / "README.md")

# NOTE(review): the first config is skipped via [1:] — presumably the
# combined/default config that aggregates the others; confirm against README.md.
for dataset_cfg in sheet.frontmatter["configs"][1:]:
    config_name = dataset_cfg["config_name"]
    logger.info("Processing %s", config_name)
    _data_path = data_path / config_name

    ds = load_dataset(_data_path.as_posix(), split="train")
    ds = cast(Dataset, ds)

    # Drop columns not wanted in the exported parquet schema.
    ds = ds.remove_columns(["license", "metadata", "domain"])
    # Token counting / empty-text removal / dedup steps are deliberately
    # not applied here (see the commented imports at the top of the file).
    ds = ensure_column_order(ds)

    # Write the cleaned split back next to its source data.
    ds.to_parquet(_data_path / f"{config_name}.parquet")
    logger.info("Saved %s.parquet", config_name)

    # Release the dataset before loading the next config to keep peak memory flat.
    del ds
|