Datasets:
Tasks: Text Generation
Formats: parquet
Sub-tasks: language-modeling
Languages: Danish
Size: 10M - 100M
ArXiv:
DOI:
License:
from datasets import load_dataset

from dynaword.datasheet import DataSheet
from dynaword.paths import repo_path
# Datasets that have been removed due to legal disputes, questions about
# legality, or similar concerns. Folders with these names under data/ are
# intentionally absent from the README frontmatter.
REMOVED_DATA = [
    "lexdk",
]
def test_dataset_loads():
    """Verify the dataset loads in streaming mode and yields dict samples."""
    dataset_path = str(repo_path.resolve())
    stream = load_dataset(dataset_path, split="train", streaming=True)
    # Pulling a single record is enough to prove the loader works end to end.
    first_record = next(iter(stream))
    assert isinstance(first_record, dict)
def test_all_datasets_in_yaml():
    """Check every dataset folder under data/ is declared in the README frontmatter.

    Folders listed in REMOVED_DATA are exempt: they were deliberately taken
    out of the configs and are expected to be missing.
    """
    sheet = DataSheet.load_from_path(repo_path / "README.md")
    declared = set()
    for cfg in sheet.frontmatter["configs"]:
        if cfg["config_name"] != "default":
            declared.add(cfg["config_name"])

    for entry in (repo_path / "data").glob("*"):
        if entry.name in REMOVED_DATA:
            continue
        assert entry.name in declared