Datasets:
Tasks:
Text Generation
Formats:
parquet
Sub-tasks:
language-modeling
Languages:
Danish
Size:
10M - 100M
ArXiv:
DOI:
License:
File size: 916 Bytes
78108d3 65faa6e a5ac9e2 65faa6e a3c400c 78108d3 a5ac9e2 78108d3 936cd0c 7ed5f43 78108d3 3e28a50 a5ac9e2 65faa6e a3c400c 65faa6e a3c400c 78108d3 a3c400c 78108d3 a3c400c ed22468 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 |
from datasets import load_dataset
from dynaword.datasheet import DataSheet
from dynaword.paths import repo_path
# Datasets that were intentionally removed from the collection — typically
# because of legal disputes or open questions about the legality of the data.
# Their folders may still exist on disk, so tests must skip them.
REMOVED_DATA = ["lexdk"]
def test_dataset_loads():
    """Smoke test: the dataset builder at the repo root yields at least one record.

    Loads the dataset in streaming mode (no full download) and checks that
    the first sample of the train split is a plain dict.
    """
    dataset_path = str(repo_path.resolve())
    stream = load_dataset(dataset_path, split="train", streaming=True)
    first_sample = next(iter(stream))
    assert isinstance(first_sample, dict)
def test_all_datasets_in_yaml():
    """Every dataset folder under data/ must be declared as a config in README.md.

    Folders named in REMOVED_DATA are exempt: those datasets were deliberately
    dropped from the frontmatter, but their directories may linger on disk.
    """
    ds_sheet = DataSheet.load_from_path(repo_path / "README.md")
    ds_names = {
        cfg["config_name"]
        for cfg in ds_sheet.frontmatter["configs"]
        if cfg["config_name"] != "default"
    }

    data_folder = repo_path / "data"
    for dataset in data_folder.iterdir():
        # Skip stray files (e.g. .DS_Store, notes) — only directories are datasets.
        if not dataset.is_dir():
            continue
        if dataset.name in REMOVED_DATA:
            continue
        assert dataset.name in ds_names, (
            f"Dataset folder '{dataset.name}' has no matching config in README.md"
        )
|