Tasks: Text Generation
Formats: parquet
Sub-tasks: language-modeling
Languages: Danish
Size: 10M - 100M
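The card above describes a Danish text-generation / language-modeling corpus distributed as parquet. As a rough orientation, a dataset like this can be loaded with the datasets library; this is a minimal sketch, and the repository id below is a placeholder rather than the actual name:

from datasets import load_dataset

# "<org>/<dataset>" is a placeholder repository id; substitute the real one.
dataset = load_dataset("<org>/<dataset>", split="train")

print(dataset)                    # row count and column names
print(dataset[0]["text"][:200])   # start of the first document (a "text" column is assumed, matching the code below)

The descriptive statistics reported for the dataset are computed by the following module: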
import json
import logging
from dataclasses import dataclass
from pathlib import Path
from typing import Self

from datasets import Dataset

from dynaword.git_utilities import (
    get_current_revision,
)

logger = logging.getLogger(__name__)
def calculate_average_document_length(
    dataset: Dataset, text_column: str = "text"
) -> float:
    # Mean document length measured in characters, not tokens.
    total_characters = sum(len(t) for t in dataset[text_column])
    return total_characters / len(dataset)
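
# Illustration with hypothetical values: two documents of 2 and 4 characters
# average to (2 + 4) / 2 == 3.0 characters per document, e.g.
#   calculate_average_document_length(Dataset.from_dict({"text": ["ab", "abcd"]}))
#   == 3.0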
@dataclass
class DescriptiveStatsOverview:
    number_of_samples: int
    average_document_length: float
    number_of_tokens: int

    @classmethod
    def from_disk(cls, path: Path) -> Self:
        with path.open("r") as f:
            data = json.load(f)
        # The stored revision is metadata, not a dataclass field, so drop it.
        if "revision" in data:
            data.pop("revision")
        obj = cls(**data)
        return obj

    def to_disk(self, path: Path):
        # Copy the fields so adding the revision does not mutate the instance.
        data = dict(self.__dict__)
        data["revision"] = get_current_revision()
        with path.with_suffix(".json").open("w") as f:
            json.dump(data, f, indent=2)
    @classmethod
    def from_dataset(cls, dataset: Dataset) -> Self:
        # Assumes the dataset has "text" and "token_count" columns.
        return cls(
            number_of_samples=len(dataset),
            average_document_length=calculate_average_document_length(dataset),
            number_of_tokens=sum(dataset["token_count"]),
        )
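
A minimal usage sketch, assuming the dynaword package is importable, the code runs inside a git checkout (get_current_revision reads the repository), and the dataset has "text" and "token_count" columns as above; the example documents and token counts are made up:

from pathlib import Path

from datasets import Dataset

ds = Dataset.from_dict(
    {
        "text": ["Hej verden", "Dette er et dokument"],
        "token_count": [2, 4],
    }
)

stats = DescriptiveStatsOverview.from_dataset(ds)
print(stats.number_of_samples)        # 2
print(stats.average_document_length)  # 15.0 characters per document
print(stats.number_of_tokens)         # 6

# Round-trip through JSON; to_disk() also records the current git revision,
# which from_disk() drops again before reconstructing the dataclass.
stats.to_disk(Path("descriptive_stats.json"))
restored = DescriptiveStatsOverview.from_disk(Path("descriptive_stats.json"))
assert restored == stats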