Datasets:
Modalities: Image, Text
Formats: parquet
Languages: Danish
Libraries: Datasets, Dask
File size: 5,597 Bytes
"""
A simple CLI to update the descriptive statistics of all datasets.

Example use:

    uv run src/dynaword/update_descriptive_statistics.py --dataset wikisource
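
    # update all datasets (and the combined "default" overview)
    uv run src/dynaword/update_descriptive_statistics.py

    # force a recompute of a single dataset
    uv run src/dynaword/update_descriptive_statistics.py --dataset wikisource --force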

"""

import argparse
import json
import logging
from pathlib import Path
from typing import cast

import plotly.express as px
from datasets import Dataset, load_dataset

from dynaword.datasheet import DataSheet
from dynaword.descriptive_stats import DescriptiveStatsOverview
from dynaword.git_utilities import (
    check_is_ancestor,
    get_latest_revision,
)
from dynaword.paths import repo_path
from dynaword.plot_tokens_over_time import create_tokens_over_time_plot
from dynaword.tables import (
    create_grouped_table_str,
    create_overview_table,
    create_overview_table_str,
)

logger = logging.getLogger(__name__)

# Collect the per-dataset config names from the main README's frontmatter,
# excluding the combined "default" config.
main_sheet = DataSheet.load_from_path(repo_path / "README.md")
_datasets = [
    cfg["config_name"]  # type: ignore
    for cfg in main_sheet.frontmatter["configs"]  # type: ignore
    if cfg["config_name"] != "default"  # type: ignore
]


def create_domain_distribution_plot(
    save_dir: Path = repo_path,
):
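    # Build a sunburst chart of token counts grouped by domain and source,
    # and save it as a PNG under <save_dir>/images/.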
    df = create_overview_table(
        add_readable_tokens=False, add_total_row=False, add_readme_references=False
    )
    fig = px.sunburst(df, path=["Domain", "Source"], values="N. Tokens")

    fig.update_traces(textinfo="label+percent entry")
    fig.update_layout(title="Dataset Distribution by Domain and Source")

    img_path = save_dir / "images"
    img_path.mkdir(parents=False, exist_ok=True)
    save_path = img_path / "domain_distribution.png"
    fig.write_image(
        save_path,
        width=800,
        height=800,
        scale=2,
    )


def update_dataset(
    dataset_name: str,
    force: bool = False,
) -> None:
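    # The "default" config denotes the combined dataset documented in the
    # top-level README; all other datasets live under data/<dataset_name>/.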
    dataset_path = (
        repo_path / "data" / dataset_name if dataset_name != "default" else repo_path
    )

    if dataset_name == "default":
        readme_name = "README.md"
    else:
        readme_name = f"{dataset_name}.md"

    rev = get_latest_revision(dataset_path)
    desc_stats_path = dataset_path / "descriptive_stats.json"
    markdown_path = dataset_path / readme_name

    # Skip recomputation when the recorded revision is an ancestor of the
    # latest revision, i.e. the stats are already up to date.
    if desc_stats_path.exists() and not force:
        with desc_stats_path.open("r") as f:
            last_update = json.load(f).get("revision", None)

        if last_update is None:
            logger.warning(f"revision is not defined in {desc_stats_path}.")
        elif check_is_ancestor(ancestor_rev=last_update, rev=rev):
            logger.info(
                f"descriptive statistics for '{dataset_name}' is already up to date, skipping."
            )
            return

    logger.info(f"Computing descriptive stats for: {dataset_name}")
    ds = load_dataset(str(repo_path), dataset_name, split="train")
    ds = cast(Dataset, ds)
    desc_stats = DescriptiveStatsOverview.from_dataset(ds)
    desc_stats.to_disk(desc_stats_path)

    logger.info(f"Updating datasheet for: {dataset_name}")
    sheet = DataSheet.load_from_path(markdown_path)
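    # Regenerate the auto-generated sections of the datasheet: descriptive
    # stats, the sample/description section, and the per-dataset plots.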
    sheet.body = sheet.add_descriptive_stats(descriptive_stats=desc_stats)
    sheet.body = sheet.add_sample_and_description(ds)
    sheet.body = sheet.add_dataset_plots(ds, create_plot=True)

    if dataset_name == "default":
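        # The main README additionally aggregates across all datasets; refresh
        # its overview, domain, and license tables and the summary plots.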
        logger.info("Updating Overview table")
        overview_table = create_overview_table_str()
        sheet.body = sheet.replace_tag(package=overview_table, tag="MAIN TABLE")
        logger.info("Updating domain table")
        domain_table = create_grouped_table_str(group="Domain")
        sheet.body = sheet.replace_tag(package=domain_table, tag="DOMAIN TABLE")
        logger.info("Updating license table")
        domain_table = create_grouped_table_str(group="License")
        sheet.body = sheet.replace_tag(package=domain_table, tag="LICENSE TABLE")
        create_domain_distribution_plot()
        create_tokens_over_time_plot()

    sheet.write_to_path()


def create_parser():
    parser = argparse.ArgumentParser(
        description="Calculates descriptive statistics for the datasets in the data folder."
    )
    parser.add_argument(
        "--dataset",
        default=None,
        type=str,
        help="Limit computation to a single named dataset; by default all datasets are processed.",
    )
    parser.add_argument(
        "--logging_level",
        default=20,
        type=int,
        help="Sets the logging level. Defaults to 20 (INFO); other reasonable levels are 10 (DEBUG) and 30 (WARNING).",
    )
    parser.add_argument(
        "--force",
        default=False,
        action=argparse.BooleanOptionalAction,
        help="Force the statistics to be recomputed. By default they are only recomputed when the dataset revision has changed.",
    )
    parser.add_argument(
        "--repo_path",
        default=str(repo_path),
        type=str,
        help="The repository for which to calculate the descriptive statistics.",
    )
    return parser


def main(
    dataset: str | None = None,
    logging_level: int = 20,
    force: bool = False,
    repo_path: Path = repo_path,
) -> None:
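    # NOTE: the repo_path argument is accepted for CLI compatibility but is
    # currently unused; update_dataset resolves paths via dynaword.paths.repo_path.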
    logging.basicConfig(level=logging_level)

    if dataset:
        update_dataset(dataset, force=force)
    else:
        for dataset_name in _datasets:
            update_dataset(dataset_name, force=force)
        update_dataset("default", force=force)


if __name__ == "__main__":
    parser = create_parser()
    args = parser.parse_args()

    main(
        args.dataset,
        logging_level=args.logging_level,
        force=args.force,
        repo_path=Path(args.repo_path),
    )