Tasks: Text Generation
Formats: parquet
Sub-tasks: language-modeling
Languages: Danish
Size: 10M - 100M
Kenneth Enevoldsen
committed on
add "in tokens" to average document length and add dataset sizes plot
- README.md +9 -0
- images/dataset_size_plot.html +0 -0
- images/dataset_size_plot.svg +1 -0
- src/dynaword/datasheet.py +2 -2
- src/dynaword/{plots.py → plots/descriptive_statistics_plots.py} +1 -1
- src/dynaword/{plot_tokens_over_time.py → plots/plot_tokens_over_time.py} +0 -0
- src/dynaword/plots/plots_dataset_size.py +126 -0
- src/dynaword/update_descriptive_statistics.py +1 -1
README.md
CHANGED
@@ -222,6 +222,7 @@ https://github.com/huggingface/datasets/blob/main/templates/README_guide.md
 - [Annotations](#annotations)
 - [Source Data](#source-data)
 - [Data Collection and Processing](#data-collection-and-processing)
+- [Dataset Statistics](#dataset-statistics)
 - [Contributing to the dataset](#contributing-to-the-dataset)
 - [Citation Information](#citation-information)
 - [License information](#license-information)
@@ -585,6 +586,14 @@ The data collection and processing varies depending on the dataset and is documented
 
 In addition to dataset-specific processing, we also run a series of automated checks covering formatting (e.g. correctly formatted columns and unique IDs), quality (e.g. duplicate and empty-string detection), and datasheet documentation. These checks ensure a high standard of documentation and a minimal level of quality. To allow for the development of novel cleaning methodologies, we do not apply more extensive cleaning.
 
+### Dataset Statistics
+The following plot(s) are intended to give an overview of document lengths across the various sources.
+
+<p align="center">
+<img src="./images/dataset_size_plot.svg" width="600" style="margin-right: 10px;" />
+</p>
+
+
 
 ### Contributing to the dataset
 
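The automated checks described in the README are not part of this diff. As a rough sketch, duplicate and empty-string detection over a datasets.Dataset could look like the following (check_quality and the id/text column names are illustrative assumptions, not the repository's actual API):

from collections import Counter

from datasets import Dataset


def check_quality(ds: Dataset) -> list[str]:
    # Illustrative checks: unique IDs, no empty documents, no duplicates.
    errors: list[str] = []
    if len(set(ds["id"])) != len(ds):
        errors.append("IDs are not unique")
    if any(not text.strip() for text in ds["text"]):
        errors.append("empty documents found")
    n_duplicates = sum(n - 1 for n in Counter(ds["text"]).values() if n > 1)
    if n_duplicates:
        errors.append(f"{n_duplicates} duplicate documents found")
    return errors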
images/dataset_size_plot.html
ADDED
The diff for this file is too large to render.
images/dataset_size_plot.svg
ADDED
src/dynaword/datasheet.py
CHANGED
@@ -11,7 +11,7 @@ from datasets import Dataset, IterableDataset, load_dataset
 from pydantic import BaseModel, field_validator
 
 from dynaword.descriptive_stats import DescriptiveStatsOverview
-from dynaword.plots import create_descriptive_statistics_plots
+from dynaword.plots.descriptive_statistics_plots import create_descriptive_statistics_plots
 from dynaword.typings import DOMAIN, LICENSE, LICENSE_NAMES_MAPPING
 
 logger = logging.getLogger(__name__)
@@ -195,7 +195,7 @@ class DataSheet(BaseModel):
         dedent(f"""
         - **Number of samples**: {convert_to_human_readable(d_stats.number_of_samples)}
         - **Number of tokens (Llama 3)**: {convert_to_human_readable(d_stats.number_of_tokens)}
-        - **Average document length (min, max)**: {convert_to_human_readable(d_stats.average_document_length_tokens)}
+        - **Average document length in tokens (min, max)**: {convert_to_human_readable(d_stats.average_document_length_tokens)} ({convert_to_human_readable(d_stats.min_length_tokens)}, {convert_to_human_readable(d_stats.max_length_tokens)})
         """).strip()
         + "\n"
     )
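For illustration, the updated f-string now renders each datasheet's overview roughly as follows (the numbers are hypothetical, assuming convert_to_human_readable abbreviates counts in the 1.2K / 3.4M style):

- **Number of samples**: 41.70K
- **Number of tokens (Llama 3)**: 93.02M
- **Average document length in tokens (min, max)**: 2.23K (23, 86.27K)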
src/dynaword/{plots.py → plots/descriptive_statistics_plots.py}
RENAMED
@@ -41,4 +41,4 @@ def create_descriptive_statistics_plots(
         verbose=False,
     )
 
-    return save_path, plot
+    return save_path, plot
src/dynaword/{plot_tokens_over_time.py → plots/plot_tokens_over_time.py}
RENAMED
File without changes
src/dynaword/plots/plots_dataset_size.py
ADDED
@@ -0,0 +1,126 @@
import json
import logging
from pathlib import Path

import pandas as pd
import plotly.graph_objects as go

from dynaword.datasheet import DataSheet
from dynaword.paths import repo_path

# Configure logging
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)


def _create_descriptive_stats_table(
    repo_path: Path = repo_path,
) -> pd.DataFrame:
    """
    Create a DataFrame from the descriptive statistics data.
    """
    p = (repo_path / "data").glob("**/*descriptive_stats.json")

    data = []
    for path in p:
        with path.open("r") as f:
            package = json.load(f)
        sheet = DataSheet.load_from_path(path.parent / f"{path.parent.name}.md")
        package["dataset_name"] = path.parent.name
        package["pretty_name"] = sheet.pretty_name
        data.append(package)

    df = pd.DataFrame(data)
    df["mean_length_tokens"] = df["number_of_tokens"] / df["number_of_samples"]
    df["mean_length_characters"] = df["number_of_characters"] / df["number_of_samples"]
    return df


def plot_dataset_size(df: pd.DataFrame) -> go.Figure:
    """Plot dataset size using a range plot with min, max, and mean token lengths."""
    # Calculate mean token length per document
    df["mean_length_tokens"] = df["number_of_tokens"] / df["number_of_samples"]

    # Create the range plot
    fig = go.Figure()

    # Add range bars (from min to max)
    for i, row in df.iterrows():
        fig.add_trace(
            go.Scatter(
                x=[row["min_length_tokens"], row["max_length_tokens"]],
                y=[row["dataset_name"], row["dataset_name"]],
                mode="lines",
                line=dict(color="lightgray", width=3),
                showlegend=False,
                hoverinfo="skip",
            )
        )

    # Add min points
    fig.add_trace(
        go.Scatter(
            x=df["min_length_tokens"],
            y=df["dataset_name"],
            mode="markers",
            marker=dict(color="lightblue", size=6, symbol="circle"),
            name="Min tokens",
            hovertemplate="<b>%{y}</b><br>Min: %{x:,} tokens<extra></extra>",
        )
    )

    # Add max points
    fig.add_trace(
        go.Scatter(
            x=df["max_length_tokens"],
            y=df["dataset_name"],
            mode="markers",
            marker=dict(color="darkred", size=6, symbol="circle"),
            name="Max tokens",
            hovertemplate="<b>%{y}</b><br>Max: %{x:,} tokens<extra></extra>",
        )
    )

    # Add mean points
    fig.add_trace(
        go.Scatter(
            x=df["mean_length_tokens"],
            y=df["dataset_name"],
            mode="markers",
            marker=dict(color="orange", size=8, symbol="diamond"),
            name="Mean tokens",
            hovertemplate="<b>%{y}</b><br>Mean: %{x:,.0f} tokens<extra></extra>",
        )
    )

    fig.update_layout(
        title="Token Length Distribution by Dataset<br><sub>Range (min-max) with mean values</sub>",
        xaxis_title="Number of Tokens (log scale)",
        xaxis_type="log",
        yaxis_title="Dataset",
        height=500,
        template="plotly_white",
        margin=dict(l=120),  # More space for dataset names
    )

    return fig


def create_dataset_size_plot() -> None:
    logger.info("Creating range plot of dataset sizes using `descriptive_stats.json`.")
    df = _create_descriptive_stats_table()
    fig = plot_dataset_size(df)

    save_path = repo_path / "images" / "dataset_size_plot.html"
    save_path_svg = repo_path / "images" / "dataset_size_plot.svg"

    logger.info(f"Saving dataset size plot to {save_path} and {save_path_svg}.")
    save_path.parent.mkdir(parents=True, exist_ok=True)
    fig.write_html(save_path)
    fig.write_image(save_path_svg)


if __name__ == "__main__":
    create_dataset_size_plot()
src/dynaword/update_descriptive_statistics.py
CHANGED
@@ -18,7 +18,7 @@ from datasets import Dataset, load_dataset
 from dynaword.datasheet import DataSheet
 from dynaword.descriptive_stats import DescriptiveStatsOverview
 from dynaword.paths import repo_path
-from dynaword.plot_tokens_over_time import create_tokens_over_time_plot
+from dynaword.plots.plot_tokens_over_time import create_tokens_over_time_plot
 from dynaword.tables import (
     create_grouped_table_str,
     create_overview_table,