Tasks: Text Generation
Sub-tasks: language-modeling
Formats: parquet
Languages: Danish
Size: 10M - 100M
Commit fb6adc2 · Parent(s): dd36adf

Added Wikipedia comments

Files changed:
- README.md (+4 -0)
- data/wiki-comments/create.py (+86 -0)
- data/wiki-comments/wiki-comments.md (+89 -0)
- data/wiki-comments/wiki-comments.parquet (+3 -0)
- test_results.log (+12 -13)
README.md
CHANGED
@@ -129,6 +129,10 @@ configs:
     data_files:
     - split: train
       path: data/wiki/*.parquet
+  - config_name: wiki-comments
+    data_files:
+    - split: train
+      path: data/wiki-comments/*.parquet
   - config_name: nordjyllandnews
     data_files:
     - split: train
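With this config in place, the new subset should be loadable like any other config of the dataset; a minimal sketch (the repository id below is an assumption for illustration, not taken from this commit):

```py
from datasets import load_dataset

# Repository id assumed for illustration; substitute the actual dataset repo.
ds = load_dataset(
    "danish-foundation-models/danish-dynaword",
    name="wiki-comments",  # config added in this commit
    split="train",
)
print(ds[0]["id"], ds[0]["source"])
```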
data/wiki-comments/create.py
ADDED
@@ -0,0 +1,86 @@
import sys
import os
import json
import html

from datasets import IterableDataset
from datasets import Dataset

import fasttext
from huggingface_hub import hf_hub_download

module_path = os.path.abspath("src/")
if module_path not in sys.path:
    sys.path.append(module_path)
from dynaword.process_dataset import (
    add_token_count,
    ensure_column_order,
    remove_duplicate_text,
    remove_empty_texts,
)


def run_cmd(cmd):
    print(cmd)
    os.system(cmd)


date = '20250720'  # obtained from https://dumps.wikimedia.org/dawiki/

# Download the data:
if not os.path.isfile('dawiki-' + date + '-pages-articles.xml.bz2'):
    cmd = 'wget https://dumps.wikimedia.org/dawiki/' + date + '/dawiki-' + date + '-pages-articles.xml.bz2'
    run_cmd(cmd)

# Get the adapted wikiextractor
if not os.path.isdir('wikiextractor'):
    cmd = 'git clone https://github.com/robvanderg/wikiextractor.git'
    run_cmd(cmd)

# clean the data
if not os.path.isdir('wikiextractor/dawiki-misc'):
    cmd = 'cd wikiextractor && python3 -m wikiextractor.WikiExtractor ../dawiki-' + date + '-pages-articles.xml.bz2 -o dawiki-misc --get_misc --json && cd ../'
    run_cmd(cmd)

model_path = hf_hub_download(repo_id="facebook/fasttext-language-identification", filename="model.bin")
fasttext_model = fasttext.load_model(model_path)


def read_and_clean(path):
    comment_id = 0
    all_rows = []
    for (root, dirs, files) in os.walk(path, topdown=True):
        for file in files:
            path = os.path.join(root, file)
            for line in open(path):
                linedata = json.loads(line)
                title = linedata['title']
                category = title.split(':')[0]
                if category == 'Wikipedia':
                    # Skip the recurring "daily" pages
                    if title.startswith('Wikipedia:Dagens '):
                        continue
                    id = 'wikicomment_' + str(comment_id)
                    comment_id += 1
                else:  # There is more data, but we just want the comments for now
                    continue
                source = 'wiki_misc'
                # TODO add linedata['url'] somewhere?
                text = html.unescape(linedata['text'])

                # Language ID is run on the raw JSON line (newlines removed for fasttext)
                lang_pred = fasttext_model.predict(line.replace('\n', ' '))
                if lang_pred[0][0] == '__label__dan_Latn' and lang_pred[1][0] > .5:
                    added = '2025-07-21'
                    created = '2002-02-01, 2025-07-20'
                    row = {"id": id, "text": text, "source": source, "added": added, "created": created}
                    all_rows.append(row)
    return all_rows


ds = Dataset.from_list(read_and_clean('wikiextractor/dawiki-misc'))

ds = remove_empty_texts(ds)
ds = remove_duplicate_text(ds)
ds = add_token_count(ds)
ds = ensure_column_order(ds)

ds.to_parquet('data/wiki-comments/wiki-comments.parquet')
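A note on the language filter above: `fasttext` returns a tuple of top labels and their probabilities, and the script keeps a record only when Danish (`dan_Latn`) is the top label with a probability above 0.5. Note that the prediction runs on the raw JSON line rather than on the unescaped text. A minimal sketch of the structure the filter consumes (the probability in the comment is illustrative):

```py
labels, probs = fasttext_model.predict("Dette er en kommentar på dansk.")
# e.g. labels == ('__label__dan_Latn',), probs == array([0.99])  -- value illustrative
keep = labels[0] == "__label__dan_Latn" and probs[0] > 0.5
```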
data/wiki-comments/wiki-comments.md
ADDED
@@ -0,0 +1,89 @@
---
pretty_name: Wikipedia Comments
language:
- da
license: cc0-1.0
license_name: CC-0
size_categories:
- 100k-1M
task_categories:
- text-generation
- fill-mask
task_ids:
- language-modeling
domains:
- Encyclopedic
---

# Dataset Card for Wikipedia Comments

<!-- START-SHORT DESCRIPTION -->
Text from the comment sections of the Danish Wikipedia.
<!-- END-SHORT DESCRIPTION -->


You can read more about the Danish Wikipedia on its [about](https://da.wikipedia.org/wiki/Hj%C3%A6lp:Om) page.

## Dataset Description


<!-- START-DESC-STATS -->
- **Language**: dan, dansk, Danish
- **Domains**: Encyclopedic
- **Number of samples**:
- **Number of tokens (Llama 3)**:
- **Average document length (characters)**:
<!-- END-DESC-STATS -->



## Dataset Structure
An example from the dataset looks as follows.


<!-- START-SAMPLE -->
```py
```

### Data Fields

An entry in the dataset consists of the following fields:

- `id` (`str`): A unique identifier for each document.
- `text` (`str`): The content of the document.
- `source` (`str`): The source of the document (see [Source Data](#source-data)).
- `added` (`str`): The date when the document was added to this collection.
- `created` (`str`): A date range for when the document was originally created.
- `token_count` (`int`): The number of tokens in the sample, computed using the Llama 3 8B tokenizer.
<!-- END-SAMPLE -->
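
For orientation, a record in this subset is expected to look roughly as follows, based on the field list above and the constants assigned in `data/wiki-comments/create.py`; the `text` and `token_count` values are placeholders, not actual data:

```py
{
    "id": "wikicomment_0",                # sequential id assigned in create.py
    "text": "...",                        # placeholder for the unescaped page text
    "source": "wiki_misc",
    "added": "2025-07-21",
    "created": "2002-02-01, 2025-07-20",
    "token_count": 123,                   # placeholder value
}
```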

### Dataset Statistics

<!-- START-DATASET PLOTS -->
<p align="center">
<img src="./images/dist_document_length.png" width="600" style="margin-right: 10px;" />
</p>
<!-- END-DATASET PLOTS -->



## Additional Information

This dataset is collected using an adapted version of the [WikiExtractor](https://github.com/attardi/wikiextractor). Rob van der Goot created a fork that allows for extracting additional text from wikis. The fork can be found at [robvanderg/wikiextractor](https://github.com/robvanderg/wikiextractor.git).

Inspection of the extractor output shows several categories of pages, which are most easily distinguished through the title field. The categories, their size (number of pages), and what they appear to contain after manual inspection are listed below:

```
71472 Kategori:  category overview pages
19992 Wikipedia: comments, but also daily articles
 2379 Portal:    also monthly articles, and some lists/calendars
 1360 MediaWiki: about files, contains almost no natural language
  726 Modul:     technical content, contains almost no (Danish) text
  171 Hjælp:     help pages; info and comments
```

In the current version of the dataset, we use the pages whose titles start with `Wikipedia:` and remove the daily articles by leaving out titles starting with `Wikipedia:Dagens`.
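
A minimal sketch of that selection rule, mirroring the logic in `data/wiki-comments/create.py`:

```py
def keep_page(title: str) -> bool:
    # Keep Wikipedia-namespace pages, but drop the recurring "Dagens ..." pages.
    return title.split(":")[0] == "Wikipedia" and not title.startswith("Wikipedia:Dagens ")
```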


### Citation Information
data/wiki-comments/wiki-comments.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7c9a3bcb73ca772ac1725a3c16efed5640d1e62abbc8d071787e823172b2560e
size 15443705
test_results.log
CHANGED
@@ -1,25 +1,24 @@
 ============================= test session starts ==============================
-platform
-rootdir: /
+platform linux -- Python 3.12.3, pytest-8.3.4, pluggy-1.5.0
+rootdir: /home/rob/Projects/danish-dynaword
 configfile: pyproject.toml
-
-collected 328 items
+collected 337 items
 
 src/tests/test_dataset_schema.py ....................................... [ 11%]
-
-src/tests/test_datasheets.py ........................................... [
-........................................................................ [
-
+................................... [ 21%]
+src/tests/test_datasheets.py ........................................... [ 34%]
+........................................................................ [ 56%]
+...................................................................... [ 76%]
 src/tests/test_load.py .. [ 77%]
 src/tests/test_quality/test_duplicates.py .............................. [ 86%]
-
+.......s [ 88%]
 src/tests/test_quality/test_short_texts.py ............................. [ 97%]
-
+........ [ 99%]
 src/tests/test_unique_ids.py . [100%]
 
 =============================== warnings summary ===============================
-src/tests/test_quality/test_short_texts.py:
-/
+src/tests/test_quality/test_short_texts.py: 37 warnings
+/home/rob/Projects/danish-dynaword/.venv/lib/python3.12/site-packages/datasets/utils/_dill.py:385: DeprecationWarning: co_lnotab is deprecated, use co_lines instead.
 
 -- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html
-
+=========== 336 passed, 1 skipped, 37 warnings in 155.39s (0:02:35) ============