kris927b committed
Commit ce4e62c · 1 Parent(s): 3ffc702

Using the dynaword package rather than custom functions.

Files changed (1):
  data/danske-taler/create.py  +15 -46
data/danske-taler/create.py CHANGED
@@ -4,7 +4,10 @@
 # "beautifulsoup4==4.13.3",
 # "datasets>=3.0.0",
 # "transformers",
+# "dynaword"
 # ]
+# [tool.uv.sources]
+# dynaword = { git = "https://huggingface.co/datasets/danish-foundation-models/danish-dynaword", rev = "00e7f2aee7f7ad2da423419f77ecbb9c0536de0d" }
 # ///
 """
 Danske Taler API Downloader
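Note (not part of the diff): the header being edited is uv's inline script metadata (PEP 723). `dependencies` declares the packages the script needs, and a `[tool.uv.sources]` table can pin one of them to a specific git revision. A minimal hypothetical script showing the same mechanism:

# /// script
# dependencies = [
#     "requests",
#     "dynaword",
# ]
# [tool.uv.sources]
# dynaword = { git = "https://huggingface.co/datasets/danish-foundation-models/danish-dynaword", rev = "00e7f2aee7f7ad2da423419f77ecbb9c0536de0d" }
# ///
import requests  # resolved by uv from the metadata above when run via `uv run`

Pinning `rev` to a commit keeps the processing reproducible even if the dynaword repository changes later; the script itself would presumably be run as `uv run data/danske-taler/create.py`.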
@@ -34,15 +37,20 @@ import time
 from datetime import date
 from pathlib import Path
 from typing import Any
-from functools import partial

 from datasets import Dataset
-from transformers import AutoTokenizer
 import pandas as pd
 import requests
 from bs4 import BeautifulSoup, NavigableString
 from tqdm import tqdm

+from dynaword.process_dataset import (
+    add_token_count,
+    ensure_column_order,
+    remove_duplicate_text,
+    remove_empty_texts,
+)
+
 logger = logging.getLogger(__name__)

 # Configuration
@@ -101,32 +109,6 @@ def contains_html_tags(text):
     return any(tag.name in KNOWN_HTML_TAGS for tag in soup.find_all())


-def _tokenize_function(
-    examples: dict[str, Any], tokenizer: AutoTokenizer
-) -> dict[str, Any]:
-    token_count = [
-        len(tokens)
-        for tokens in tokenizer(examples["text"], padding=False)[  # type: ignore
-            "input_ids"
-        ]
-    ]
-    examples["token_count"] = token_count
-    return examples
-
-
-def add_token_count(
-    ds: Dataset,
-    tokenizer_name: str = "AI-Sweden-Models/Llama-3-8B-instruct",
-    num_proc: int = 4,
-) -> Dataset:
-    tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, use_fast=True)
-
-    tokenize = partial(_tokenize_function, tokenizer=tokenizer)  # type: ignore
-
-    ds = ds.map(tokenize, batched=True, num_proc=num_proc)
-    return ds
-
-
 def get_all_speeches() -> list[dict[str, Any]]:
     # fetch first page, notably the total number of pages
     url = f"{API_BASE_URL}/speeches?per_page=50"
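For reference (not part of the diff): the helpers deleted above computed a per-document `token_count` by batch-mapping a Hugging Face tokenizer over the dataset; after this commit the equivalent presumably comes from `dynaword.process_dataset.add_token_count`. A self-contained sketch of the removed technique, reusing the old code's default tokenizer:

from datasets import Dataset
from transformers import AutoTokenizer


def count_tokens(
    ds: Dataset, tokenizer_name: str = "AI-Sweden-Models/Llama-3-8B-instruct"
) -> Dataset:
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, use_fast=True)

    def tokenize(batch: dict) -> dict:
        # tokenize without padding so each row's input_ids reflect its true length
        ids = tokenizer(batch["text"], padding=False)["input_ids"]
        batch["token_count"] = [len(tokens) for tokens in ids]
        return batch

    # batched map, as in the removed ds.map(tokenize, batched=True, num_proc=num_proc)
    return ds.map(tokenize, batched=True)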
@@ -294,24 +276,13 @@ def main():
     df = df[df["license"] == "cc0"]
     logger.info(f"Removed {len_df - len(df)} documents without a cc0 license")

-    # remove duplicate ids
-    len_df = len(df)
-    df = df.drop_duplicates(subset=["id"])
-    logger.info(f"Removed {len_df - len(df)} duplicate ids")
-
-    # remove rows with empty text
-    len_df = len(df)
-    df = df[df["text"].str.strip() != ""]
-    logger.info(f"Removed {len_df - len(df)} rows with empty text")
-
-    # remove rows with duplicate text
-    len_df = len(df)
-    df = df.drop_duplicates(subset=["text"])
-    logger.info(f"Removed {len_df - len(df)} rows with duplicate text")
-
-    dataset = Dataset.from_pandas(
-        df[["id", "text", "source", "added", "created"]], preserve_index=False
-    )
+    dataset = Dataset.from_pandas(df, preserve_index=False)
+
+    dataset = remove_empty_texts(dataset)  # remove rows with empty text
+    dataset = remove_duplicate_text(dataset)  # remove rows with duplicate text
+    dataset = add_token_count(dataset)
+    dataset = ensure_column_order(dataset)
+
     assert len(set(dataset["id"])) == len(dataset), "IDs are not unique"
     assert len(set(dataset["text"])) == len(dataset), "Texts are not unique"
     assert len(set(df["license"])) == 1, "Multiple licenses found"
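The dynaword helpers used above are implemented in the danish-dynaword repository; judging from the pandas code they replace, `remove_empty_texts` and `remove_duplicate_text` should behave roughly like this sketch (an assumption about their behavior, not the package's actual code):

from datasets import Dataset


def remove_empty_texts_sketch(ds: Dataset) -> Dataset:
    # mirrors the removed df[df["text"].str.strip() != ""]
    return ds.filter(lambda row: row["text"].strip() != "")


def remove_duplicate_text_sketch(ds: Dataset) -> Dataset:
    # mirrors the removed df.drop_duplicates(subset=["text"]):
    # keep only the first occurrence of each distinct text
    seen: set[str] = set()

    def is_first(row: dict) -> bool:
        if row["text"] in seen:
            return False
        seen.add(row["text"])
        return True

    return ds.filter(is_first)

Note that the old code also dropped duplicate ids explicitly; in the new pipeline uniqueness of `id` is enforced only by the assert in the hunk above.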
@@ -319,8 +290,6 @@

     # check for html tags in text
     assert not df["text"].apply(contains_html_tags).any(), "HTML tags found in text"

-    dataset = add_token_count(dataset)
-
     dataset.to_parquet(save_path)

 
295