# /// script
# requires-python = ">=3.12"
# dependencies = [
#     "beautifulsoup4==4.13.3",
#     "datasets>=3.0.0",
#     "transformers",
#     "dynaword"
# ]
# [tool.uv.sources]
# dynaword = { git = "https://huggingface.co/datasets/danish-foundation-models/danish-dynaword", rev = "00e7f2aee7f7ad2da423419f77ecbb9c0536de0d" }
# ///
""" | |
Danske Taler API Downloader | |
This script downloads speeches/articles from the Danske Taler API: https://www.dansketaler.dk/api/v1 | |
It saves it into the following structure: | |
``` | |
{ | |
"text": "Lav et referat af nedenstående tekst:\n\nTekst:\nOpdatering: Manden er nu fundet af Nordjyllands Politi[...]", | |
"source": "nordjyllandnews", | |
"id": "nordjyllandnews_0", | |
"added": "2024-12-16", | |
"created": "2000-01-01, 2024-01-01", | |
"license": "Creative Commons Legal Code\n\nCC0 1.0 Universal", | |
"domain": "News", | |
"metadata": { | |
"source-pretty": "Nordjylland News" | |
} | |
} | |
``` | |
Note: To run this script, you need to set `GIT_LFS_SKIP_SMUDGE=1` to be able to install dynaword: | |
```bash | |
GIT_LFS_SKIP_SMUDGE=1 uv run data/memo/create.py | |
``` | |
This second version fixed previous issues with the download and processing of the Danish Memo repository: | |
https://huggingface.co/datasets/danish-foundation-models/danish-dynaword/discussions/67 | |
""" | |
import logging
import time
from datetime import date
from pathlib import Path
from typing import Any

import pandas as pd
import requests
from bs4 import BeautifulSoup, NavigableString
from datasets import Dataset
from tqdm import tqdm

from dynaword.process_dataset import (
    add_token_count,
    ensure_column_order,
    remove_duplicate_text,
    remove_empty_texts,
)
logger = logging.getLogger(__name__)

# Configuration
API_BASE_URL = "https://www.dansketaler.dk/api/v1"
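# Tags used to detect leftover HTML markup in the scraped speech texts (see
# `contains_html_tags` below); the final dataset is asserted to be free of these.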
KNOWN_HTML_TAGS = {
    # document structure
    "html", "head", "body", "title", "meta", "link", "script", "style",
    # layout and text
    "div", "span", "p", "a", "ul", "ol", "li",
    "table", "tr", "td", "th", "img",
    "h1", "h2", "h3", "h4", "h5", "h6",
    "strong", "em", "br", "hr",
    # forms
    "form", "input", "button", "label", "select", "option", "textarea",
    # sectioning and embeds
    "iframe", "nav", "footer", "header", "main", "section", "article",
}


def contains_html_tags(text):
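    """Return True if `text` contains any recognizable HTML tags from KNOWN_HTML_TAGS."""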
    soup = BeautifulSoup(str(text), "html.parser")
    return any(tag.name in KNOWN_HTML_TAGS for tag in soup.find_all())


def get_all_speeches() -> list[dict[str, Any]]:
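    """Fetch metadata for all speeches from the paginated /speeches endpoint (50 per page)."""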
    # fetch the first page, primarily to get the total number of pages
    url = f"{API_BASE_URL}/speeches?per_page=50"
    response = requests.get(url)
    response.raise_for_status()

    speeches = response.json()
    meta = speeches["meta"]
    total_pages = meta["total_pages"]

    # fetch all pages
    all_speeches = []
    for page in range(1, total_pages + 1):
        url = f"{API_BASE_URL}/speeches?per_page=50&page={page}"
        response = requests.get(url)
        response.raise_for_status()

        speeches = response.json()
        all_speeches.extend(speeches["speeches"])

    return all_speeches


def fetch_speech_content(
    url: str, max_retries: int = 3, backoff_factor: float = 0.5
) -> tuple[str | None, str]:
""" | |
Fetches the license div from the page with retry logic. | |
Args: | |
url: The URL to fetch the license div from | |
max_retries: Maximum number of retry attempts | |
backoff_factor: Factor to determine exponential backoff time between retries | |
Returns: | |
The text content of the license div if found, None otherwise | |
""" | |
    retries = 0

    while retries <= max_retries:
        try:
            response = requests.get(url, timeout=10)
            response.raise_for_status()

            soup = BeautifulSoup(response.text, "html.parser")
            license_div = soup.find("div", class_="speech-copyright")
            speech_div = soup.find("div", class_="speech-article-content")

            speech = ""
            if speech_div:
                # Iterate over the children of the found div
                for child_div in speech_div.children:  # type: ignore
                    if child_div.name == "div":  # type: ignore
                        current_paragraph = []
                        for content in child_div.contents:  # type: ignore
                            if isinstance(content, NavigableString):
                                # Append text content
                                current_paragraph.append(str(content).strip())
                            elif content.name == "br":
                                # On <br>, join the accumulated text into a paragraph, then reset
                                if current_paragraph:
                                    speech += "".join(current_paragraph)
                                    speech += "\n"  # Add a newline for paragraph break
                                    current_paragraph = []
                        # Append any remaining text in current_paragraph
                        if current_paragraph:
                            speech += "".join(current_paragraph)
                            speech += "\n"  # Add a newline for paragraph break

            return (license_div.text if license_div else None, speech)

        except (requests.RequestException, AttributeError) as e:
            retries += 1
            if retries > max_retries:
                logger.info(
                    f"Failed to fetch speech content after {max_retries} attempts: {str(e)}"
                )
                return (None, "")

            # Calculate wait time using exponential backoff
            wait_time = backoff_factor * (2 ** (retries - 1))
            logger.info(
                f"Attempt {retries} failed. Retrying in {wait_time:.2f} seconds..."
            )
            time.sleep(wait_time)

    return (None, "")


def convert_to_license(license_information: str | None) -> str | None:
    """Returns "cc0" if the copyright notice states that the material is free of copyright; otherwise returns the notice unchanged."""
    if license_information and (
        ("Materialet er fri af ophavsret" in license_information)
        # "ophvasret" matches what is presumably a misspelling on some source pages
        or ("Materialet er fri af ophvasret" in license_information)
        or ("Ophavsretten er bortfaldet" in license_information)
        or ("Manuskriptet er fri af ophavsret" in license_information)
        or ("Offentlig " == license_information)
    ):
        return "cc0"

    return license_information


def convert_to_row(speech_meta: dict[str, Any]) -> dict[str, Any]:
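    """Convert a speech metadata entry from the API into a dynaword-style row, fetching the speech text and license notice from the speech page."""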
    speech_id = speech_meta["id"]
    date_of_speech = speech_meta["date"]["iso_date"]
    date_of_speech_start = f"{date_of_speech}"
    date_of_speech_end = f"{date_of_speech}"

    (license_information, speech) = fetch_speech_content(speech_meta["url"])

    row = {
        "id": f"danske-taler_{speech_id}",
        "text": speech,
        "source": "danske-taler",
        # current date
        "added": date.today().isoformat(),
        "created": f"{date_of_speech_start}, {date_of_speech_end}",
        "license_information": license_information,
        "domain": "Spoken",
        "metadata": {"source-pretty": "Danske Taler"},
    }

    return row


def download_speeches() -> pd.DataFrame:
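    """Download all speeches from the Danske Taler API and return them as a DataFrame of dynaword rows."""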
logger.info("Fetching all speeches from Danske Taler API") | |
speeches = get_all_speeches() | |
logger.info(f"Found {len(speeches)} speeches") | |
rows = [] | |
for speech in tqdm(speeches): | |
row = convert_to_row(speech) | |
rows.append(row) | |
logger.info(f"Saving {len(rows)} speeches to dataset") | |
df = pd.DataFrame(rows) | |
return df | |


def main():
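    """Download (or reload previously downloaded) speeches, keep only cc0-licensed documents, clean the dataset, and write danske-taler.parquet."""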
    save_path = Path(__file__).parent / "danske-taler.parquet"
    save_path_all = Path(__file__).parent / "tmp" / "danske-taler-all.parquet"
    save_path_all.parent.mkdir(parents=False, exist_ok=True)

    if save_path_all.exists():
        logger.info(f"Loading dataset from {save_path_all}")
        df = pd.read_parquet(save_path_all)
    else:
        logger.info(f"Downloading speeches and saving to {save_path_all}")
        df = download_speeches()
        df.to_parquet(save_path_all)

    licenses = [convert_to_license(license) for license in df["license_information"]]
    df["license"] = licenses

    unique_licenses = set(df["license"].tolist())
    logger.info("Unique licenses:")
    for license in unique_licenses:
        logger.info(f"\t{license}")

    # remove documents without a cc0 license
    len_df = len(df)
    df = df[df["license"] == "cc0"]
    logger.info(f"Removed {len_df - len(df)} documents without a cc0 license")

    dataset = Dataset.from_pandas(df, preserve_index=False)
    dataset = remove_empty_texts(dataset)  # remove rows with empty text
    dataset = remove_duplicate_text(dataset)  # remove rows with duplicate text
    dataset = add_token_count(dataset)
    dataset = ensure_column_order(dataset)

    assert len(set(dataset["id"])) == len(dataset), "IDs are not unique"
    assert len(set(dataset["text"])) == len(dataset), "Texts are not unique"
    assert len(set(df["license"])) == 1, "Multiple licenses found"

    # check for html tags in text
    assert not df["text"].apply(contains_html_tags).any(), "HTML tags found in text"

    dataset.to_parquet(save_path)


if __name__ == "__main__":
    log_path = Path(__file__).parent / "danske-taler.log"
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(levelname)s - %(message)s",
        handlers=[
            logging.StreamHandler(),
            logging.FileHandler(log_path),
        ],
    )
    main()