Tasks: Text Generation
Formats: parquet
Sub-tasks: language-modeling
Languages: Danish
Size: 10M - 100M
Minor changes to make the tests pass, and add a proper logging mechanism.
data/domsdatabasen/create.py
CHANGED
@@ -25,6 +25,7 @@ Note: This script is designed to be run using a GPU.
 """
 
 import atexit
+import logging
 import os
 import csv
 import time
@@ -63,11 +64,13 @@ from dynaword.process_dataset import (
     remove_empty_texts,
 )
 
+logger = logging.getLogger(__name__)
+
 # ----------------- Config ------------------
 
-PDF_DIR = Path("pdfs")
-LOG_FILE = Path("progress_log.csv")
-PARQUET_FILE = Path("domsdatabasen.parquet")
+PDF_DIR = Path(__file__).parent / "pdfs"
+LOG_FILE = Path(__file__).parent / "progress_log.csv"
+PARQUET_FILE = Path(__file__).parent / "domsdatabasen.parquet"
 MAX_WORKERS = 10
 RETRY_COUNT = 3
 RETRY_DELAY = 2
@@ -80,29 +83,8 @@ HEADERS = {
     "Accept-Language": "en-GB,en-US;q=0.9,en;q=0.8",
     "Connection": "keep-alive",
     "Content-Type": "application/json",
-    "Cookie": (
-        "CookieConsent={stamp:'ZWnz3xRN5bhZXpF3CE69z5Bf1hTj78qUE084UV2i4YScmyXmkGVkxA==',"
-        "necessary:true,preferences:false,statistics:false,marketing:false,"
-        "method:'explicit',ver:1,utc:1733148117245,region:'dk'}"
-    ),
-    "DNT": "1",
-    "Origin": "https://domsdatabasen.dk",
-    "Referer": "https://domsdatabasen.dk/",
-    "User-Agent": (
-        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 "
-        "(KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36"
-    ),
-    "sec-ch-ua": '"Not)A;Brand";v="8", "Chromium";v="138"',
-    "sec-ch-ua-mobile": "?0",
-    "sec-ch-ua-platform": '"macOS"',
 }
 
-# ----------------- Logging ------------------
-
-
-def log(msg: str):
-    print(f"[{time.strftime('%Y-%m-%d %H:%M:%S')}] {msg}")
-
 
 def init_csv():
     if not LOG_FILE.exists():
@@ -145,7 +127,7 @@ def retry(func, *args, retries=RETRY_COUNT, delay=RETRY_DELAY, **kwargs):
         try:
             return func(*args, **kwargs)
         except Exception as e:
-            log(f"⚠️ Retry {attempt+1}/{retries} failed: {e}")
+            logger.warning(f"⚠️ Retry {attempt+1}/{retries} failed: {e}")
             time.sleep(delay)
     raise RuntimeError(f"❌ All retries failed for {func.__name__}({args})")
 
@@ -157,7 +139,7 @@ def download_pdf(document: dict) -> Path | None:
     document_id = document["id"]
     out_path = PDF_DIR / f"document_{document_id}.pdf"
     if out_path.exists():
-        log(f"⏭️ Skipped PDF (exists): {document_id}")
+        logger.info(f"⏭️ Skipped PDF (exists): {document_id}")
         return out_path
 
     url = f"https://domsdatabasen.dk/webapi/api/Case/document/download/{document_id}"
@@ -166,7 +148,7 @@ def download_pdf(document: dict) -> Path | None:
     if response.status_code == 200:
         with open(out_path, "wb") as f:
             f.write(response.content)
-        log(f"✅ Downloaded PDF: {document_id}")
+        logger.info(f"✅ Downloaded PDF: {document_id}")
         append_log(document_id, pdf=True, text=False)
         return out_path
     else:
@@ -216,7 +198,7 @@ def process_document(document: dict) -> dict | None:
         if response.status_code == 200:
             with open(pdf_path, "wb") as f:
                 f.write(response.content)
-            log(f"✅ Downloaded PDF: {document_id}")
+            logger.info(f"✅ Downloaded PDF: {document_id}")
         else:
             raise RuntimeError(f"Download failed: {response.status_code}")
     except Exception as e:
@@ -229,7 +211,7 @@ def process_document(document: dict) -> dict | None:
     converter = PdfConverter(artifact_dict=model_refs, config=config)
     rendered = retry(converter, str(pdf_path))
     text, _, _ = text_from_rendered(rendered)
-    log(f"🖍️ Extracted text: {document_id}")
+    logger.info(f"🖍️ Extracted text: {document_id}")
    append_log(document_id, pdf=True, text=True)
 
     del rendered
@@ -296,18 +278,18 @@ def main():
     all_records = []
     page_num = 1
     _, total_pages = fetch_case_page(1)
-    log(f"📄 Total pages: {total_pages}")
+    logger.info(f"📄 Total pages: {total_pages}")
 
     existing_ids = load_existing_ids()
-    log(f"🔁 Resuming with {len(existing_ids)} already processed IDs")
+    logger.info(f"🔁 Resuming with {len(existing_ids)} already processed IDs")
 
     while page_num <= total_pages:
-        log(f"\n📄 Fetching page {page_num}/{total_pages}")
+        logger.info(f"\n📄 Fetching page {page_num}/{total_pages}")
 
         try:
             doc_infos, _ = fetch_case_page(page_num)
         except Exception as e:
-            log(f"❌ Failed to fetch page {page_num}: {e}")
+            logger.warning(f"❌ Failed to fetch page {page_num}: {e}")
             page_num += 1
             continue
 
@@ -329,10 +311,6 @@ def main():
 
         if all_records:
             ds_new = Dataset.from_list(all_records)
-            ds_new = remove_empty_texts(ds_new)
-            ds_new = remove_duplicate_text(ds_new)
-            ds_new = add_token_count(ds_new)
-            ds_new = ensure_column_order(ds_new)
 
             if PARQUET_FILE.exists():
                 ds_old = Dataset.from_parquet(str(PARQUET_FILE))
@@ -342,13 +320,31 @@ def main():
                 ds_combined = ds_new
 
             ds_combined.to_parquet(str(PARQUET_FILE))
-            log(f"📦 Appended {len(all_records)} records to {PARQUET_FILE}")
+            logger.info(f"📦 Appended {len(all_records)} records to {PARQUET_FILE}")
             existing_ids.update([r["id"] for r in all_records])
             all_records.clear()
 
         page_num += 1
 
+    ds = Dataset.from_parquet(str(PARQUET_FILE))
+    ds = cast(Dataset, ds)
+    ds = remove_empty_texts(ds)
+    ds = remove_duplicate_text(ds)
+    ds = add_token_count(ds)
+    ds = ensure_column_order(ds)
+
+    ds.to_parquet(str(PARQUET_FILE))
+
 
 if __name__ == "__main__":
     mp.set_start_method("spawn", force=True)
+    log_path = Path(__file__).parent / "domsdatabasen.log"
+    logging.basicConfig(
+        level=logging.INFO,
+        format="%(asctime)s - %(levelname)s - %(message)s",
+        handlers=[
+            logging.StreamHandler(),
+            logging.FileHandler(log_path),
+        ],
+    )
     main()
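Two aspects of the change are worth noting. First, the cleanup steps (remove_empty_texts, remove_duplicate_text, add_token_count, ensure_column_order) move out of the per-page loop and run once over the finished parquet file, so deduplication sees the whole corpus at once rather than one page at a time. Second, the ad-hoc print-based log() helper is replaced by the standard logging module, configured under the __main__ guard. The snippet below is a minimal standalone sketch of that configuration (standard library only, mirroring what the diff adds); running it sends each record both to the console and to a domsdatabasen.log file next to the script.

    import logging
    from pathlib import Path

    logger = logging.getLogger(__name__)

    if __name__ == "__main__":
        # Same handler setup as create.py: one StreamHandler for the console
        # and one FileHandler writing next to this script.
        log_path = Path(__file__).parent / "domsdatabasen.log"
        logging.basicConfig(
            level=logging.INFO,
            format="%(asctime)s - %(levelname)s - %(message)s",
            handlers=[
                logging.StreamHandler(),
                logging.FileHandler(log_path),
            ],
        )
        logger.info("Downloaded PDF: 12345")   # INFO record, console + file
        logger.warning("Retry 1/3 failed")     # WARNING record, same handlers

Because worker processes are started with the "spawn" method, configuring logging inside the __main__ guard keeps the setup from running again in each child.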
data/domsdatabasen/domsdatabasen.parquet
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:132f593c951564e56c262520116bd02eea193f10443b9d12305e130dde16ee99
+size 123195077
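Since domsdatabasen.parquet is stored via Git LFS, only the pointer (the new oid and a size of roughly 123 MB) changes in the commit; the payload itself is fetched with git lfs pull. A minimal sketch for loading the refreshed file, assuming the datasets library is installed and a checkout at the repository root (the path below is illustrative):

    from datasets import Dataset

    # Hypothetical local path; adjust to your checkout location.
    ds = Dataset.from_parquet("data/domsdatabasen/domsdatabasen.parquet")
    print(len(ds), ds.column_names)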