demo setup (#1)
- demo setup (89834caddc84c9dca5d3e00720d599c967a1c3bd)
- ignore .json (fbdf4a4b404070876f8688c3cab7706168e30d30)
- removed .json (5df33cc7fb6dae275a63a981824b0d48b0b98f84)
- data/.gitignore +1 -0
- data/1750/data-00000-of-00003.arrow +3 -0
- data/1750/data-00001-of-00003.arrow +3 -0
- data/1750/data-00002-of-00003.arrow +3 -0
- extract_data.py +477 -0
- log/extract.log +219 -0
- log/problems_1919-1919.txt +68 -0
- pd_check/public_domain_data.parquet +3 -0
- pd_check/public_domain_files.txt +0 -0
- pd_check/select_pd.py +523 -0
- scrape/da_people_large.parquet +3 -0
- scrape/scrape_large.py +306 -0
data/.gitignore
ADDED
@@ -0,0 +1 @@
+*.json
data/1750/data-00000-of-00003.arrow
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f00dc4c008c64560993420d2f4fc4e6ba78cb07f6989f85b3c0d190605028578
+size 4017191736
data/1750/data-00001-of-00003.arrow
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e2b06cc594e0c025a8bcfe8b68a61613e8e5032c5fb344280a06841538ef6699
+size 4354003752
data/1750/data-00002-of-00003.arrow
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ca0a355fc755b832b87f195ced311e69f72df1e4c5f39afa804a817e2fc95beb
+size 2700984360
extract_data.py
ADDED
@@ -0,0 +1,477 @@
+import os
+import re
+import string
+import json
+import logging
+import shutil
+from datetime import datetime
+from tqdm import tqdm
+
+import fitz
+from datasets import Dataset, load_dataset
+logger = logging.getLogger(__name__)
+##########################################
+###### FILL MANUALLY #####################
+#name of parquet files
+source = "kb_books"
+#how many years should go in one parquet file (do not change!)
+n_chunks = 1
+#how many docs in 1 parquet
+n_batch = 5
+#paths
+input_path = os.path.join("..","..","kb-books","raw")
+output_path = os.path.join(".","data")
+logs = os.path.join(".","log")
+#first year to process
+start_year = 1751
+#last year to process
+stop_year = 1752
+#misc folders in data
+unwanted_folders = ("README.txt","logs")
+#demo run for testing, if true, only first page is read
+demo = False
+#location of reference filenames for public domain documents
+ref_pd_location = os.path.join(".","pd_check","public_domain_files.txt")
+with open(ref_pd_location, 'r') as pd_files:
+    ref_pd_list = pd_files.read().splitlines()
+############################################
+def find_author_json(data: dict[str,dict[str,dict[str,str]]]) -> str:
+    """
+    A function for finding the author(s) from various possible locations in the json metadata.
+    """
+    try:
+        author = data.get("pnx").get("addata")["au"]
+    except KeyError:
+        author = []
+    try:
+        add_author = data.get("pnx").get("addata")["addau"]
+    except KeyError:
+        add_author = []
+
+    authors = list(set(author)) + list(set(add_author))
+    authors = "; ".join(authors)
+
+    if len(authors) < 1:
+        try:
+            authors = data.get("pnx").get("sort")["author"]
+            authors = "; ".join(authors)
+        except KeyError:
+            pass
+
+    if len(authors) < 1:
+        try:
+            authors = data.get("pnx").get("display")["creator"]
+            authors = "; ".join(authors)
+        except KeyError:
+            authors = "missing"
+
+    return (authors)
+
+
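A note on the lookups above: the helpers probe an Alma/Primo-style "pnx" record, falling back from addata to sort to display before settling on "missing". A minimal sketch of the nested shape they assume (the field names come from the lookups above; the record itself is hypothetical):

    # Hypothetical pnx record; only the keys probed by find_author_json are shown.
    record = {"pnx": {"addata": {"au": ["Holck, J."], "addau": ["Example, H."]},
                      "sort": {"author": ["Holck, J."]},
                      "display": {"creator": ["Holck, J."]}}}
    find_author_json(record)   # -> "Holck, J.; Example, H."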
+def find_title_json(data: dict[str,dict[str,dict[str,str]]]) -> str:
+    """
+    A function for finding the title from various possible locations in the json metadata.
+    """
+    try:
+        title = data.get("pnx").get("display")["title"][0]
+    except KeyError:
+        title = []
+    if len(title) < 1:
+        try:
+            title = data.get("pnx").get("addata")["btitle"][0]
+        except KeyError:
+            pass
+    else:
+        pass
+    if len(title) < 1:
+        try:
+            title = data.get("pnx").get("sort")["title"][0]
+        except KeyError:
+            pass
+    if len(title) < 1:
+        title = "missing"
+    return(title)
+
+
+def find_digitalization(data: dict[str,dict[str,dict[str,str]]]) -> str:
+    """
+    A function for finding the digitalization date from various possible locations in the json metadata.
+    """
+    try:
+        digit = data.get("pnx").get("display")["creationdate"][0]
+        #last 4 digit number in string
+        digit = re.findall(r"\d{4}$",digit)[0]
+    except KeyError:
+        digit = []
+    if len(digit) < 1:
+        try:
+            digit = data.get("pnx").get("addata")["date"][1]
+            digit = re.findall(r"\d{4}$",digit)[0]
+        except KeyError:
+            digit = "missing"
+    return(digit)
+
+def find_source(data: dict[str,dict[str,dict[str,str]]]) -> str:
+    """
+    A function for finding source of the document from the json metadata.
+    """
+    try:
+        doc_source = data.get("pnx").get("display")["lds50"]
+        #last 4 digit number in string
+        doc_source = [i for i in doc_source if "Digi" not in i][0]
+    except (KeyError, IndexError):
+        doc_source = "missing"
+    return doc_source
+
+#filter alive and missing people
+def dead_70_yrs_ago(ds):
+    """filter for the scraped authors to find ones who have died 70 years ago"""
+    birth_miss = False
+    death_miss = False
+    try:
+        birth_yr = int(ds["born"])
+        if birth_yr > 1955:
+            birth = False
+        else:
+            birth = True
+    except ValueError:
+        birth = False
+        birth_miss = True
+
+    try:
+        death_yr = int(ds["died"])
+        if death_yr > 1955:
+            death = False
+        else:
+            death = True
+    except ValueError:
+        death = False
+        death_miss = True
+
+    if ((death and birth) or
+        (death and birth_miss) or
+        (death_miss and birth_yr < 1833)
+        ):
+        filtered = True
+    else:
+        filtered = False
+
+    return filtered
+
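One edge case worth flagging in dead_70_yrs_ago: when both "born" and "died" fail to parse, birth_yr is never bound, so evaluating (death_miss and birth_yr < 1833) raises a NameError rather than returning False. A minimal reproduction with a hypothetical row:

    # Both fields unparseable: birth_yr is unbound by the time the filter
    # condition reads it, so this raises NameError instead of returning False.
    dead_70_yrs_ago({"born": "unknown", "died": "unknown"})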
+def extract_meta_data(pdf_file:str) -> dict[str,str|int]:
+    """
+    A function for extracting meta data from the json files
+    includes:
+    - author(s)
+    - title
+    - published
+    - digitalized
+    - source
+    """
+    try:
+        #load in json
+        json_file = pdf_file[:-3] + "json"
+        f = open(json_file)
+        data = json.load(f)
+        #do stuff
+        authors = find_author_json(data)
+        title = find_title_json(data)
+        digitalized = find_digitalization(data)
+        doc_source = find_source(data)
+        #close
+        f.close()
+    except BaseException:
+        authors = "missing"
+        title = "missing"
+        digitalized = "missing"
+        doc_source = "missing"
+    return authors, title, digitalized, doc_source
+
+def simplify_name(author:str) -> str:
+    """
+    function for simplifying repeated single author name separated by ;
+    eg. "Holck, J.; af J. Holck." -> Holck, J.
+    """
+    simp = author
+    if ";" in author:
+        only_uppercase = [re.findall(r"[A-Z][a-z]*",i) for i in author.split(";")]
+        if len(only_uppercase)==2:
+            if sorted(only_uppercase[0]) == sorted(only_uppercase[1]):
+                simp = re.findall(r"^[^;]*",author)[0]
+            else:
+                pass
+        else:
+            pass
+    else:
+        pass
+    return simp
+
+def separate_names(author:str) -> list[list[list[str]],int]:
+    """
+    function for separating different authors and their
+    - separates by ";"
+    - matches strings starting with uppercase letters
+    """
+    authors = re.findall(r"([^;]*)",author)
+    authors = list(filter(None,authors))
+    authors = [re.findall(r"([A-Z]\w*)",i) for i in authors]
+    n_author = len(authors)
+    return authors, n_author
+
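Worked examples for the two name helpers, reusing the docstring's own sample plus one hypothetical two-author string:

    simplify_name("Holck, J.; af J. Holck.")    # -> "Holck, J."
    separate_names("Holck, J.; H. Example")     # -> ([["Holck", "J"], ["H", "Example"]], 2)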
+def check_copyright(pub_year:int,
+                    cover_page_text: str,
+                    filename: str,
+                    ref_filenames: list[str]
+                    ) -> bool:
+    """
+    Function for checking public domain status based on:
+    - year published,
+    - if the digitalising party claims it is copyright free
+    - if the filename can be matched to a name from an outside source
+    """
+    if pub_year < 1833:
+        public_domain = True
+    elif ("free of copyright" in cover_page_text):
+        public_domain = True
+    elif filename in ref_filenames:
+        public_domain = True
+    else:
+        public_domain = False
+
+    return public_domain
+
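The three acceptance routes in check_copyright, shown on hypothetical inputs (the filename and reference list here are invented):

    check_copyright(1750, "", "x.pdf", [])                          # True: published before 1833
    check_copyright(1919, "...free of copyright...", "x.pdf", [])   # True: cover-page claim
    check_copyright(1919, "", "x.pdf", ["x.pdf"])                   # True: matched in reference list
    check_copyright(1919, "", "x.pdf", [])                          # False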
+def convert_pdf_to_dataset(file_name: str,
+                           path_to_file: str,
+                           demo: bool = False) -> Dataset:
+
+    """Converts pdf to image and a dataset with rows by page, based on:
+    https://thepythoncode.com/article/convert-pdf-files-to-images-in-python
+    """
+    #create path, create id
+    input_file=os.path.join(path_to_file,file_name)
+    #whitespaces to underscores, remove: punctuation, alma, pdf
+    doc_id = re.sub(" ","_",file_name)
+    doc_id = ''.join(filter(lambda x: x not in string.punctuation, doc_id))
+    doc_id = re.sub(r"alma|pdf","",doc_id)
+    #get metadata
+    pub_year = file_name[:4]
+    #get metadata (from json)
+    author, title, digitalized, doc_source = extract_meta_data(input_file)
+    # Open the document
+    pdfIn = fitz.open(input_file)
+    data_list = []
+    # Iterate throughout the pages, set range for full doc or demo test runs
+    if demo:
+        page_range = 1
+    else:
+        page_range = pdfIn.page_count
+
+    for pg in range(page_range):
+        page = pdfIn[pg]
+        #get page text
+        page_text = page.get_text()
+        #meta data from frontpage if still missing
+        if pg == 0:
+            #remove \n for easier regexing
+            try:
+                text_solid = re.sub("\n","",page_text)
+            except TypeError:
+                #if no text on frontpage
+                text_solid = "missing"
+
+            if author == "missing":
+                try:
+                    author = re.search(r"(?<=Author\(s\):)(.*?)(?=Titel)",text_solid)[0]
+                    #trying to clean it a bit
+                    author = simplify_name(author)
+                    #author, n_author = separate_names(author)
+                except TypeError:
+                    #in case no cover page
+                    author = "missing"
+                finally:
+                    #in case cover page present, but still no author
+                    if len(author) == 0:
+                        author = "missing"
+                    else:
+                        pass
+
+            if title == "missing":
+                try:
+                    title = re.search(r"(?<=Title:)(.*?)(?=Udgivet)",text_solid)[0]
+                except TypeError:
+                    title = "missing"
+            #now that all possible meta data is gathered after first page, see copyright status
+            copyright_free = check_copyright(int(pub_year),
+                                             text_solid,
+                                             file_name,
+                                             ref_pd_list)
+
+        else:
+            #on other pages
+            pass
+
+        if not copyright_free:
+            #if public domain was not confirmed, end looking through pages
+            break
+
+        #create page_image
+        rotate = int(0)
+        # 2, 2 (text should be readable)
+        zoom_x = 2
+        zoom_y = 2
+        # Pre-rotate is to rotate if needed.
+        mat = fitz.Matrix(zoom_x, zoom_y).prerotate(rotate)
+        pix = page.get_pixmap(matrix=mat, alpha=False)
+        page_img = pix.pil_image()
+        page_id = f"{doc_id}_p{pg+1}"
+        #assemble data_doc
+        if type(author) == list:
+            author = "; ".join(author)
+        else:
+            pass
+
+        meta_data ={"doc_id" : doc_id,
+                    "page_id" : page_id,
+                    "page_image" : page_img,
+                    "page_text": page_text,
+                    "author": author,
+                    "title" : title,
+                    "published": pub_year,
+                    "digitalized": digitalized,
+                    "source": doc_source,
+                    "file_name": file_name}
+        data_list.append(meta_data)
+
+    pdfIn.close()
+
+    if copyright_free:
+        ds = Dataset.from_list(data_list)
+    else:
+        ds = "missing"
+    return ds
+
+def make_year_list(start_year: int, stop_year: int) -> list[str]:
+    """make a list of file names based on years"""
+    year_list = list(range(start_year, stop_year + 1))
+    year_list = [str(i) for i in year_list]
+    return year_list
+
+#source filter for ADL (they are not scanned pdfs)
+adl_filter = lambda ds: ds["source"] != "ADLFBI"
+
+def split(a, n):
+    "splits list into n roughly equal parts"
+    k, m = divmod(len(a), n)
+    return (a[i*k+min(i, m):(i+1)*k+min(i+1, m)] for i in range(n))
+
+def remove(path):
+    """ param <path> could either be relative or absolute. """
+    if os.path.isfile(path) or os.path.islink(path):
+        os.remove(path)  # remove the file
+    elif os.path.isdir(path):
+        shutil.rmtree(path)  # remove dir and all contains
+    else:
+        raise ValueError("file {} is not a file or dir.".format(path))
+
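The split() helper deals the first len(a) % n parts one extra element each, so the parts differ in length by at most one:

    list(split(list(range(7)), 3))   # -> [[0, 1, 2], [3, 4], [5, 6]]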
+def reorganize_data(output_path: str, shard_size: str = "5"):
+    """ Loads the temporary data folders in the data path and creates 5GB shards for each year, deletes temporary files
+    """
+    folders = os.listdir(output_path)
+    temp_folders = [i for i in folders if "_t" in i]
+    if len(temp_folders) == 0:
+        print("DATA ORGANIZED")
+        return
+    print("REORGANIZING DATA...")
+    for t_fold in tqdm(temp_folders):
+        #load all separate parquets into 1 ds
+        data_path = os.path.join(output_path,t_fold)
+        data_set = load_dataset(data_path, split = "train")
+        #save it to appropriately size chunks
+        year_str = t_fold[:-2]
+        new_data_path = os.path.join(output_path,year_str)
+        data_set.save_to_disk(new_data_path, max_shard_size="5GB")
+        #delete temp_folder
+        try :
+            remove(data_path)
+        except PermissionError as e:
+            print(f"{e}")
+
+
+def main():
+    sub_folders = os.listdir(input_path)
+    for u in unwanted_folders:
+        sub_folders.remove(u)
+    #select years to process
+    year_list = make_year_list(start_year,stop_year)
+    sub_folders = sorted([i for i in sub_folders if i in year_list])
+    #chunking because there's a lot of data
+    chunks = [sub_folders[i:i + n_chunks] for i in range(0, len(sub_folders), n_chunks)]
+
+    logger.info(f"Extracting from PDFs...{sub_folders[0]}-{sub_folders[-1]}")
+    for ch in tqdm(chunks):
+        problem_list =[]
+        for sfolder in ch:
+            #sub folder path e.g /raw/1750
+            sfp = os.path.join(input_path,sfolder)
+            files = [i for i in os.listdir(sfp) if ".pdf" in i]
+            #further chunking because even 1 year is too much memory-wise
+            #batched_files = list(split(files,10))
+            #limit files in 1 parquet
+            batched_files = [files[i:i + n_batch] for i in range(0, len(files), n_batch)]
+            for batch_nr, batch in enumerate(batched_files):
+                ds=[]
+                for i in batch:
+                    try:
+                        temporary_ds = convert_pdf_to_dataset(i,sfp,demo)
+                        if temporary_ds is None:
+                            pass
+                        else:
+                            print(temporary_ds[0]["file_name"])
+                            for j in temporary_ds:
+                                ds.append(j)
+
+                    except BaseException as e:
+                        logger.info(f"FILE ERROR: {os.path.join(sfp,i)}")
+                        logger.info(f"ERROR: {e}")
+                        problem_list.append(i)
+
+                logger.info(f"Assembling Dataset: {ch[0]}-{ch[-1]}, BATCH:{batch_nr}")
+                #if no viable data was saved, do not make a parquet
+                if len(ds) == 0:
+                    continue
+                ds = Dataset.from_list(ds)
+                #filter out certain files
+                ds = ds.filter(adl_filter)
+                ds = ds.remove_columns("source")
+
+                #make subfolders by year _t for temporary, will be reorganized
+                save_path = os.path.join(output_path,f"{sfolder}_t",f"{source}_{ch[0]}-{ch[-1]}_{batch_nr}.parquet")
+                ds.to_parquet(save_path)
+
+            logger.info(f"FOLDER DONE: {sfolder}")
+
+
+        if len(problem_list) >= 1:
+            with open(os.path.join(logs,f"problems_{ch[0]}-{ch[-1]}.txt"), 'w') as outfile:
+                outfile.write('\n'.join(str(i) for i in problem_list))
+        else:
+            pass
+    #reorganize the data after running everything
+    ds = None
+    temporary_ds = None
+    del ds
+    del temporary_ds
+    reorganize_data(output_path)
+
+
+if __name__ == "__main__":
+    log_path = os.path.join(logs,"extract.log")
+    logging.basicConfig(
+        level=logging.INFO,
+        format="%(asctime)s - %(levelname)s - %(message)s",
+        handlers=[
+            logging.StreamHandler(),
+            logging.FileHandler(log_path),
+        ],
+    )
+    main()
log/extract.log
ADDED
@@ -0,0 +1,219 @@
+2025-08-07 10:58:00,739 - INFO - Extracting from PDFs...1750-1750
+2025-08-07 11:12:13,530 - INFO - Assembling Dataset: 1750-1750, BATCH:0
+2025-08-07 11:22:09,771 - INFO - Extracting from PDFs...1750-1750
+2025-08-07 11:36:09,351 - INFO - Assembling Dataset: 1750-1750, BATCH:0
+2025-08-07 13:27:04,947 - INFO - Extracting from PDFs...1750-1750
+2025-08-07 13:55:40,145 - INFO - Assembling Dataset: 1750-1750, BATCH:0
+2025-08-07 14:47:29,002 - INFO - Assembling Dataset: 1750-1750, BATCH:1
+2025-08-07 15:19:31,498 - INFO - Assembling Dataset: 1750-1750, BATCH:2
+2025-08-07 15:29:16,231 - INFO - Assembling Dataset: 1750-1750, BATCH:3
+2025-08-07 15:32:22,518 - INFO - FOLDER DONE: 1750
+2025-08-07 15:43:54,046 - INFO - Extracting from PDFs...1751-1752
+2025-08-07 16:28:59,288 - INFO - Assembling Dataset: 1751-1751, BATCH:0
+2025-08-07 17:14:19,777 - INFO - Extracting from PDFs...1919-1920
+2025-08-07 17:14:23,075 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Foreløbig Beretning angaaende ..._alma99122800516705763.pdf
+2025-08-07 17:14:23,075 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 17:14:23,561 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Bestemmelser vedrørende Slesvi..._alma99121995648405763.pdf
+2025-08-07 17:14:23,561 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 17:14:24,220 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Prisniveaustigningen 1914-1919..._alma99122350600405763.pdf
+2025-08-07 17:14:24,220 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 17:14:26,469 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Guds Ord som Lyskaster under V..._alma99122350697005763.pdf
+2025-08-07 17:14:26,469 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 17:14:29,540 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Sangen om Sønderjylland - saml..._alma99122765957305763.pdf
+2025-08-07 17:14:29,540 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 17:14:30,332 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Vort Grænsespørgsmaal - Indled..._alma99122765952905763.pdf
+2025-08-07 17:14:30,332 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 17:14:31,523 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Udenrigsminister Scavenius og ..._alma99122209593305763.pdf
+2025-08-07 17:14:31,523 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 17:14:31,524 - INFO - Assembling Dataset: 1919-1919, BATCH:0
+2025-08-07 17:14:35,081 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Indberetninger fra de vigtigst..._alma99122350688005763.pdf
+2025-08-07 17:14:35,082 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 17:15:35,442 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Kulturens Fallit _alma99122350703905763.pdf
+2025-08-07 17:15:35,442 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 17:15:36,883 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Fredsslutningen - i Belysning ..._alma99122350577605763.pdf
+2025-08-07 17:15:36,883 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 17:15:37,349 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Verdenskrigen og Fremtidens Un..._alma99122800382805763.pdf
+2025-08-07 17:15:37,349 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 17:15:40,008 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Flanderns Løve._alma99122350610105763.pdf
+2025-08-07 17:15:40,009 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 17:15:40,009 - INFO - Assembling Dataset: 1919-1919, BATCH:1
+2025-08-07 17:16:31,412 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Oversigt over Maksimalpriser v..._alma99122350591205763.pdf
+2025-08-07 17:16:31,412 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 17:16:37,017 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_En Pilgrimsfærd til Nordfrankr..._alma99122754431905763.pdf
+2025-08-07 17:16:37,018 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 17:16:37,479 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Kirkens Velkommen til sønderjy..._alma99122350756505763.pdf
+2025-08-07 17:16:37,480 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 17:16:38,024 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Betragtninger over Tidens Begi..._alma99122765836405763.pdf
+2025-08-07 17:16:38,024 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 17:16:38,542 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Slien-Dannevirke._alma99122581035505763.pdf
+2025-08-07 17:16:38,542 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 17:16:39,098 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Mindegudstjenesten over de und..._alma99122209615005763.pdf
+2025-08-07 17:16:39,098 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 17:16:40,033 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Rigets Sydgrænse - tre histori..._alma99122765811005763.pdf
+2025-08-07 17:16:40,033 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 17:16:40,034 - INFO - Assembling Dataset: 1919-1919, BATCH:2
+2025-08-07 17:16:41,816 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Operationerne paa Vestfronten ..._alma99121995481705763.pdf
+2025-08-07 17:16:41,816 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 17:16:46,099 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Sønderjylland vundet._alma99122765806805763.pdf
+2025-08-07 17:16:46,099 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 17:19:15,327 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_I Troens Lys - Begivenhederne ..._alma99122765830205763.pdf
+2025-08-07 17:19:15,327 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 17:23:26,768 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Det officielle Danmark i Krige..._alma99122350687105763.pdf
+2025-08-07 17:23:26,769 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 17:23:26,769 - INFO - Assembling Dataset: 1919-1919, BATCH:3
+2025-08-07 17:27:37,060 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Sønderjylland - en Hilsen til ..._alma99122754284705763.pdf
+2025-08-07 17:27:37,060 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 17:28:51,461 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Regnskab _alma99121946921605763.pdf
+2025-08-07 17:28:51,461 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 17:28:51,942 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Verdenskrigen 1914-19 _alma99122350851205763.pdf
+2025-08-07 17:28:51,942 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 17:28:54,582 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Under Bolsjevikernes Diktatur ..._alma99122350867905763.pdf
+2025-08-07 17:28:54,583 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 17:45:01,025 - INFO - Assembling Dataset: 1919-1919, BATCH:4
+2025-08-07 17:56:30,936 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Den fransk-russiske Alliance._alma99122765967505763.pdf
+2025-08-07 17:56:30,936 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 17:56:31,439 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Pax mundi - Verdensfredsmærket..._alma99122350599905763.pdf
+2025-08-07 17:56:31,439 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 17:56:32,213 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Samling III af Udvalgets Beslu..._alma99122350754805763.pdf
+2025-08-07 17:56:32,213 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 17:57:13,317 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Sønderjydske Krigsfanger i Eng..._alma99122765940705763.pdf
+2025-08-07 17:57:13,317 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 17:57:13,653 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Lov om Afvikling af de overord..._alma99122350691905763.pdf
+2025-08-07 17:57:13,653 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 17:57:13,654 - INFO - Assembling Dataset: 1919-1919, BATCH:5
+2025-08-07 17:57:39,309 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Sandheden om Danmarks Spærring..._alma99122350751405763.pdf
+2025-08-07 17:57:39,310 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 17:57:40,779 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Statistisk aarbog 1919 - annua..._alma99122466365005763.pdf
+2025-08-07 17:57:40,780 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 17:57:41,178 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Hvad en ung Vestjyde kan genne..._alma99122350757605763.pdf
+2025-08-07 17:57:41,178 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 17:57:42,709 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Nogle bemærkninger om Feltarti..._alma99122350596505763.pdf
+2025-08-07 17:57:42,709 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 17:58:35,573 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Et af Krigens Ofre _alma99122800523105763.pdf
+2025-08-07 17:58:35,573 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 17:59:19,199 - INFO - Assembling Dataset: 1919-1919, BATCH:6
+2025-08-07 18:00:19,823 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Det sønderjyske Spørgsmaal - e..._alma99121995602705763.pdf
+2025-08-07 18:00:19,823 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:00:21,288 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Danmarks Skibsfart under Verde..._alma99122350755305763.pdf
+2025-08-07 18:00:21,288 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:00:21,478 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Forelæsninger over første Bog ..._alma99122561704405763.pdf
+2025-08-07 18:00:21,478 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:00:29,850 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Vor Ret! - de tro Flensborgere..._alma99122925774605763.pdf
+2025-08-07 18:00:29,851 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:00:31,852 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Det slesvigske Regiment._alma99121936924105763.pdf
+2025-08-07 18:00:31,853 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:00:33,300 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Genforening af en Del af Slesv..._alma99122765823005763.pdf
+2025-08-07 18:00:33,301 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:00:33,301 - INFO - Assembling Dataset: 1919-1919, BATCH:7
+2025-08-07 18:00:40,624 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Fest-Avis for Sundbyernes Unde..._alma99122896836605763.pdf
+2025-08-07 18:00:40,624 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:00:54,236 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Operationerne paa Vestfronten ..._alma99122765927405763.pdf
+2025-08-07 18:00:54,236 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:01:01,428 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Verdenskrigen 1914-1919._alma99122350691405763.pdf
+2025-08-07 18:01:01,429 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:01:01,884 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Grænseproblemet og Kanalproble..._alma99122209599805763.pdf
+2025-08-07 18:01:01,884 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:01:03,311 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Udenlandsk Litteratur vedrøren..._alma99122765943705763.pdf
+2025-08-07 18:01:03,311 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:01:03,311 - INFO - Assembling Dataset: 1919-1919, BATCH:8
+2025-08-07 18:01:15,241 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Sønderjylland og de politiske ..._alma99122765844805763.pdf
+2025-08-07 18:01:15,241 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:01:16,700 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Helsingfors under det røde Reg..._alma99122350681805763.pdf
+2025-08-07 18:01:16,700 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:01:18,077 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Felttogene i Serbien i 1914-15..._alma99122350869305763.pdf
+2025-08-07 18:01:18,077 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:01:18,517 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Strenge Tider er for Haanden._alma99122350768105763.pdf
+2025-08-07 18:01:18,517 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:02:10,273 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Ernæringsraadet, oprettet i Fø..._alma99122800372705763.pdf
+2025-08-07 18:02:10,274 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:02:15,091 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Sønderjylland _alma99122539146305763.pdf
+2025-08-07 18:02:15,091 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:02:15,091 - INFO - Assembling Dataset: 1919-1919, BATCH:9
+2025-08-07 18:02:51,760 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Krigsepidemiologiske Erfaringe..._alma99122350580605763.pdf
+2025-08-07 18:02:51,760 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:03:31,198 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Om Danmarks Fremtidsgrænse mod..._alma99122765969905763.pdf
+2025-08-07 18:03:31,199 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:04:14,472 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Krigen og Kurserne - 1. Bind._alma99122209606305763.pdf
+2025-08-07 18:04:14,473 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:04:15,385 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Fra de sønderjydske Krigsfange..._alma99122925948505763.pdf
+2025-08-07 18:04:15,386 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:04:15,890 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Wladimir Uljanoff Lenin - hans..._alma99121936949505763.pdf
+2025-08-07 18:04:15,891 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:04:15,891 - INFO - Assembling Dataset: 1919-1919, BATCH:10
+2025-08-07 18:05:04,143 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_De sønderjydske Dage i Københa..._alma99122765812205763.pdf
+2025-08-07 18:05:04,144 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:05:04,719 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Sønderjylland tilbage til Danm..._alma99122765843705763.pdf
+2025-08-07 18:05:04,719 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:05:05,787 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Sønderjylland 1864-1919 - udgi..._alma99122765830905763.pdf
+2025-08-07 18:05:05,787 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:05:06,367 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_I Bolschewikernes Rusland - In..._alma99122350686205763.pdf
+2025-08-07 18:05:06,367 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:05:07,316 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Danske Røster Nord og Syd for ..._alma99122522037305763.pdf
+2025-08-07 18:05:07,316 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:05:08,337 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Betænkning fra -Det kirkelige ..._alma99122887328005763.pdf
+2025-08-07 18:05:08,338 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:05:23,299 - INFO - Assembling Dataset: 1919-1919, BATCH:11
+2025-08-07 18:05:36,530 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_Beretning for Aarene... _alma99122022382005763.pdf
+2025-08-07 18:05:36,530 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:05:37,959 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_De danske Sønderjyder _alma99121995507405763.pdf
+2025-08-07 18:05:37,959 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:05:38,495 - INFO - FILE ERROR: ../../kb-books/raw/1919/1919_[Aktstykker vedrørende det søn..._alma99122554538005763.pdf
+2025-08-07 18:05:38,495 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:05:38,496 - INFO - Assembling Dataset: 1919-1919, BATCH:12
+2025-08-07 18:05:38,496 - INFO - FOLDER DONE: 1919
+2025-08-07 18:05:38,655 - INFO - FILE ERROR: ../../kb-books/raw/1920/1920_Vor Brændselsforsyning - I hvi..._alma99125415640005763.pdf
+2025-08-07 18:05:38,656 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:05:38,939 - INFO - FILE ERROR: ../../kb-books/raw/1920/1920_Samling af Love og Bestemmelse..._alma99125698448405763.pdf
+2025-08-07 18:05:38,939 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:06:22,314 - INFO - FILE ERROR: ../../kb-books/raw/1920/1920_Studenter vi kaldes - - muntre..._alma99123915203105763.pdf
+2025-08-07 18:06:22,315 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:09:21,503 - INFO - FILE ERROR: ../../kb-books/raw/1920/1920_Ardjun - en indisk Drengs Livs..._alma99125107098505763.pdf
+2025-08-07 18:09:21,504 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:14:09,705 - INFO - Assembling Dataset: 1920-1920, BATCH:0
+2025-08-07 18:20:46,548 - INFO - FILE ERROR: ../../kb-books/raw/1920/1920_[Indberetning til Niels Neerga..._alma99122765960505763.pdf
+2025-08-07 18:20:46,549 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:20:46,670 - INFO - FILE ERROR: ../../kb-books/raw/1920/1920_Odense Vin Kompagni - 1895 - 1..._alma99124696449805763.pdf
+2025-08-07 18:20:46,670 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:21:09,159 - INFO - FILE ERROR: ../../kb-books/raw/1920/1920_Fribytterdrømme - Digte._alma99125224825805763.pdf
+2025-08-07 18:21:09,159 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:21:09,263 - INFO - FILE ERROR: ../../kb-books/raw/1920/1920_Kaddara - Folkelivsbilleder fr..._alma99125224853505763.pdf
+2025-08-07 18:21:09,263 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:21:09,500 - INFO - FILE ERROR: ../../kb-books/raw/1920/1920_Illustreret Havebog - 1.-3. De..._alma99125466829705763.pdf
+2025-08-07 18:21:09,501 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:21:09,501 - INFO - Assembling Dataset: 1920-1920, BATCH:1
+2025-08-07 18:21:32,283 - INFO - FILE ERROR: ../../kb-books/raw/1920/1920_Hvorfor vi bør have et Hushold..._alma99125466879005763.pdf
+2025-08-07 18:21:32,284 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:22:31,758 - INFO - FILE ERROR: ../../kb-books/raw/1920/1920_Krigsfangerne i Rusland og Sib..._alma99122403446405763.pdf
+2025-08-07 18:22:31,759 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:22:32,052 - INFO - FILE ERROR: ../../kb-books/raw/1920/1920_Hafis' Sange _alma99124018440005763.pdf
+2025-08-07 18:22:32,052 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:22:32,265 - INFO - FILE ERROR: ../../kb-books/raw/1920/1920_Statshusmandsbevægelsen eller ..._alma99124018373405763.pdf
+2025-08-07 18:22:32,266 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:22:32,392 - INFO - FILE ERROR: ../../kb-books/raw/1920/1920_Bibelhistorie med et Uddrag af..._alma99124095865405763.pdf
+2025-08-07 18:22:32,392 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:22:32,392 - INFO - Assembling Dataset: 1920-1920, BATCH:2
+2025-08-07 18:23:12,144 - INFO - FILE ERROR: ../../kb-books/raw/1920/1920_Smaatryk - udg. af Forening fo..._alma99125729443005763.pdf
+2025-08-07 18:23:12,144 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:23:12,302 - INFO - FILE ERROR: ../../kb-books/raw/1920/1920_Spiritismen - Kan man sætte si..._alma99125415639805763.pdf
+2025-08-07 18:23:12,302 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:23:12,521 - INFO - FILE ERROR: ../../kb-books/raw/1920/1920_Lolland-Falsters Forstmandsfor..._alma99123824393005763.pdf
+2025-08-07 18:23:12,521 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:23:12,677 - INFO - FILE ERROR: ../../kb-books/raw/1920/1920_Nedgang og Genoprettelse - Hel..._alma99124088959005763.pdf
+2025-08-07 18:23:12,677 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:23:12,852 - INFO - FILE ERROR: ../../kb-books/raw/1920/1920_Udvalgsbetænkning vedrørende d..._alma99123793057205763.pdf
+2025-08-07 18:23:12,853 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:23:13,010 - INFO - FILE ERROR: ../../kb-books/raw/1920/1920_Christen Jensen, Vindt Mølle o..._alma99124095564405763.pdf
+2025-08-07 18:23:13,010 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:23:13,272 - INFO - FILE ERROR: ../../kb-books/raw/1920/1920_Jødefaren. De verdensberygtede..._alma99124073618905763.pdf
+2025-08-07 18:23:13,272 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:23:13,272 - INFO - Assembling Dataset: 1920-1920, BATCH:3
+2025-08-07 18:23:16,085 - INFO - FILE ERROR: ../../kb-books/raw/1920/1920_Fodfolket - Organisation, Bevæ..._alma99122765839205763.pdf
+2025-08-07 18:23:16,085 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:23:16,311 - INFO - FILE ERROR: ../../kb-books/raw/1920/1920_Jødefaren - Bevismateriale _alma99124073568105763.pdf
+2025-08-07 18:23:16,311 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-07 18:23:16,467 - INFO - FILE ERROR: ../../kb-books/raw/1920/1920_Vejledning til Anvendelse af L..._alma99122943569005763.pdf
+2025-08-07 18:23:16,468 - INFO - ERROR: string indices must be integers, not 'str'
+2025-08-11 13:27:55,385 - INFO - Extracting from PDFs...1751-1752
+2025-08-11 14:20:16,074 - INFO - Assembling Dataset: 1751-1751, BATCH:0
+2025-08-11 15:12:51,632 - INFO - Extracting from PDFs...1751-1752
+2025-08-11 15:57:32,435 - INFO - Assembling Dataset: 1751-1751, BATCH:0
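A plausible reading of the recurring ERROR: string indices must be integers, not 'str' lines above: convert_pdf_to_dataset in extract_data.py returns the string "missing" (not None) when public domain status is not confirmed, while main() only tests temporary_ds is None, so temporary_ds[0]["file_name"] goes on to index the one-character string "m" with a key and raises TypeError. A minimal sketch of a guard matching the sentinel the function actually returns (an observation on the code above, not a change this commit makes):

    temporary_ds = convert_pdf_to_dataset(i, sfp, demo)
    # convert_pdf_to_dataset returns the string "missing", never None,
    # when copyright could not be cleared; test for that sentinel instead:
    if isinstance(temporary_ds, str):
        continue
    print(temporary_ds[0]["file_name"])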
log/problems_1919-1919.txt
ADDED
@@ -0,0 +1,68 @@
+1919_Foreløbig Beretning angaaende ..._alma99122800516705763.pdf
+1919_Bestemmelser vedrørende Slesvi..._alma99121995648405763.pdf
+1919_Prisniveaustigningen 1914-1919..._alma99122350600405763.pdf
+1919_Guds Ord som Lyskaster under V..._alma99122350697005763.pdf
+1919_Sangen om Sønderjylland - saml..._alma99122765957305763.pdf
+1919_Vort Grænsespørgsmaal - Indled..._alma99122765952905763.pdf
+1919_Udenrigsminister Scavenius og ..._alma99122209593305763.pdf
+1919_Indberetninger fra de vigtigst..._alma99122350688005763.pdf
+1919_Kulturens Fallit _alma99122350703905763.pdf
+1919_Fredsslutningen - i Belysning ..._alma99122350577605763.pdf
+1919_Verdenskrigen og Fremtidens Un..._alma99122800382805763.pdf
+1919_Flanderns Løve._alma99122350610105763.pdf
+1919_Oversigt over Maksimalpriser v..._alma99122350591205763.pdf
+1919_En Pilgrimsfærd til Nordfrankr..._alma99122754431905763.pdf
+1919_Kirkens Velkommen til sønderjy..._alma99122350756505763.pdf
+1919_Betragtninger over Tidens Begi..._alma99122765836405763.pdf
+1919_Slien-Dannevirke._alma99122581035505763.pdf
+1919_Mindegudstjenesten over de und..._alma99122209615005763.pdf
+1919_Rigets Sydgrænse - tre histori..._alma99122765811005763.pdf
+1919_Operationerne paa Vestfronten ..._alma99121995481705763.pdf
+1919_Sønderjylland vundet._alma99122765806805763.pdf
+1919_I Troens Lys - Begivenhederne ..._alma99122765830205763.pdf
+1919_Det officielle Danmark i Krige..._alma99122350687105763.pdf
+1919_Sønderjylland - en Hilsen til ..._alma99122754284705763.pdf
+1919_Regnskab _alma99121946921605763.pdf
+1919_Verdenskrigen 1914-19 _alma99122350851205763.pdf
+1919_Under Bolsjevikernes Diktatur ..._alma99122350867905763.pdf
+1919_Den fransk-russiske Alliance._alma99122765967505763.pdf
+1919_Pax mundi - Verdensfredsmærket..._alma99122350599905763.pdf
+1919_Samling III af Udvalgets Beslu..._alma99122350754805763.pdf
+1919_Sønderjydske Krigsfanger i Eng..._alma99122765940705763.pdf
+1919_Lov om Afvikling af de overord..._alma99122350691905763.pdf
+1919_Sandheden om Danmarks Spærring..._alma99122350751405763.pdf
+1919_Statistisk aarbog 1919 - annua..._alma99122466365005763.pdf
+1919_Hvad en ung Vestjyde kan genne..._alma99122350757605763.pdf
+1919_Nogle bemærkninger om Feltarti..._alma99122350596505763.pdf
+1919_Et af Krigens Ofre _alma99122800523105763.pdf
+1919_Det sønderjyske Spørgsmaal - e..._alma99121995602705763.pdf
+1919_Danmarks Skibsfart under Verde..._alma99122350755305763.pdf
+1919_Forelæsninger over første Bog ..._alma99122561704405763.pdf
+1919_Vor Ret! - de tro Flensborgere..._alma99122925774605763.pdf
+1919_Det slesvigske Regiment._alma99121936924105763.pdf
+1919_Genforening af en Del af Slesv..._alma99122765823005763.pdf
+1919_Fest-Avis for Sundbyernes Unde..._alma99122896836605763.pdf
+1919_Operationerne paa Vestfronten ..._alma99122765927405763.pdf
+1919_Verdenskrigen 1914-1919._alma99122350691405763.pdf
+1919_Grænseproblemet og Kanalproble..._alma99122209599805763.pdf
+1919_Udenlandsk Litteratur vedrøren..._alma99122765943705763.pdf
+1919_Sønderjylland og de politiske ..._alma99122765844805763.pdf
+1919_Helsingfors under det røde Reg..._alma99122350681805763.pdf
+1919_Felttogene i Serbien i 1914-15..._alma99122350869305763.pdf
+1919_Strenge Tider er for Haanden._alma99122350768105763.pdf
+1919_Ernæringsraadet, oprettet i Fø..._alma99122800372705763.pdf
+1919_Sønderjylland _alma99122539146305763.pdf
+1919_Krigsepidemiologiske Erfaringe..._alma99122350580605763.pdf
+1919_Om Danmarks Fremtidsgrænse mod..._alma99122765969905763.pdf
+1919_Krigen og Kurserne - 1. Bind._alma99122209606305763.pdf
+1919_Fra de sønderjydske Krigsfange..._alma99122925948505763.pdf
+1919_Wladimir Uljanoff Lenin - hans..._alma99121936949505763.pdf
+1919_De sønderjydske Dage i Københa..._alma99122765812205763.pdf
+1919_Sønderjylland tilbage til Danm..._alma99122765843705763.pdf
+1919_Sønderjylland 1864-1919 - udgi..._alma99122765830905763.pdf
+1919_I Bolschewikernes Rusland - In..._alma99122350686205763.pdf
+1919_Danske Røster Nord og Syd for ..._alma99122522037305763.pdf
+1919_Betænkning fra -Det kirkelige ..._alma99122887328005763.pdf
+1919_Beretning for Aarene... _alma99122022382005763.pdf
+1919_De danske Sønderjyder _alma99121995507405763.pdf
+1919_[Aktstykker vedrørende det søn..._alma99122554538005763.pdf
pd_check/public_domain_data.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:732ec80410a0cd94bc0a5362d16ab9efb4430058b3aa28164ff93eccf9f5eb02
+size 759609
pd_check/public_domain_files.txt
ADDED
The diff for this file is too large to render.
See raw diff
pd_check/select_pd.py
ADDED
@@ -0,0 +1,523 @@
+import os
+import re
+import string
+import json
+import logging
+import itertools
+from datetime import datetime
+from tqdm import tqdm
+from difflib import SequenceMatcher
+
+
+import fitz
+from datasets import Dataset, load_dataset
+
+def int_to_roman(number:int) -> str:
+    """ integers to roman numerals"""
+    ROMAN = [
+        (1000, "M"),
+        ( 900, "CM"),
+        ( 500, "D"),
+        ( 400, "CD"),
+        ( 100, "C"),
+        ( 90, "XC"),
+        ( 50, "L"),
+        ( 40, "XL"),
+        ( 10, "X"),
+        ( 9, "IX"),
+        ( 5, "V"),
+        ( 4, "IV"),
+        ( 1, "I"),
+    ]
+    result = []
+    for (arabic, roman) in ROMAN:
+        (factor, number) = divmod(number, arabic)
+        result.append(roman * factor)
+        if number == 0:
+            break
+    return "".join(result)
+
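Greedy subtraction over the ROMAN table, largest denomination first:

    int_to_roman(1919)   # -> "MCMXIX"
    int_to_roman(4)      # -> "IV"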
+def process_non_upper_names(author:str) -> str:
+    """
+    returns the end of a name, in which there are
+    non-uppercase parts
+
+    2 special cases
+    - some middle name indicating a place "von,af,de etc...
+    - some end name indicating a trait "den yngre, den ælder etc...
+    1 special case
+    - some numeral used for royalty "Christian 1."
+    """
+    mid_name_list = [' af ',
+                     ' da ',
+                     ' de ',
+                     ' du ',
+                     ' la ',
+                     ' le ',
+                     ' til ',
+                     ' van ',
+                     ' von ',
+                     ' zu '
+                     ]
+    end_name_list = [' den Gamle',
+                     ' den Hellige',
+                     ' den Store',
+                     ' den Unge',
+                     ' den eldre',
+                     ' den yngre',
+                     ' den yngste',
+                     ' den Ældre',
+                     ' den ældre',
+                     ' junior',
+                     ' the Younger'
+                     ]
+    #get everything after midname
+    mid_name = [i for i in mid_name_list if i in author]
+    if len(mid_name) != 0:
+        mid_name = re.findall(rf"{mid_name[0]}.*", author)
+    #get end, if midname already has it, don't
+    end_name = [i for i in end_name_list if i in author]
+    if len(end_name) != 0 and len(mid_name) != 0 and end_name[0] in mid_name[0]:
+        end_name = []
+    full_name = [mid_name,end_name]
+    full_name = "".join(sum(full_name, []))
+    return full_name
+
+def monogram_list(author: str) -> list[str]:
+    """
+    creates a list of first name abbreviations
+    John Alex Sample --> [John Alex Sample, J Alex Sample, J A Sample, John A Sample]
+
+    special case endings added by
+    process_non_upper_names(author:str) -> str:
+    int_to_roman(number:int) -> str:
+    """
+    #remove special cases, they will be re-added after the last name at the end
+    spec_end = process_non_upper_names(author)
+    author = author.replace(spec_end,"")
+
+    abbreviation_list = [author]
+    #split on whitespace
+    author = re.split(r"[\s]+",author)
+    #-1 because last name stays long
+    in_list = author[:-1]
+    #get all combinations of list elements as sublists
+    out_list = [c for i in range(len(in_list)) for c in itertools.combinations(in_list, i+1)]
+    out_list = [x[0] if len(x) == 1 else list(x) for x in out_list]
+    #make single entries into one item lists
+    out_list = [["".join(list(x))] if type(x) is not list else x for x in out_list]
+
+    for name_combos in out_list:
+        name_short = []
+        name_index = []
+        #for each list of combinations
+        #the names in that combination will be shortened
+        for name in name_combos:
+            if name in author:
+                #find the elements in that exact combination
+                #replace the names with the first character
+                og_index = author.index(name)
+                abbreviation = name[0]
+                #list indexes and replacements for those indexes
+                name_index.append(og_index)
+                name_short.append(abbreviation)
+
+        #replace
+        abbr_author = author.copy()
+        for (name_index, name_short) in zip(name_index, name_short):
+            abbr_author[name_index] = name_short
+        abbreviation_list.append(" ".join(abbr_author))
+    #remove dupes, add special endings
+    abbreviation_list = list(set(abbreviation_list))
+
+    if spec_end != "":
+        spec_end_l = [i + spec_end for i in abbreviation_list]
+        abbreviation_list = sum([abbreviation_list,spec_end_l],[])
+    if sum(1 for i in author if i.isdigit()) > 0:
+        numeric = [i for i in author if i.isdigit()][0]
+        rom_num = int_to_roman(int(numeric))
+        rom_num_l = [i.replace(numeric,rom_num) for i in abbreviation_list]
+        abbreviation_list = sum([abbreviation_list,rom_num_l],[])
+
+    return abbreviation_list
+
144 |
+
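# An illustrative expansion (not in the original script), matching the docstring:
# >>> sorted(monogram_list("John Alex Sample"))
# ['J A Sample', 'J Alex Sample', 'John A Sample', 'John Alex Sample']
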
def clean_names(authors: str) -> list[str]:
    """
    Takes a string of author names separated by ";" and reformats it
    - returns a list
    - switches surname-firstname order if "," is present
    - removes parentheses and other punctuation
    e.g.
    from:
    " Jens Sample ; Example, Hans ; S. A. Ample (example)"
    to:
    ["Jens Sample","Hans Example","S A Ample"]
    """
    authors = authors.split(";")
    #If "," --> split the item, reverse it, rejoin it. Otherwise don't
    authors = [" ".join(part.strip() for part in x.split(",")[::-1]) if "," in x else x for x in authors]
    #If parentheses, remove them (and anything inside them)
    authors = [re.sub(r"[\(].*?[\)]", "", x) if "(" in x or ")" in x else x for x in authors]
    #separate elements by char, make punctuation whitespace, join again
    authors = ["".join([ch if ch not in string.punctuation else " " for ch in el]).strip() for el in authors]
    #If two uppercase characters are adjacent, separate them: AB -> A B
    authors = ["".join([" " + ch if chr_count > 0 and el[chr_count-1].isupper() and ch.isupper() else ch for chr_count, ch in enumerate(el)]) for el in authors]
    #remove excess spaces (inside and at the edges) and empty elements
    authors = [re.sub(' +', ' ', i).strip() for i in authors if i != ""]

    return authors

def lower_names(authors: list[str]) -> list[str]:
    """
    Takes a list of author names and lowercases them for easier comparison
    e.g.
    from:
    ["Jens Sample","Hans Example","S. A. Ample"]
    to:
    ["jenssample","hansexample","saample"]
    """
    #remove dots and spaces, lowercase
    authors = [x.replace(".", "") for x in authors]
    authors = [x.replace(" ", "") for x in authors]
    authors = [x.lower() for x in authors]
    return authors

def add_abbreviations(ds: Dataset) -> Dataset:
    """add abbreviations to single author names in the scraped reference dataset"""
    #clean the name first to remove parentheses
    ds["c_name"] = clean_names(ds["name"])
    ds["abbrevs"] = monogram_list(ds["c_name"][0])
    return ds

def separate_authors(ds: Dataset) -> Dataset:
    """separate the authors in the pdf metadata"""
    ds["c_author"] = clean_names(ds["author"])
    ds["n_author"] = len(ds["c_author"])
    ds["abbrevs"] = [monogram_list(i) for i in ds["c_author"]]

    return ds

def check_abbrevs(name: str) -> bool:
    """checks if there are abbreviated (single-letter) parts in a name"""
    #pad the name so single-letter parts at either end are matched too
    newstr = " " + name + " "
    #a single word character surrounded by whitespace, anywhere in the string
    return re.search(r"\s\w\s", newstr) is not None

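# Illustrative checks (not in the original script): cleaned names such as
# "S A Ample" contain single-letter parts, "Jens Sample" does not.
# >>> check_abbrevs("S A Ample")
# True
# >>> check_abbrevs("Jens Sample")
# False
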
def dead_70_yrs_ago(ds):
    """filter for the scraped authors, keeping the ones who died at least 70 years ago"""
    birth_miss = False
    death_miss = False
    try:
        birth_yr = int(ds["born"])
        if birth_yr > 1955:
            birth = False
        else:
            birth = True
    except ValueError:
        birth = False
        birth_miss = True

    try:
        death_yr = int(ds["died"])
        if death_yr > 1955:
            death = False
        else:
            death = True
    except ValueError:
        death = False
        death_miss = True

    #both years are 1955 or earlier and neither of them is missing
    if (death and birth and not birth_miss and not death_miss):
        filtered = True
    else:
        filtered = False

    return filtered

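# Illustrative rows (hypothetical): both years must parse and be <= 1955.
# >>> dead_70_yrs_ago({"born": "1805", "died": "1875"})
# True
# >>> dead_70_yrs_ago({"born": "1920", "died": "1990"})
# False
# >>> dead_70_yrs_ago({"born": "?", "died": "1850"})
# False
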
def match_by_name(name: str,
                  ds_filt: Dataset) -> tuple[list[str], list[str]]:
    """
    Match a name to another list of metadata
    returns [name + birth and death dates], [reference link]
    """
    #flatten; assumes each scraped row has exactly one cleaned name,
    #so indexes into ref_names line up with row indexes
    ref_names = [x for xs in ds_filt["c_name"] for x in xs]
    #match the name to the list of all clean names,
    #then get the more informative name from the ds based on index
    found_author = "".join(set([name]).intersection(ref_names))
    match_idx = [i for i in range(len(ref_names)) if ref_names[i] == found_author]
    name_matches = [ds_filt["name_yr"][indx] for indx in match_idx]
    link_matches = [ds_filt["link"][indx] for indx in match_idx]

    return name_matches, link_matches

def match_by_abbreviation(abbrev: str, ds_filt: Dataset) -> tuple[list[str], list[str]]:
    """
    Match a name with an abbreviated word in it to another list of metadata.
    returns [name + birth and death dates], [reference link]
    """
    #rows whose abbreviation list contains the abbreviated name
    #(equivalent to the original two-step match on identical "abbrevs" lists)
    found_names = [x["name_yr"] for x in ds_filt if abbrev in x["abbrevs"]]
    found_links = [x["link"] for x in ds_filt if abbrev in x["abbrevs"]]
    #remove duplicates
    name_matches = list(set(found_names))
    link_matches = list(set(found_links))

    return name_matches, link_matches

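# Matching sketch with a hypothetical reference row: if ds_filt contains
# {"abbrevs": ["Hans Christian Andersen", "H C Andersen", ...],
#  "name_yr": "Hans Christian Andersen (1805-1875)", "link": "..."},
# then match_by_abbreviation("H C Andersen", ds_filt) returns
# (["Hans Christian Andersen (1805-1875)"], ["..."]).
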
def find_author_json(data: dict[str,dict[str,dict[str,str]]]) -> str:
    """
    A function for finding the author(s) in the various possible locations in the json metadata.
    """
    try:
        author = data.get("pnx", {}).get("addata", {})["au"]
    except KeyError:
        author = []
    try:
        add_author = data.get("pnx", {}).get("addata", {})["addau"]
    except KeyError:
        add_author = []

    authors = list(set(author)) + list(set(add_author))
    authors = "; ".join(authors)

    if len(authors) < 1:
        try:
            authors = data.get("pnx", {}).get("sort", {})["author"]
            authors = "; ".join(authors)
        except KeyError:
            pass

    if len(authors) < 1:
        try:
            authors = data.get("pnx", {}).get("display", {})["creator"]
            authors = "; ".join(authors)
        except KeyError:
            authors = "missing"

    return authors

def find_title_json(data: dict[str,dict[str,dict[str,str]]]) -> str:
    """
    A function for finding the title in the various possible locations in the json metadata.
    """
    try:
        title = data.get("pnx", {}).get("display", {})["title"][0]
    except (KeyError, IndexError):
        title = []
    if len(title) < 1:
        try:
            title = data.get("pnx", {}).get("addata", {})["btitle"][0]
        except (KeyError, IndexError):
            pass
    if len(title) < 1:
        try:
            title = data.get("pnx", {}).get("sort", {})["title"][0]
        except (KeyError, IndexError):
            pass
    if len(title) < 1:
        title = "missing"
    return title

def find_digitalization(data: dict[str,dict[str,dict[str,str]]]) -> str:
    """
    A function for finding the digitalization date in the various possible locations in the json metadata.
    """
    try:
        digit = data.get("pnx", {}).get("display", {})["creationdate"][0]
        #last 4-digit number in the string
        digit = re.findall(r"\d{4}$", digit)[0]
    except (KeyError, IndexError):
        digit = []
    if len(digit) < 1:
        try:
            digit = data.get("pnx", {}).get("addata", {})["date"][1]
            digit = re.findall(r"\d{4}$", digit)[0]
        except (KeyError, IndexError):
            digit = "missing"
    return digit

def find_source(data: dict[str,dict[str,dict[str,str]]]) -> str:
    """
    A function for finding the source of the document in the json metadata.
    """
    try:
        doc_source = data.get("pnx", {}).get("display", {})["lds50"]
        #keep the first source entry that is not the digitization note
        doc_source = [i for i in doc_source if "Digi" not in i][0]
    except (KeyError, IndexError):
        doc_source = "missing"
    return doc_source

def extract_meta_data(pdf_file: str) -> tuple[str, str, str, str]:
    """
    A function for extracting metadata from the json file next to the pdf
    includes:
    - author(s)
    - title
    - published
    - digitalized
    - source
    """
    try:
        #load in the json that accompanies the pdf
        json_file = pdf_file[:-3] + "json"
        with open(json_file) as f:
            data = json.load(f)
        authors = find_author_json(data)
        title = find_title_json(data)
        digitalized = find_digitalization(data)
        doc_source = find_source(data)
    except Exception:
        authors = "missing"
        title = "missing"
        digitalized = "missing"
        doc_source = "missing"
    return authors, title, digitalized, doc_source

def make_metadata_ds(data_path: str) -> Dataset:
    """
    Extracts json metadata for all files in the path and creates a ds
    """
    ds_list = []
    year_folders = os.listdir(data_path)
    #remove everything that's not a year
    year_folders = [i for i in year_folders if len(i) == 4]
    for year in tqdm(year_folders):
        for file in os.listdir(os.path.join(data_path, year)):
            if "pdf" in file:
                input_file = os.path.join(data_path, year, file)
                #the folder name is the publication year
                pub_year = year
                #get metadata (from json)
                author, title, _, _ = extract_meta_data(input_file)

                meta_row = {"doc": file,
                            "author": author,
                            "title": title,
                            "pub_year": pub_year}
                ds_list.append(meta_row)

    meta_ds = Dataset.from_list(ds_list)
    #add author separation, and abbreviations
    meta_ds = meta_ds.map(separate_authors)

    return meta_ds

def main():
    #obtain scraped data
    ds_filtered = Dataset.from_parquet(os.path.join("..", "scrape", "da_people_large.parquet"))
    ds_filtered = ds_filtered.map(add_abbreviations)
    ds_filtered = ds_filtered.filter(dead_70_yrs_ago)
    ##### get metadata from all pdfs into a dataset
    data_path = os.path.join("..", "..", "..", "kb-books", "raw")
    meta_ds = make_metadata_ds(data_path)

    #match
    #the columns are string-typed, so "pass" uses "true"/"false" sentinels
    #to stay consistent with the "missing" placeholder row
    init_ds = {"doc": ["missing"],
               "author": ["missing"],
               "n_author": ["missing"],
               "title": ["missing"],
               "pub_year": ["missing"],
               "match": ["missing"],
               "link": ["missing"],
               "match_ratio": ["missing"],
               "pass": ["missing"]}
    new_ds = Dataset.from_dict(init_ds)

    for i in tqdm(meta_ds):
        author_matches = []
        link_matches = []
        #fall back to a "missing" match if there are no authors to check
        matched_authors = [["missing"]]
        matched_link = [["missing"]]
        publication_date = int(i["pub_year"])
        #remove authors who were not more than 18 when the work was published,
        #they are probably just namesakes
        ds_filt = ds_filtered.filter(lambda ds: int(ds["born"]) + 18 < publication_date, desc=f"Year: {publication_date}")

        for author in i["c_author"]:
            if check_abbrevs(author):
                name_list, link_list = match_by_abbreviation(author, ds_filt)
            else:
                name_list, link_list = match_by_name(author, ds_filt)

            author_matches.append(name_list)
            link_matches.append(link_list)

        if len(author_matches) > 0:
            matched_authors = author_matches
            matched_link = link_matches

        ########## evaluate matches
        #stringify the list like:
        #a1m1, a1m2; a2m1, a2m2 (a1m1 = author 1 match 1)
        str_auths = "; ".join([", ".join(i) for i in matched_authors])
        str_links = "; ".join([", ".join(i) for i in matched_link])
        #check if all authors have a match
        match_ratio = len([x for x in matched_authors if len(x) > 0]) / int(i["n_author"])
        if match_ratio == 1:
            is_it_pd = "true"
        else:
            is_it_pd = "false"

        #save info
        temp_ds = {"doc": i["doc"],
                   ##Adjust for real data
                   "author": "; ".join(i["c_author"]),
                   "n_author": i["n_author"],
                   "title": i["title"],
                   "pub_year": i["pub_year"],
                   "match": str_auths,
                   "link": str_links,
                   "match_ratio": match_ratio,
                   "pass": is_it_pd}

        new_ds = new_ds.add_item(temp_ds)
    #filter confirmed passes
    new_ds = new_ds.filter(lambda ds: ds["pass"] == "true")
    passed_filenames = new_ds["doc"]
    #save the whole dataset for checking
    new_ds.to_parquet("public_domain_data.parquet")
    #save only the filenames
    with open("public_domain_files.txt", 'w') as outfile:
        outfile.write('\n'.join(str(i) for i in passed_filenames))
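#note (not shown in this diff hunk): the script defines main() but never
#invokes it; a guard like the one in scrape_large.py is presumably intended:
if __name__ == "__main__":
    main()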
scrape/da_people_large.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5725ad3bc12770861cee958791af719672648f436d9ac7a5818fd3d97ce5455e
size 476334
scrape/scrape_large.py
ADDED
@@ -0,0 +1,306 @@
import os
import re
import requests
from tqdm import tqdm
from datasets import Dataset
from bs4 import BeautifulSoup

# try to find birth/death info in the page's embedded script metadata
def from_wiki_script(author_soup):
    try:
        text = str(author_soup.find_all("script")[0])
        try:
            #"Født i" = "Born in"
            birth = re.findall(r"Født i \d{4}", text)[0]
            birth = re.findall(r"\d{4}", birth)[0]
        except IndexError:
            birth = None

        try:
            #"Døde i" = "Died in"
            death = re.findall(r"Døde i \d{4}", text)[0]
            death = re.findall(r"\d{4}", death)[0]
        except IndexError:
            death = None
    except (IndexError, KeyError):
        birth = None
        death = None
    return birth, death

def from_infobox(author_soup):
    #fall back to the biography infobox on the right
    try:
        boxes = author_soup.find_all("table")
        try:
            boxes = [i for i in boxes if "biography" in i["class"]][0]
            try:
                #value: the first 4-digit number after the label
                death = re.findall(r"(?<=Død).*?[^\d]*(\d{4})", str(boxes))[0]
            except IndexError:
                death = None
            try:
                birth = re.findall(r"(?<=Født).*?[^\d]*(\d{4})", str(boxes))[0]
            except IndexError:
                birth = None
        except IndexError:
            birth = None
            death = None
    except KeyError:
        birth = None
        death = None

    return birth, death

#last resort: take the first two 4-digit numbers in the first paragraph
def from_wiki_text(author_soup):
    try:
        text = list(author_soup.find_all("p"))[0].get_text()
        try:
            birth = re.findall(r"\d{4}", text)[0]
        except IndexError:
            birth = None
        try:
            death = re.findall(r"\d{4}", text)[1]
        except IndexError:
            death = None
    except (IndexError, KeyError):
        birth = None
        death = None

    return birth, death

def none_to_q(val: str) -> str:
    """ If the value is None, replace it with "?" """
    if val is None:
        val = "?"
    return val

def find_wiki_birth_death(author_soup):
    #try the strategies in order: script metadata, infobox, first paragraph
    birth, death = from_wiki_script(author_soup)
    if birth is None and death is None:
        birth, death = from_infobox(author_soup)
    if birth is None and death is None:
        birth, death = from_wiki_text(author_soup)

    birth = none_to_q(birth)
    death = none_to_q(death)

    return birth, death

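# Usage sketch (hypothetical URL): the three strategies run in order
# (embedded script metadata, then infobox, then first paragraph) and
# unresolved values come back as "?".
# soup = BeautifulSoup(requests.get("https://da.wikipedia.org/wiki/H.C._Andersen").content, 'html.parser')
# birth, death = find_wiki_birth_death(soup)  # e.g. ("1805", "1875")
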
#filter for people
def is_a_person(tag):
    return ((tag.has_attr('href')) and
            (tag.has_attr('title')) and
            (len(tag.attrs) == 2) and
            ("index" not in tag.get("href")) and
            (":" not in tag.get("href")))

#filter for wikimedia commons
def is_a_person_commons(tag):
    return ((tag.has_attr('href')) and
            (tag.has_attr('title')) and
            (len(tag.attrs) == 2) and
            ("index" not in tag.get("href")) and
            (("Writers" not in tag.get("title")) and
             ("ategories" not in tag.get("title")) and
             ("Denmark" not in tag.get("title"))) and
            ("Category" in tag.get("title"))
            )

#filter for author subcategories
def is_a_subcategory(tag):
    return ((tag.has_attr('href')) and
            (tag.has_attr('title')) and
            ("Dansksprogede" in tag.get("title"))
            )

def flatten(twodlist: list[list]) -> list:
    """ flatten a list by 1 dimension"""
    onedlist = [x for xs in twodlist for x in xs]
    return onedlist

def extract_authors(people,
                    authors: list[dict[str,str]],
                    name_list: list[str]
                    ) -> tuple[list[dict[str,str]], list[str]]:
    for i in people:
        author_name = i.get("title")
        author_link = i.get("href")
        if author_name not in name_list:
            #find their birth and death years
            author_page = requests.get(f"https://da.wikipedia.org{author_link}")
            author_soup = BeautifulSoup(author_page.content, 'html.parser')

            birth, death = find_wiki_birth_death(author_soup)

            author_row = {
                "link": f"https://da.wikipedia.org{author_link}",
                "name": author_name,
                "born": birth,
                "died": death,
                "name_yr": f"{author_name} ({birth}-{death})"
            }
            authors.append(author_row)
            name_list.append(author_name)
    return authors, name_list

def extract_authors_commons(people,
                            authors: list[dict[str,str]],
                            name_list: list[str]
                            ) -> tuple[list[dict[str,str]], list[str]]:
    for i in people:
        author_name = i.get_text()
        author_link = i.get("href")
        if author_name not in name_list:
            #find their birth and death years in the category infobox
            author_page = requests.get(f"https://commons.wikimedia.org{author_link}")
            author_soup = BeautifulSoup(author_page.content, 'html.parser')

            boxes = author_soup.find_all("table")
            try:
                box = [i for i in boxes if "Date of birth" in str(i)][0]
                try:
                    #value: the first 4-digit number after the label
                    death = re.findall(r"(?<=Date of death).*?[^\d]*(\d{4})", str(box))[0]
                except IndexError:
                    death = None
                try:
                    birth = re.findall(r"(?<=Date of birth).*?[^\d]*(\d{4})", str(box))[0]
                except IndexError:
                    birth = None
            except IndexError:
                birth = None
                death = None

            birth = none_to_q(birth)
            death = none_to_q(death)

            author_row = {
                "link": f"https://commons.wikimedia.org{author_link}",
                "name": author_name,
                "born": birth,
                "died": death,
                "name_yr": f"{author_name} ({birth}-{death})"
            }
            authors.append(author_row)
            name_list.append(author_name)

    return authors, name_list

def is_next_page(tag):
    #"næste side" = "next page", the pagination link on category listings
    return (tag.get_text() == "næste side")

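# Note on the find_all calls below: BeautifulSoup accepts a callable as the
# filter and returns every tag for which it is True, e.g.:
# people = soup.find_all(is_a_person)
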
def main():
    authors = []
    name_list = []
    #people from the main page
    print("https://da.wikipedia.org/wiki/Kategori:Dansksprogede_forfattere")
    page = requests.get("https://da.wikipedia.org/wiki/Kategori:Dansksprogede_forfattere")
    soup = BeautifulSoup(page.content, 'html.parser')
    #1 get all people from the page
    #(the original chained "and"s in find_all were a no-op, only the callable counts)
    people = list(soup.find_all(is_a_person))
    authors, name_list = extract_authors(
        people,
        authors,
        name_list
    )
    ##### go into subcategories
    sub_c = soup.find_all(is_a_subcategory)
    for i in sub_c:

        if "Danmark" not in i.get("title"):
            #if the authors are not from Denmark (more pages, fewer people)
            new_link = f"https://da.wikipedia.org{i.get('href')}"
            page = requests.get(new_link)
            soup = BeautifulSoup(page.content, 'html.parser')
            people = list(soup.find_all(is_a_person))
            authors, name_list = extract_authors(
                people,
                authors,
                name_list
            )
            print(f"DONE: {i.get('title')}")

        elif "Danmark" in i.get("title"):
            #if the authors are from Denmark (fewer pages, more people)
            print("Processing Authors from Denmark (alphabetic order)...\n")
            #alphabet_list = "A B".split()
            alphabet_list = "A B C D E F G H I J K L M N O P Q R S T U V W X Y Z Æ Ø Å".split()
            for letter in tqdm(alphabet_list):
                page = requests.get(f"https://da.wikipedia.org/w/index.php?title=Kategori:Dansksprogede_forfattere_fra_Danmark&from={letter}")
                soup = BeautifulSoup(page.content, 'html.parser')
                people = list(soup.find_all(is_a_person))
                authors, name_list = extract_authors(
                    people,
                    authors,
                    name_list
                )
    ###other webpages
    print("Processing https://commons.wikimedia.org/wiki/Category:Writers_from_Denmark_by_name")
    #abc_list = "A B".split()
    abc_list = "A B C D E F G H I J K L M N O P Q R S T U V W X Y Z".split()
    for abc in tqdm(abc_list):
        page = requests.get(f"https://commons.wikimedia.org/w/index.php?title=Category:Writers_from_Denmark_by_name&from={abc}")
        soup = BeautifulSoup(page.content, 'html.parser')
        people = list(soup.find_all(is_a_person_commons))
        authors, name_list = extract_authors_commons(
            people,
            authors,
            name_list
        )

print(f"Processing https://da.wikipedia.org/wiki/Kategori:Personer_i_Dansk_Biografisk_Leksikon")
|
259 |
+
|
260 |
+
#get names from page, next page, repeat
|
261 |
+
for abc in tqdm(abc_list):
|
262 |
+
page = requests.get(f"https://commons.wikimedia.org/w/index.php?title=Category:Writers_from_Denmark_by_name&from={abc}")
|
263 |
+
soup = BeautifulSoup(page.content, 'html.parser')
|
264 |
+
people = list(soup.find_all("ul" and "li" and "a" and is_a_person_commons))
|
265 |
+
authors, name_list = extract_authors_commons(
|
266 |
+
people,
|
267 |
+
authors,
|
268 |
+
name_list
|
269 |
+
)
|
270 |
+
    #another webpage
    p_counter = 0
    print("Processing https://da.wikipedia.org/wiki/Kategori:Personer_i_Dansk_Biografisk_Leksikon")

    page = requests.get("https://da.wikipedia.org/wiki/Kategori:Personer_i_Dansk_Biografisk_Leksikon")
    soup = BeautifulSoup(page.content, 'html.parser')
    #get names from the page, follow the next-page link, repeat until there is no next page
    while len(soup.find_all(is_next_page)) > 0:

        people = list(soup.find_all(is_a_person))
        authors, name_list = extract_authors(
            people,
            authors,
            name_list
        )
        p_counter += 1
        new_page = soup.find_all(is_next_page)[0]["href"]
        new_link = f"https://da.wikipedia.org{new_page}"
        page = requests.get(new_link)
        soup = BeautifulSoup(page.content, 'html.parser')
        print(f"Scraped page {p_counter}/~30...")
    else:
        #the while-else runs once the loop exits normally: scrape the last page
        print("Scraping last page...")
        people = list(soup.find_all(is_a_person))
        authors, name_list = extract_authors(
            people,
            authors,
            name_list
        )

    ds = Dataset.from_list(authors)
    ds.to_parquet(os.path.join(".", "da_people_large.parquet"))


if __name__ == "__main__":
    main()