Dataset card

Modalities: Image, Text
Formats: Arrow
Libraries: Datasets

import os
import re
import string
import json
import logging
import itertools
from datetime import datetime
from tqdm import tqdm
from difflib import SequenceMatcher


import fitz
from datasets import Dataset, load_dataset

def int_to_roman(number:int) -> str:
    """Convert an integer to a Roman numeral."""
    ROMAN = [
    (1000, "M"),
    ( 900, "CM"),
    ( 500, "D"),
    ( 400, "CD"),
    ( 100, "C"),
    (  90, "XC"),
    (  50, "L"),
    (  40, "XL"),
    (  10, "X"),
    (   9, "IX"),
    (   5, "V"),
    (   4, "IV"),
    (   1, "I"),
    ]
    result = []
    for (arabic, roman) in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)

def process_non_upper_names(author:str) -> str:
    """ 
    Returns the end of a name when it contains non-uppercase parts.

    2 special cases:
    - a middle name indicating a place: "von", "af", "de" etc.
    - an end name indicating a trait: "den yngre", "den ældre" etc.
    1 related special case (handled via int_to_roman in monogram_list):
    - a numeral used for royalty, e.g. "Christian 1."
    """
    mid_name_list = [' af ',
                    ' da ',
                    ' de ',
                    ' du ',
                    ' la ',
                    ' le ',
                    ' til ',
                    ' van ',
                    ' von ',
                    ' zu '          
                    ]
    end_name_list = [' den Gamle',
                    ' den Hellige',
                    ' den Store',
                    ' den Unge',
                    ' den eldre',
                    ' den yngre',
                    ' den yngste',
                    ' den Ældre',
                    ' den ældre',
                    ' junior',
                    ' the Younger'
                    ]
    #get everything after the middle name
    mid_name = [i for i in mid_name_list if i in author]
    if len(mid_name) != 0:
        mid_name = re.findall(rf"{mid_name[0]}.*", author)
    #get the end name; skip it if the middle-name match already contains it
    end_name = [i for i in end_name_list if i in author]
    if len(end_name) != 0 and len(mid_name) != 0 and end_name[0] in mid_name[0]:
        end_name = []
    full_name = [mid_name,end_name]
    full_name = "".join(sum(full_name, []))
    return full_name
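
#Illustrative examples (hypothetical names):
#    process_non_upper_names("Rasmus von Eksempel") -> " von Eksempel"
#    process_non_upper_names("Christian den Ældre") -> " den Ældre"
#    process_non_upper_names("Jens Sample")         -> ""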

def monogram_list(author: str) -> list[str]:
    """
    creates a list of first name abbreviations
    John Alex Sample --> [John Alex Sample, J Alex Sample, J A Sample, John A Sample]

    special case endings added by
    process_non_upper_names(author:str) -> str:
    int_to_roman(number:int) -> str:
    """
    #remove special cases, they will be re-added after the last name at the end
    spec_end = process_non_upper_names(author)
    author = author.replace(spec_end,"")

    abbreviation_list = [author]
    #split on whitespace
    author = re.split(r"[\s]+",author)
    #-1 because last name stays long
    in_list = author[:-1]
    #get all combinations of list elements as sublists
    out_list = [c for i in range(len(in_list)) for c in itertools.combinations(in_list, i+1)]
    out_list = [x[0] if len(x) == 1 else list(x) for x in out_list]
    #make single entries into one item lists
    out_list = [["".join(list(x))] if type(x) is not list else x for x in out_list]

    for name_combos in out_list:
        name_short = []
        name_index = []
        #for each list of combinations
        #the names in that combination will be shortened
        for name in name_combos:
            if name in author:
                #find the elements in that exact combination
                #replace the names with the first character
                og_index = author.index(name)
                abbreviation = name[0]
                #list indexes and replacements for those indexes
                name_index.append(og_index)
                name_short.append(abbreviation)
        
        #replace
        abbr_author = author.copy()
        for (idx, short) in zip(name_index, name_short):
            abbr_author[idx] = short
        abbreviation_list.append(" ".join(abbr_author))
    #remove dupes, add special endings
    abbreviation_list = list(set(abbreviation_list))

    if spec_end != "":
        spec_end_l = [i + spec_end for i in abbreviation_list]
        abbreviation_list = sum([abbreviation_list,spec_end_l],[])
    if sum(1 for i in author if i.isdigit()) > 0:
        numeric = [i for i in author if i.isdigit()][0]
        rom_num = int_to_roman(int(numeric))
        rom_num_l = [i.replace(numeric,rom_num) for i in abbreviation_list] 
        abbreviation_list = sum([abbreviation_list,rom_num_l],[])

    return abbreviation_list
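
#Illustrative examples (hypothetical names; order of the returned lists may vary):
#    monogram_list("John Alex Sample")
#    -> ["John Alex Sample", "J Alex Sample", "John A Sample", "J A Sample"]
#    monogram_list("Christian 4")
#    -> ["Christian 4", "C 4", "Christian IV", "C IV"]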

def clean_names(authors:str) -> list[str]:
    """
    Takes a string of author names separated by ";" and reformats
    - returns a list
    - switches surname-firstname order if "," is present
    - removes parentheses and their contents
    - turns remaining punctuation into whitespace
    e.g.
    from:
    " Jens Sample ; Example, Hans ; S. A. Ample (example)"
    to:
    ["Jens Sample", "Hans Example", "S A Ample"]
    """
    authors = authors.split(";")
    #If "," --> split item, reverse it, rejoin it. Otherwise don't
    authors = ["".join(x.split(",")[::-1]) if "," in x else x for x in authors]
    #If parentheses, remove them.
    authors = [re.sub(r"[\(].*?[\)]", "", x) if ("(" or ")") in x else x for x in authors]
    #separate elements by char, make punctuation whitespace, join again,
    authors = ["".join([ch if ch not in string.punctuation else " " for ch in el]).strip() for el in authors]
    #If two uppercase characters are beside, separate them AB -> A B
    authors = ["".join([" " + ch if el[chr_count-1].isupper() and ch.isupper() else ch for chr_count, ch in enumerate(el)]) for el in authors]
    #remove excess spaces (inside and from the sides) and empty elements
    authors = [re.sub(' +', ' ', i).strip() for i in authors if i != ""]
    
    return authors
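
#Illustrative example (hypothetical names):
#    clean_names("Christian 1.; Ample, S.A.") -> ["Christian 1", "S A Ample"]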

def lower_names(authors: list[str]) -> list[str]:
    """
    Takes a list of author names and lowercases them for easier comparison
    e.g.
    from:
    ["Jens Sample", "Hans Example", "S A Ample"]
    to:
    ["jenssample","hansexample","saample"]
    """
    #remove dots and spaces, lowercase
    authors = [x.replace(".","") for x in authors]
    authors = [x.replace(" ","") for x in authors]
    authors = [x.lower() for x in authors]
    return authors

def add_abbreviations(ds:Dataset)->Dataset:
    """add abbreviations to single author names in the scraped reference dataset"""
    #clean it first to remove parentheses
    ds["c_name"] = clean_names(ds["name"])
    ds["abbrevs"] = monogram_list(
            clean_names(ds["name"])[0]
            )
    return ds

def separate_authors(ds:Dataset)->Dataset:
    """separate authors in the pdf metadata"""
    ds["c_author"] = clean_names(ds["author"])
    ds["n_author"] = len(ds["c_author"])
    ds["abbrevs"] = [monogram_list(i) for i in ds["c_author"]]

    return ds

def check_abbrevs(name:str) -> bool:
    """checks if there are abbreviations (single-letter name parts) in a name"""
    #pad with whitespace so initials at either end are also caught
    newstr = " " + name + " "
    #whitespace, single word character, whitespace - anywhere in the string
    if re.search(r"\s\w\s", newstr) is None:
        return False
    else:
        return True
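
#Illustrative examples (hypothetical names):
#    check_abbrevs("J A Sample")    -> True
#    check_abbrevs("John A Sample") -> True
#    check_abbrevs("John Sample")   -> False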

def dead_70_yrs_ago(ds):
    """filter for the scraped authors: keep only those with known birth and death years that are both 1955 or earlier (dead at least 70 years ago)"""
    birth_miss = False
    death_miss = False
    try:
        birth_yr = int(ds["born"])
        if birth_yr > 1955:
            birth = False
        else:
            birth = True
    except ValueError:
        birth = False
        birth_miss = True

    try:
        death_yr = int(ds["died"])
        if death_yr > 1955:
            death = False
        else:
            death = True
    except ValueError:
        death = False
        death_miss = True

    #both years are before 1955 and none of them are missing
    if (death and birth and not birth_miss and not death_miss):
        filtered = True
    else:
        filtered = False

    return filtered
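
#Illustrative examples (hypothetical rows):
#    dead_70_yrs_ago({"born": "1890", "died": "1950"})    -> True
#    dead_70_yrs_ago({"born": "1890", "died": "1970"})    -> False
#    dead_70_yrs_ago({"born": "unknown", "died": "1950"}) -> False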

def match_by_name(name : str, 
                  ds_filt: Dataset) -> tuple[list[str], list[str]]:
    """
    Match a name against the scraped reference metadata.
    returns ([name + birth and death dates], [reference links])
    """
    #flatten the cleaned names; assumes one cleaned name per scraped row
    ref_names = [x for xs in ds_filt["c_name"] for x in xs]
    name_matches = []
    link_matches = []
    #match the name to the list of all clean names,
    #get the more informative name from the ds based on index
    found_author = "".join(set([name]).intersection(ref_names))
    found_names = [ds_filt["name_yr"][indx] for indx in [i for i in range(len(ref_names)) if ref_names[i]==found_author]]
    found_links = [ds_filt["link"][indx] for indx in [i for i in range(len(ref_names)) if ref_names[i]==found_author]]
    #append the elements, not the list
    for i in found_names:
        name_matches.append(i)
    for i in found_links:
        link_matches.append(i)

    return name_matches, link_matches 
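
#Illustrative call (assumes ds_filt rows carry "c_name", "name_yr" and "link" columns;
#the name, dates and link below are hypothetical):
#    match_by_name("Jens Sample", ds_filt)
#    -> (["Jens Sample (1820-1880)"], ["https://example.org/jens-sample"])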


def match_by_abbreviation(abbrev : str, ds_filt: Dataset) -> tuple[list[str], list[str]]:
    """
    Match a name that contains an abbreviated part against the scraped reference metadata.
    returns ([name + birth and death dates], [reference links])
    """
    name_matches = []
    link_matches = []
    #find all occurrences of the abbreviation match
    a_m = [x["abbrevs"] for x in ds_filt if abbrev in x["abbrevs"]]
    #get the name_yr variable for those matches based on index
    found_names = [ds_filt[i]["name_yr"] for i in range(len(ds_filt)) for x in a_m if ds_filt[i]["abbrevs"] == x]
    #same with links
    found_links = [ds_filt[i]["link"] for i in range(len(ds_filt)) for x in a_m if ds_filt[i]["abbrevs"] == x]
    #remove duplicates
    found_names = list(set(found_names))
    found_links = list(set(found_links))
   
    #append the elements, not the list
    for i in found_names:
        name_matches.append(i)
    for i in found_links:
        link_matches.append(i)
    #remove duplicates
    name_matches = list(set(name_matches))
    link_matches = list(set(link_matches))

    return name_matches, link_matches 

def find_author_json(data: dict[str,dict[str,dict[str,str]]]) -> str:
    """
    A function for finding the author(s) from various possible locations in the json metadata.
    """
    #the nested lookups can raise TypeError/AttributeError (not just KeyError) when a level is missing
    try:
        author = data.get("pnx").get("addata")["au"]
    except (KeyError, TypeError, AttributeError):
        author = []
    try:
        add_author = data.get("pnx").get("addata")["addau"]
    except (KeyError, TypeError, AttributeError):
        add_author = []

    authors = list(set(author)) + list(set(add_author))
    authors = "; ".join(authors)

    if len(authors) < 1:
        try:
            authors = data.get("pnx").get("sort")["author"]
            authors = "; ".join(authors)
        except (KeyError, TypeError, AttributeError):
            pass

    if len(authors) < 1:
        try:
            authors = data.get("pnx").get("display")["creator"]
            authors = "; ".join(authors)
        except (KeyError, TypeError, AttributeError):
            authors = "missing"

    return authors
    

def find_title_json(data: dict[str,dict[str,dict[str,str]]]) -> str:
    """
    A function for finding the title from various possible locations in the json metadata.
    """
    try:
        title = data.get("pnx").get("display")["title"][0]
    except (KeyError, TypeError, AttributeError, IndexError):
        title = []
    if len(title) < 1:
        try:
            title = data.get("pnx").get("addata")["btitle"][0]
        except (KeyError, TypeError, AttributeError, IndexError):
            pass
    if len(title) < 1:
        try:
            title = data.get("pnx").get("sort")["title"][0]
        except (KeyError, TypeError, AttributeError, IndexError):
            pass
    if len(title) < 1:
        title = "missing"
    return title


def find_digitalization(data: dict[str,dict[str,dict[str,str]]]) -> str:
    """
    A function for finding the digitalisation date from various possible locations in the json metadata.
    """
    try:
        digit = data.get("pnx").get("display")["creationdate"][0]
        #last 4-digit number in the string
        digit = re.findall(r"\d{4}$",digit)[0]
    except (KeyError, TypeError, AttributeError, IndexError):
        digit = []
    if len(digit) < 1:
        try:
            digit = data.get("pnx").get("addata")["date"][1]
            digit = re.findall(r"\d{4}$",digit)[0]
        except (KeyError, TypeError, AttributeError, IndexError):
            digit = "missing"
    return digit

def find_source(data: dict[str,dict[str,dict[str,str]]]) -> str:
    """
    A function for finding source of the document from the json metadata.
    """
    try:
        doc_source = data.get("pnx").get("display")["lds50"]
        #first entry that does not contain "Digi"
        doc_source = [i for i in doc_source if "Digi" not in i][0]
    except (KeyError, TypeError, AttributeError, IndexError):
        doc_source = "missing"
    return doc_source


def extract_meta_data(pdf_file:str) -> tuple[str,str,str,str]:
    """
    A function for extracting metadata from the json file that accompanies a pdf.
    returns:
    - author(s)
    - title
    - digitalization year
    - source
    """
    try:
        #load the json that sits next to the pdf (same name, .json extension)
        json_file = pdf_file[:-3] + "json"
        with open(json_file) as f:
            data = json.load(f)
        #pull the individual fields
        authors = find_author_json(data)
        title = find_title_json(data)
        digitalized = find_digitalization(data)
        doc_source = find_source(data)
    except Exception:
        authors = "missing"
        title = "missing"
        digitalized = "missing"
        doc_source = "missing"
    return authors, title, digitalized, doc_source
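
#Illustrative call (hypothetical file; a sibling json with the same basename is assumed):
#    extract_meta_data("raw/1880/some_book.pdf")
#    -> ("Jens Sample; Hans Example", "Some Title", "2015", "Some Library")
#all four values fall back to "missing" if the json cannot be read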

def make_metadata_ds(data_path:str) -> Dataset:
    """
    Extracts Json metadata from all files in path and creates a ds
    """
    ds_list = []
    year_folders = os.listdir(data_path)
    #remove everything that's not a year folder
    year_folders = [i for i in year_folders if len(i)==4]
    for year in tqdm(year_folders):
        #iterate over the files in the year folder (not the characters of the year string)
        for file in os.listdir(os.path.join(data_path,year)):
            if "pdf" in file:
                input_file=os.path.join(data_path,year,file)
                #get metadata
                pub_year = year
                #get metadata (from json)
                author, title, _, _ = extract_meta_data(input_file)

                meta_row = {"doc": file,
                            "author": author, 
                            "title": title,
                            "pub_year": pub_year}
                ds_list.append(meta_row)
            else:
                pass

    meta_ds = Dataset.from_list(ds_list)
    #add author separation, and abbreviations
    meta_ds = meta_ds.map(separate_authors)

    return meta_ds


def main():
    #obtain scraped data
    ds_filtered = Dataset.from_parquet(os.path.join("..","scrape","da_people_large.parquet"))
    ds_filtered = ds_filtered.map(add_abbreviations)
    ds_filtered = ds_filtered.filter(dead_70_yrs_ago)
    ##### get metadata from all pdfs into a dataset
    data_path = os.path.join("..","..","..","kb-books","raw")
    meta_ds = make_metadata_ds(data_path)    

    #match
    init_ds = {"doc": ["missing"],
           "author": ["missing"], 
           "n_author": ["missing"],
           "title": ["missing"],
           "pub_year": ["missing"],
           "match": ["missing"],
           "link": ["missing"],
           "match_ratio": ["missing"],
           "pass":["missing"]}
    new_ds = Dataset.from_dict(init_ds)
  
    for i in tqdm(meta_ds):
        author_matches = []
        link_matches = []
        matched_authors = "missing"
        matched_link= "missing"
        publication_date = int(i["pub_year"])
        #remove authors who were not more than 18 when published
        #they are probably just namesakes
        ds_filt = ds_filtered.filter(lambda ds: int(ds["born"]) + 18 < publication_date, desc= f"Year: {publication_date}")
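        #e.g. a reference person born 1850 is kept for a book published in 1880 (1850 + 18 < 1880)
        #but dropped for one published in 1860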

        for author in i["c_author"]:
            if check_abbrevs(author):
                name_list, link_list = match_by_abbreviation(author,ds_filt)
            else:
                name_list, link_list = match_by_name(author,ds_filt)

            author_matches.append(name_list)
            link_matches.append(link_list)

        if len(author_matches) > 0:
            matched_authors = author_matches
            matched_link = link_matches
        else:
            pass
        
        ########## evaluate matches
        #stringify the list like:
        #a1m1, a1m2; a2m1, a2m2 (a1m1 = author 1, match 1)

        str_auths = "; ".join([", ".join(i) for i in matched_authors])
        str_links = "; ".join([", ".join(i) for i in matched_link])
        #check if all authors have a match
        match_ratio = len([x for x in matched_authors if len(x) > 0]) / int(i["n_author"])
        if match_ratio == 1:
            is_it_pd = True
        else:
            is_it_pd = False

        #save info
        temp_ds = {"doc": i["doc"],
                ##Adjust for real data
                "author": "; ".join(i["c_author"]),
                "n_author": i["n_author"],
                "title": i["title"], 
                "pub_year": i["pub_year"],
                "match":str_auths,
                "link": str_links,
                "match_ratio":match_ratio,
                "pass":is_it_pd}
                
        new_ds = new_ds.add_item(temp_ds)
    #filter confirmed passes (the "pass" column may hold booleans or their string form)
    new_ds = new_ds.filter(lambda ds: str(ds["pass"]).lower() == "true")
    passed_filenames = new_ds["doc"]
    #save whole dataset for checking
    new_ds.to_parquet("public_domain_data.parquet")
    #save only the filenames
    with open("public_domain_files.txt", 'w') as outfile:
        outfile.write('\n'.join(str(i) for i in passed_filenames))

if __name__ == "__main__":
    main()