# kb-books / pd_check / select_pd.py
import os
import re
import string
import json
import itertools

from tqdm import tqdm
from datasets import Dataset
def int_to_roman(number: int) -> str:
    """Convert an integer to a Roman numeral."""
ROMAN = [
(1000, "M"),
( 900, "CM"),
( 500, "D"),
( 400, "CD"),
( 100, "C"),
( 90, "XC"),
( 50, "L"),
( 40, "XL"),
( 10, "X"),
( 9, "IX"),
( 5, "V"),
( 4, "IV"),
( 1, "I"),
]
result = []
for (arabic, roman) in ROMAN:
(factor, number) = divmod(number, arabic)
result.append(roman * factor)
if number == 0:
break
return "".join(result)
def process_non_upper_names(author: str) -> str:
    """
    Return the tail of a name that contains non-uppercase parts.
    Two special cases are handled:
    - a middle name indicating a place ("von", "af", "de", etc.)
    - an end name indicating a trait ("den yngre", "den ældre", etc.)
    (Royal numerals such as "Christian 1." are handled separately
    in monogram_list via int_to_roman.)
    """
mid_name_list = [' af ',
' da ',
' de ',
' du ',
' la ',
' le ',
' til ',
' van ',
' von ',
' zu '
]
end_name_list = [' den Gamle',
' den Hellige',
' den Store',
' den Unge',
' den eldre',
' den yngre',
' den yngste',
' den Ældre',
' den ældre',
' junior',
' the Younger'
]
    #get everything after the middle name
    mid_name = [i for i in mid_name_list if i in author]
    if len(mid_name) != 0:
        mid_name = re.findall(rf"{mid_name[0]}.*", author)
    #get the end name; skip it if the middle-name match already contains it
    end_name = [i for i in end_name_list if i in author]
    if len(end_name) != 0 and len(mid_name) != 0 and end_name[0] in mid_name[0]:
        end_name = []
    full_name = "".join(sum([mid_name, end_name], []))
    return full_name
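
# Illustrative check (names invented for the example):
#   process_non_upper_names("Christian von Sample") -> " von Sample"
#   process_non_upper_names("Jens Sample")          -> ""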
def monogram_list(author: str) -> list[str]:
    """
    Create a list of first-name abbreviations, e.g.
    "John Alex Sample" -> [John Alex Sample, J Alex Sample,
                           J A Sample, John A Sample]
    Special-case endings are re-added via process_non_upper_names,
    and royal numerals are duplicated as Roman numerals via int_to_roman.
    """
#remove special cases, they will be re-added after the last name at the end
spec_end = process_non_upper_names(author)
author = author.replace(spec_end,"")
abbreviation_list = [author]
#split on whitespace
author = re.split(r"[\s]+",author)
#-1 because last name stays long
in_list = author[:-1]
    #get all combinations of the first/middle names as sublists
    out_list = [list(c) for i in range(len(in_list))
                for c in itertools.combinations(in_list, i + 1)]
for name_combos in out_list:
name_short = []
name_index = []
#for each list of combinations
#the names in that combination will be shortened
for name in name_combos:
if name in author:
#find the elements in that exact combination
#replace the names with the first character
og_index = author.index(name)
abbreviation = name[0]
#list indexes and replacements for those indexes
name_index.append(og_index)
name_short.append(abbreviation)
        #replace the marked names with their abbreviations
        abbr_author = author.copy()
        for (idx, short) in zip(name_index, name_short):
            abbr_author[idx] = short
        abbreviation_list.append(" ".join(abbr_author))
#remove dupes, add special endings
abbreviation_list = list(set(abbreviation_list))
if spec_end != "":
spec_end_l = [i + spec_end for i in abbreviation_list]
abbreviation_list = sum([abbreviation_list,spec_end_l],[])
if sum(1 for i in author if i.isdigit()) > 0:
numeric = [i for i in author if i.isdigit()][0]
rom_num = int_to_roman(int(numeric))
rom_num_l = [i.replace(numeric,rom_num) for i in abbreviation_list]
abbreviation_list = sum([abbreviation_list,rom_num_l],[])
return abbreviation_list
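
# Illustrative check (set order may vary, hence the sort):
#   sorted(monogram_list("John Alex Sample"))
#   -> ["J A Sample", "J Alex Sample", "John A Sample", "John Alex Sample"]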
def clean_names(authors: str) -> list[str]:
    """
    Take a string of author names separated by ";" and reformat it:
    - returns a list
    - switches "surname, firstname" order if "," is present
    - removes parentheses and other punctuation
    e.g. from:
    " Jens Sample ; Example, Hans ; S. A. Ample (example)"
    to:
    ["Jens Sample", "Hans Example", "S A Ample"]
    """
    authors = authors.split(";")
    #if "," is present: split the item, reverse it, rejoin with a space
    authors = [" ".join(x.split(",")[::-1]) if "," in x else x for x in authors]
    #if parentheses are present, remove them and their content
    authors = [re.sub(r"\(.*?\)", "", x) if ("(" in x or ")" in x) else x for x in authors]
    #separate elements by char, turn punctuation into whitespace, join again
    authors = ["".join([ch if ch not in string.punctuation else " " for ch in el]).strip() for el in authors]
    #if two uppercase characters are adjacent, separate them: AB -> A B
    authors = ["".join([" " + ch if chr_count > 0 and el[chr_count-1].isupper() and ch.isupper() else ch for chr_count, ch in enumerate(el)]) for el in authors]
    #collapse excess spaces (inside and at the edges) and drop empty elements
    authors = [re.sub(" +", " ", i).strip() for i in authors if i != ""]
    return authors
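
# Illustrative check (matches the docstring example):
#   clean_names(" Jens Sample ; Example, Hans ; S. A. Ample (example)")
#   -> ["Jens Sample", "Hans Example", "S A Ample"]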
def lower_names(authors: list[str]) -> list[str]:
    """
    Take a list of author names and lowercase them for easier comparison,
    e.g. from:
    ["Jens Sample", "Hans Example", "S A Ample"]
    to:
    ["jenssample", "hansexample", "saample"]
    """
#remove dots and spaces, lowercase
authors = [x.replace(".","") for x in authors]
authors = [x.replace(" ","") for x in authors]
authors = [x.lower() for x in authors]
return authors
def add_abbreviations(ds: dict) -> dict:
    """Add abbreviations to single author names in the scraped reference dataset (used via Dataset.map)."""
    #clean first to remove parentheses
    ds["c_name"] = clean_names(ds["name"])
    ds["abbrevs"] = monogram_list(ds["c_name"][0])
    return ds
def separate_authors(ds: dict) -> dict:
    """Separate authors in the pdf metadata (used via Dataset.map)."""
    ds["c_author"] = clean_names(ds["author"])
    ds["n_author"] = len(ds["c_author"])
    ds["abbrevs"] = [monogram_list(i) for i in ds["c_author"]]
    return ds
def check_abbrevs(name: str) -> bool:
    """Check whether a name contains an abbreviated (single-letter) part."""
    padded = " " + name + " "
    #search anywhere for a single word character surrounded by whitespace
    return re.search(r"\s\w\s", padded) is not None
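
# Illustrative check:
#   check_abbrevs("John A Sample") -> True
#   check_abbrevs("John Sample")   -> False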
def dead_70_yrs_ago(ds):
    """Filter the scraped authors, keeping those born and dead no later than 1955 (dead at least 70 years)."""
    try:
        birth = int(ds["born"]) <= 1955
    except ValueError:
        birth = False
    try:
        death = int(ds["died"]) <= 1955
    except ValueError:
        death = False
    #keep only rows where both years are present and at most 1955
    return birth and death
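
# Illustrative check (rows invented for the example):
#   dead_70_yrs_ago({"born": "1850", "died": "1910"}) -> True
#   dead_70_yrs_ago({"born": "1900", "died": ""})     -> False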
def match_by_name(name: str,
                  ds_filt: Dataset) -> tuple[list[str], list[str]]:
    """
    Match a name against the filtered reference dataset.
    Returns ([name + birth and death dates], [reference links]).
    """
    #flatten; assumes one clean name per row so indices align with rows
    ref_names = [x for xs in ds_filt["c_name"] for x in xs]
    name_matches = []
    link_matches = []
    #match the name against the list of all clean names,
    #then get the more informative name from the ds based on index
found_author = "".join(set([name]).intersection(ref_names))
found_names = [ds_filt["name_yr"][indx] for indx in [i for i in range(len(ref_names)) if ref_names[i]==found_author]]
found_links = [ds_filt["link"][indx] for indx in [i for i in range(len(ref_names)) if ref_names[i]==found_author]]
    #append the elements, not the lists
    name_matches.extend(found_names)
    link_matches.extend(found_links)
    return name_matches, link_matches
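
# Illustrative usage (column values invented for the example):
#   ds_filt with c_name=[["Jens Sample"]], name_yr=["Jens Sample (1850-1910)"],
#   link=["https://example.org/jens"] gives
#   match_by_name("Jens Sample", ds_filt)
#   -> (["Jens Sample (1850-1910)"], ["https://example.org/jens"])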
def match_by_abbreviation(abbrev: str, ds_filt: Dataset) -> tuple[list[str], list[str]]:
    """
    Match a name that contains an abbreviated part against the reference dataset.
    Returns ([name + birth and death dates], [reference links]).
    """
name_matches = []
link_matches = []
#find all occurrences of the abbreviation match
a_m = [x["abbrevs"] for x in ds_filt if abbrev in x["abbrevs"]]
#get the name_yr variable for those matches based on index
found_names = [ds_filt[i]["name_yr"] for i in range(len(ds_filt)) for x in a_m if ds_filt[i]["abbrevs"] == x]
#same with links
found_links = [ds_filt[i]["link"] for i in range(len(ds_filt)) for x in a_m if ds_filt[i]["abbrevs"] == x]
    #remove duplicates
    found_names = list(set(found_names))
    found_links = list(set(found_links))
    #append the elements, not the lists
    name_matches.extend(found_names)
    link_matches.extend(found_links)
    return name_matches, link_matches
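
# Illustrative usage: an input with an abbreviated part, e.g. "J Sample",
# is matched against each row's precomputed "abbrevs" list:
#   match_by_abbreviation("J Sample", ds_filt)  # ds_filt as sketched above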
def find_author_json(data: dict) -> str:
    """
    Find the author(s) in the json metadata, trying several possible locations.
    """
    try:
        author = data["pnx"]["addata"]["au"]
    except KeyError:
        author = []
    try:
        add_author = data["pnx"]["addata"]["addau"]
    except KeyError:
        add_author = []
    authors = list(set(author)) + list(set(add_author))
    authors = "; ".join(authors)
    if len(authors) < 1:
        try:
            authors = "; ".join(data["pnx"]["sort"]["author"])
        except KeyError:
            pass
    if len(authors) < 1:
        try:
            authors = "; ".join(data["pnx"]["display"]["creator"])
        except KeyError:
            authors = "missing"
    return authors
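
# Illustrative check (minimal metadata dict invented for the example):
#   find_author_json({"pnx": {"addata": {"au": ["Example, Hans"]}}})
#   -> "Example, Hans"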
def find_title_json(data: dict) -> str:
    """
    Find the title in the json metadata, trying several possible locations.
    """
    try:
        title = data["pnx"]["display"]["title"][0]
    except (KeyError, IndexError):
        title = ""
    if len(title) < 1:
        try:
            title = data["pnx"]["addata"]["btitle"][0]
        except (KeyError, IndexError):
            pass
    if len(title) < 1:
        try:
            title = data["pnx"]["sort"]["title"][0]
        except (KeyError, IndexError):
            pass
    if len(title) < 1:
        title = "missing"
    return title
def find_digitalization(data: dict) -> str:
    """
    Find the digitalization date in the json metadata, trying several possible locations.
    """
    try:
        digit = data["pnx"]["display"]["creationdate"][0]
        #take a 4-digit number at the end of the string
        digit = re.findall(r"\d{4}$", digit)[0]
    except (KeyError, IndexError):
        digit = ""
    if len(digit) < 1:
        try:
            digit = data["pnx"]["addata"]["date"][1]
            digit = re.findall(r"\d{4}$", digit)[0]
        except (KeyError, IndexError):
            digit = "missing"
    return digit
def find_source(data: dict) -> str:
    """
    Find the source of the document in the json metadata.
    """
    try:
        doc_source = data["pnx"]["display"]["lds50"]
        #take the first entry that is not a digitalization tag
        doc_source = [i for i in doc_source if "Digi" not in i][0]
    except (KeyError, IndexError):
        doc_source = "missing"
    return doc_source
def extract_meta_data(pdf_file: str) -> tuple[str, str, str, str]:
    """
    Extract metadata from the json file that sits next to a pdf, including:
    - author(s)
    - title
    - digitalized
    - source
    """
    try:
        #load the json sibling of the pdf
        json_file = pdf_file[:-3] + "json"
        with open(json_file) as f:
            data = json.load(f)
        authors = find_author_json(data)
        title = find_title_json(data)
        digitalized = find_digitalization(data)
        doc_source = find_source(data)
    except Exception:
        authors = "missing"
        title = "missing"
        digitalized = "missing"
        doc_source = "missing"
    return authors, title, digitalized, doc_source
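
# Illustrative usage (path invented for the example):
#   author, title, digitalized, source = extract_meta_data("1900/book_1.pdf")
#   looks for "1900/book_1.json" and returns "missing" for fields it cannot find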
def make_metadata_ds(data_path: str) -> Dataset:
    """
    Extract json metadata from all files in the path and create a ds.
    """
    ds_list = []
    year_folders = os.listdir(data_path)
    #remove everything that's not a year
    year_folders = [i for i in year_folders if len(i) == 4]
    for year in tqdm(year_folders):
        for file in os.listdir(os.path.join(data_path, year)):
            if "pdf" in file:
                input_file = os.path.join(data_path, year, file)
                #the folder name is the publication year
                pub_year = year
                #get metadata (from json)
                author, title, _, _ = extract_meta_data(input_file)
                meta_row = {"doc": file,
                            "author": author,
                            "title": title,
                            "pub_year": pub_year}
                ds_list.append(meta_row)
    meta_ds = Dataset.from_list(ds_list)
    #add author separation and abbreviations
    meta_ds = meta_ds.map(separate_authors)
    return meta_ds
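
# Illustrative usage (layout assumed: data_path/<year>/<doc>.pdf with a .json sibling):
#   meta_ds = make_metadata_ds(os.path.join("..", "..", "..", "kb-books", "raw"))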
def main():
    #obtain scraped data
    ds_filtered = Dataset.from_parquet(os.path.join("..", "scrape", "da_people_large.parquet"))
    ds_filtered = ds_filtered.map(add_abbreviations)
    ds_filtered = ds_filtered.filter(dead_70_yrs_ago)
##### get metadata from all pdfs into a dataset
data_path = os.path.join("..","..","..","kb-books","raw")
meta_ds = make_metadata_ds(data_path)
#match
init_ds = {"doc": ["missing"],
"author": ["missing"],
"n_author": ["missing"],
"title": ["missing"],
"pub_year": ["missing"],
"match": ["missing"],
"link": ["missing"],
"match_ratio": ["missing"],
"pass":["missing"]}
new_ds = Dataset.from_dict(init_ds)
for i in tqdm(meta_ds):
author_matches = []
link_matches = []
matched_authors = "missing"
matched_link= "missing"
publication_date = int(i["pub_year"])
#remove authors who were not more than 18 when published
#they are probably just namesakes
ds_filt = ds_filtered.filter(lambda ds: int(ds["born"]) + 18 < publication_date, desc= f"Year: {publication_date}")
for author in i["c_author"]:
if check_abbrevs(author):
name_list, link_list = match_by_abbreviation(author,ds_filt)
else:
name_list, link_list = match_by_name(author,ds_filt)
author_matches.append(name_list)
link_matches.append(link_list)
        if len(author_matches) > 0:
            matched_authors = author_matches
            matched_link = link_matches
        ########## evaluate matches
        #stringify the list like:
        #a1m1, a1m2; a2m1, a2m2 (a1m1 = author 1 match 1)
        str_auths = "; ".join([", ".join(i) for i in matched_authors])
        str_links = "; ".join([", ".join(i) for i in matched_link])
        #check if all authors have a match
        match_ratio = len([x for x in matched_authors if len(x) > 0]) / int(i["n_author"])
        is_it_pd = (match_ratio == 1)
#save info
temp_ds = {"doc": i["doc"],
##Adjust for real data
"author": "; ".join(i["c_author"]),
"n_author": i["n_author"],
"title": i["title"],
"pub_year": i["pub_year"],
"match":str_auths,
"link": str_links,
"match_ratio":match_ratio,
"pass":is_it_pd}
new_ds = new_ds.add_item(temp_ds)
    #filter confirmed passes; the "pass" column was initialised with a string,
    #so booleans added later are stored as strings ("True")
    new_ds = new_ds.filter(lambda ds: str(ds["pass"]).lower() == "true")
passed_filenames = new_ds["doc"]
#save whole dataset for checking
new_ds.to_parquet("public_domain_data.parquet")
#save only the filenames
with open("public_domain_files.txt", 'w') as outfile:
outfile.write('\n'.join(str(i) for i in passed_filenames))