import os
import re
import string
import json
import logging
import shutil
from datetime import datetime

from tqdm import tqdm

import fitz  # PyMuPDF
from datasets import Dataset, load_dataset

logger = logging.getLogger(__name__)


# --- configuration ---

source = "kb_books"

# number of year folders processed per chunk
n_chunks = 1

# number of PDF files converted per batch
n_batch = 5

input_path = os.path.join("..", "..", "kb-books", "raw")
output_path = os.path.join(".", "data")
logs = os.path.join(".", "log")

# inclusive range of year folders to process
start_year = 1751
stop_year = 1752

# entries in the raw folder that are not year folders
unwanted_folders = ("README.txt", "logs")

# if True, only the first page of each PDF is converted
demo = False

# reference list of filenames confirmed public domain by an outside check
ref_pd_location = os.path.join(".", "pd_check", "public_domain_files.txt")
with open(ref_pd_location, "r") as pd_files:
    ref_pd_list = pd_files.read().splitlines()

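# The raw input is assumed to be organised as one folder per year under
# input_path, each PDF sitting next to a JSON metadata sidecar with the same
# stem (hypothetical example names):
#
#   ../../kb-books/raw/1751/1751_example_alma123.pdf
#   ../../kb-books/raw/1751/1751_example_alma123.json
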
def find_author_json(data: dict) -> str:
    """
    A function for finding the author(s) from various possible locations in the JSON metadata.
    """
    try:
        author = data.get("pnx").get("addata")["au"]
    except KeyError:
        author = []
    try:
        add_author = data.get("pnx").get("addata")["addau"]
    except KeyError:
        add_author = []

    authors = list(set(author)) + list(set(add_author))
    authors = "; ".join(authors)

    if len(authors) < 1:
        try:
            authors = data.get("pnx").get("sort")["author"]
            authors = "; ".join(authors)
        except KeyError:
            pass

    if len(authors) < 1:
        try:
            authors = data.get("pnx").get("display")["creator"]
            authors = "; ".join(authors)
        except KeyError:
            authors = "missing"

    return authors

def find_title_json(data: dict) -> str:
    """
    A function for finding the title from various possible locations in the JSON metadata.
    """
    try:
        title = data.get("pnx").get("display")["title"][0]
    except (KeyError, IndexError):
        title = []
    if len(title) < 1:
        try:
            title = data.get("pnx").get("addata")["btitle"][0]
        except (KeyError, IndexError):
            pass
    if len(title) < 1:
        try:
            title = data.get("pnx").get("sort")["title"][0]
        except (KeyError, IndexError):
            pass
    if len(title) < 1:
        title = "missing"
    return title

def find_digitalization(data: dict) -> str:
    """
    A function for finding the digitalization date from various possible locations in the JSON metadata.
    """
    try:
        digit = data.get("pnx").get("display")["creationdate"][0]
        digit = re.findall(r"\d{4}$", digit)[0]
    except (KeyError, IndexError):  # IndexError: no four-digit year at the end of the field
        digit = []
    if len(digit) < 1:
        try:
            digit = data.get("pnx").get("addata")["date"][1]
            digit = re.findall(r"\d{4}$", digit)[0]
        except (KeyError, IndexError):
            digit = "missing"
    return digit

def find_source(data: dict) -> str:
    """
    A function for finding the source of the document from the JSON metadata.
    """
    try:
        doc_source = data.get("pnx").get("display")["lds50"]
        # several source codes may be listed; skip the digitisation entries
        doc_source = [i for i in doc_source if "Digi" not in i][0]
    except (KeyError, IndexError):
        doc_source = "missing"
    return doc_source

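# The four find_* helpers above all read from the "pnx" record of the library
# metadata sidecar. A minimal, purely hypothetical sidecar illustrating the
# fields they look up:
#
#   {"pnx": {"display": {"title": ["..."], "creator": ["..."],
#                        "creationdate": ["Kbh., 1751"], "lds50": ["KBD"]},
#            "addata": {"au": ["..."], "addau": ["..."], "btitle": ["..."],
#                       "date": ["1751", "1751"]},
#            "sort": {"author": ["..."], "title": ["..."]}}}
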
def dead_70_yrs_ago(ds):
    """Filter for the scraped author records: keep authors who died at least 70 years ago
    (or, when the death year is missing, were born before 1833)."""
    birth_miss = False
    death_miss = False
    birth_yr = None
    try:
        birth_yr = int(ds["born"])
        birth = birth_yr <= 1955
    except ValueError:
        birth = False
        birth_miss = True

    try:
        death_yr = int(ds["died"])
        death = death_yr <= 1955
    except ValueError:
        death = False
        death_miss = True

    # keep the record when the death year qualifies, or when only an early
    # enough birth year is known
    if ((death and birth) or
        (death and birth_miss) or
        (death_miss and birth_yr is not None and birth_yr < 1833)):
        filtered = True
    else:
        filtered = False

    return filtered

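# A hedged usage sketch: the scraped author records are assumed to carry
# "born"/"died" year strings, so the function plugs straight into datasets'
# filter (authors_ds is hypothetical):
#
#   >>> authors_ds = authors_ds.filter(dead_70_yrs_ago)
#
# Records with non-numeric years fall through the ValueError branches and are
# treated as missing.
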
def extract_meta_data(pdf_file: str) -> tuple[str, str, str, str]:
    """
    A function for extracting metadata from the JSON sidecar file, including:
    - author(s)
    - title
    - digitalized (date)
    - source
    """
    try:
        # the metadata sidecar shares the PDF's name, with a .json extension
        json_file = pdf_file[:-3] + "json"
        with open(json_file) as f:
            data = json.load(f)

        authors = find_author_json(data)
        title = find_title_json(data)
        digitalized = find_digitalization(data)
        doc_source = find_source(data)
    except Exception:
        authors = "missing"
        title = "missing"
        digitalized = "missing"
        doc_source = "missing"
    return authors, title, digitalized, doc_source

def simplify_name(author: str) -> str:
    """
    A function for simplifying a repeated single author name separated by ";",
    e.g. "Holck, J.; af J. Holck." -> "Holck, J."
    """
    simp = author
    if ";" in author:
        # compare the capitalised name parts on each side of the ";"
        only_uppercase = [re.findall(r"[A-Z][a-z]*", i) for i in author.split(";")]
        if len(only_uppercase) == 2:
            if sorted(only_uppercase[0]) == sorted(only_uppercase[1]):
                # keep everything before the first ";"
                simp = re.findall(r"^[^;]*", author)[0]
    return simp

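# Doctest-style sketch (the second example is hypothetical):
#
#   >>> simplify_name("Holck, J.; af J. Holck.")
#   'Holck, J.'
#   >>> simplify_name("Holck, J.; Brorson, H. A.")   # two distinct authors are kept
#   'Holck, J.; Brorson, H. A.'
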
def separate_names(author: str) -> tuple[list[list[str]], int]:
    """
    A function for separating the different authors and their name parts:
    - splits on ";"
    - matches the strings starting with an uppercase letter
    """
    authors = re.findall(r"([^;]*)", author)
    authors = list(filter(None, authors))
    authors = [re.findall(r"([A-Z]\w*)", i) for i in authors]
    n_author = len(authors)
    return authors, n_author

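# Hypothetical example of the separation:
#
#   >>> separate_names("Holck, J.; Brorson, H. A.")
#   ([['Holck', 'J'], ['Brorson', 'H', 'A']], 2)
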
def check_copyright(pub_year: int,
                    cover_page_text: str,
                    filename: str,
                    ref_filenames: list[str]
                    ) -> bool:
    """
    Function for checking public domain status based on:
    - the year published,
    - whether the digitalising party claims it is copyright free,
    - whether the filename can be matched to a name from an outside source.
    """
    if pub_year < 1833:
        public_domain = True
    elif "free of copyright" in cover_page_text:
        public_domain = True
    elif filename in ref_filenames:
        public_domain = True
    else:
        public_domain = False

    return public_domain

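# Sketch of the call made from convert_pdf_to_dataset (the file name here is
# hypothetical): anything published before 1833 passes regardless of the other
# two checks.
#
#   >>> check_copyright(1751, cover_text, "1751_example.pdf", ref_pd_list)
#   True
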
def convert_pdf_to_dataset(file_name: str,
                           path_to_file: str,
                           demo: bool = False) -> Dataset | None:
    """Converts a PDF to images and builds a dataset with one row per page, based on:
    https://thepythoncode.com/article/convert-pdf-files-to-images-in-python
    """
    input_file = os.path.join(path_to_file, file_name)

    # build a document id from the file name, stripped of spaces, punctuation
    # and the "alma"/"pdf" parts
    doc_id = re.sub(" ", "_", file_name)
    doc_id = ''.join(filter(lambda x: x not in string.punctuation, doc_id))
    doc_id = re.sub(r"alma|pdf", "", doc_id)

    # the publication year is the first four characters of the file name
    pub_year = file_name[:4]

    author, title, digitalized, doc_source = extract_meta_data(input_file)

    pdfIn = fitz.open(input_file)
    data_list = []
    copyright_free = False  # determined from the cover page below

    if demo:
        page_range = 1
    else:
        page_range = pdfIn.page_count

    for pg in range(page_range):
        page = pdfIn[pg]
        page_text = page.get_text()

        # the cover page carries the library's own metadata and copyright note
        if pg == 0:
            try:
                text_solid = re.sub("\n", "", page_text)
            except TypeError:
                text_solid = "missing"

            # fall back to the cover page when the JSON metadata was missing
            if author == "missing":
                try:
                    author = re.search(r"(?<=Author\(s\):)(.*?)(?=Titel)", text_solid)[0]
                    author = simplify_name(author)
                except TypeError:
                    author = "missing"
                finally:
                    if len(author) == 0:
                        author = "missing"

            if title == "missing":
                try:
                    title = re.search(r"(?<=Title:)(.*?)(?=Udgivet)", text_solid)[0]
                except TypeError:
                    title = "missing"

            copyright_free = check_copyright(int(pub_year),
                                             text_solid,
                                             file_name,
                                             ref_pd_list)

        # do not extract anything from documents that are not public domain
        if not copyright_free:
            break

        # render the page at 2x zoom
        rotate = 0
        zoom_x = 2
        zoom_y = 2
        mat = fitz.Matrix(zoom_x, zoom_y).prerotate(rotate)
        pix = page.get_pixmap(matrix=mat, alpha=False)
        page_img = pix.pil_image()
        page_id = f"{doc_id}_p{pg+1}"

        if isinstance(author, list):
            author = "; ".join(author)

        meta_data = {"doc_id": doc_id,
                     "page_id": page_id,
                     "page_image": page_img,
                     "page_text": page_text,
                     "author": author,
                     "title": title,
                     "published": pub_year,
                     "digitalized": digitalized,
                     "source": doc_source,
                     "file_name": file_name}
        data_list.append(meta_data)

    pdfIn.close()

    if copyright_free:
        ds = Dataset.from_list(data_list)
    else:
        # returning None (rather than a string) lets the caller's `is None`
        # check skip copyright-restricted documents cleanly
        ds = None
    return ds

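# Hedged usage sketch; the file name is hypothetical and the column list simply
# restates the meta_data dict built above:
#
#   >>> ds = convert_pdf_to_dataset("1751_example_alma123.pdf", "../../kb-books/raw/1751")
#   >>> ds.column_names
#   ['doc_id', 'page_id', 'page_image', 'page_text', 'author', 'title',
#    'published', 'digitalized', 'source', 'file_name']
#
# None is returned when the document is not judged to be in the public domain.
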
def make_year_list(start_year: int, stop_year: int) -> list[str]:
    """Make a list of year strings from start_year to stop_year (inclusive)."""
    year_list = list(range(start_year, stop_year + 1))
    year_list = [str(i) for i in year_list]
    return year_list

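# E.g. make_year_list(1751, 1752) -> ["1751", "1752"], matching the year folder
# names under input_path.
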
# filter used to drop rows whose source field is "ADLFBI"
adl_filter = lambda ds: ds["source"] != "ADLFBI"

def split(a, n):
    """Splits a list into n roughly equal parts."""
    k, m = divmod(len(a), n)
    return (a[i*k+min(i, m):(i+1)*k+min(i+1, m)] for i in range(n))

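# Sketch: split returns a generator of slices, e.g.
#
#   >>> list(split(list(range(7)), 3))
#   [[0, 1, 2], [3, 4], [5, 6]]
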
def remove(path):
    """param <path> could either be relative or absolute."""
    if os.path.isfile(path) or os.path.islink(path):
        os.remove(path)
    elif os.path.isdir(path):
        shutil.rmtree(path)
    else:
        raise ValueError("file {} is not a file or dir.".format(path))

def reorganize_data(output_path: str, shard_size: str = "5GB"):
    """Loads the temporary data folders in the data path, saves each year as a
    sharded dataset (5GB shards by default) and deletes the temporary files.
    """
    folders = os.listdir(output_path)
    temp_folders = [i for i in folders if "_t" in i]
    if len(temp_folders) == 0:
        print("DATA ORGANIZED")
        return
    print("REORGANIZING DATA...")
    for t_fold in tqdm(temp_folders):
        data_path = os.path.join(output_path, t_fold)
        data_set = load_dataset(data_path, split="train")

        # "<year>_t" -> "<year>"
        year_str = t_fold[:-2]
        new_data_path = os.path.join(output_path, year_str)
        data_set.save_to_disk(new_data_path, max_shard_size=shard_size)

        try:
            remove(data_path)
        except PermissionError as e:
            print(f"{e}")

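# Sketch of the intended flow (folder names hypothetical): main() writes
# parquet batches into temporary "<year>_t" folders, which this function turns
# into one sharded dataset per year and then deletes:
#
#   data/1751_t/kb_books_1751-1751_0.parquet  ->  data/1751/  (save_to_disk shards)
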
def main():
    sub_folders = os.listdir(input_path)
    # drop the entries that are not year folders
    sub_folders = [i for i in sub_folders if i not in unwanted_folders]

    year_list = make_year_list(start_year, stop_year)
    sub_folders = sorted([i for i in sub_folders if i in year_list])

    # process the year folders in chunks of n_chunks
    chunks = [sub_folders[i:i + n_chunks] for i in range(0, len(sub_folders), n_chunks)]

    logger.info(f"Extracting from PDFs...{sub_folders[0]}-{sub_folders[-1]}")
    for ch in tqdm(chunks):
        problem_list = []
        for sfolder in ch:
            sfp = os.path.join(input_path, sfolder)
            files = [i for i in os.listdir(sfp) if ".pdf" in i]

            # convert the PDFs in batches of n_batch files
            batched_files = [files[i:i + n_batch] for i in range(0, len(files), n_batch)]
            for batch_nr, batch in enumerate(batched_files):
                ds = []
                for i in batch:
                    try:
                        temporary_ds = convert_pdf_to_dataset(i, sfp, demo)
                        if temporary_ds is None:
                            # not public domain; skip
                            pass
                        else:
                            print(temporary_ds[0]["file_name"])
                            for j in temporary_ds:
                                ds.append(j)
                    except Exception as e:
                        logger.info(f"FILE ERROR: {os.path.join(sfp, i)}")
                        logger.info(f"ERROR: {e}")
                        problem_list.append(i)

                logger.info(f"Assembling Dataset: {ch[0]}-{ch[-1]}, BATCH: {batch_nr}")

                if len(ds) == 0:
                    continue
                ds = Dataset.from_list(ds)

                # drop the ADL pages and the now redundant source column
                ds = ds.filter(adl_filter)
                ds = ds.remove_columns("source")

                save_path = os.path.join(output_path,
                                         f"{sfolder}_t",
                                         f"{source}_{ch[0]}-{ch[-1]}_{batch_nr}.parquet")
                ds.to_parquet(save_path)

            logger.info(f"FOLDER DONE: {sfolder}")

        # log the files that failed in this chunk
        if len(problem_list) >= 1:
            with open(os.path.join(logs, f"problems_{ch[0]}-{ch[-1]}.txt"), 'w') as outfile:
                outfile.write('\n'.join(str(i) for i in problem_list))

        # release the per-chunk datasets before the next chunk
        ds = None
        temporary_ds = None
        del ds
        del temporary_ds
    reorganize_data(output_path)

if __name__ == "__main__":
    os.makedirs(logs, exist_ok=True)  # ensure the log folder exists before attaching the file handler
    log_path = os.path.join(logs, "extract.log")
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(levelname)s - %(message)s",
        handlers=[
            logging.StreamHandler(),
            logging.FileHandler(log_path),
        ],
    )
    main()