|
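"""Scrape Danish-language authors from da.wikipedia.org and
commons.wikimedia.org, collect each author's birth and death years, and
save the result as a Hugging Face dataset in parquet format.

Rough usage (the script name is illustrative):

    pip install requests beautifulsoup4 datasets tqdm
    python scrape_danish_authors.py   # writes ./da_people_large.parquet
"""
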
import os
import re

import requests
from bs4 import BeautifulSoup
from datasets import Dataset
from tqdm import tqdm
|
|
def from_wiki_script(author_soup):
    """Pull birth and death years out of the page's first <script> tag."""
    try:
        text = str(author_soup.find_all("script")[0])
        try:
            birth = re.findall(r"Født i \d{4}", text)[0]
            birth = re.findall(r"\d{4}", birth)[0]
        except IndexError:
            birth = None
        try:
            death = re.findall(r"Døde i \d{4}", text)[0]
            death = re.findall(r"\d{4}", death)[0]
        except IndexError:
            death = None
    except IndexError:  # the page has no <script> tag at all
        birth = None
        death = None
    return birth, death


def from_infobox(author_soup):
    """Pull birth and death years from the "biography" infobox table."""
    birth = None
    death = None
    # .get("class", []) avoids a KeyError on tables without a class
    # attribute, which would otherwise abort the whole search
    boxes = [t for t in author_soup.find_all("table")
             if "biography" in t.get("class", [])]
    if boxes:
        box = str(boxes[0])
        try:
            death = re.findall(r"(?<=Død).*?[^\d]*(\d{4})", box)[0]
        except IndexError:
            death = None
        try:
            birth = re.findall(r"(?<=Født).*?[^\d]*(\d{4})", box)[0]
        except IndexError:
            birth = None
    return birth, death


def from_wiki_text(author_soup):
    """Fall back to the first paragraph: the first year found is taken
    as the birth year, the second as the death year."""
    try:
        text = author_soup.find_all("p")[0].get_text()
    except IndexError:  # the page has no paragraphs
        return None, None
    years = re.findall(r"\d{4}", text)
    birth = years[0] if len(years) > 0 else None
    death = years[1] if len(years) > 1 else None
    return birth, death


def none_to_q(val: str) -> str:
    """Replace None with "?" so year ranges always render."""
    return "?" if val is None else val


def find_wiki_birth_death(author_soup):
    """Try each extraction strategy in turn: script tag, then infobox,
    then first paragraph."""
    birth, death = from_wiki_script(author_soup)
    if birth is None and death is None:
        birth, death = from_infobox(author_soup)
    if birth is None and death is None:
        birth, death = from_wiki_text(author_soup)
    return none_to_q(birth), none_to_q(death)


def is_a_person(tag):
    """Match plain article links: <a> tags with exactly an href and a
    title, skipping index pages and namespaced links (href contains ":")."""
    return (tag.has_attr("href")
            and tag.has_attr("title")
            and len(tag.attrs) == 2
            and "index" not in tag.get("href")
            and ":" not in tag.get("href"))


def is_a_person_commons(tag):
    """Match Commons category links for individual people; the
    Writers/Denmark parent categories are excluded, and "ategories"
    catches both "Categories" and "categories"."""
    return (tag.has_attr("href")
            and tag.has_attr("title")
            and len(tag.attrs) == 2
            and "index" not in tag.get("href")
            and "Writers" not in tag.get("title")
            and "ategories" not in tag.get("title")
            and "Denmark" not in tag.get("title")
            and "Category" in tag.get("title"))


def is_a_subcategory(tag):
    """Match links to "Dansksprogede" (Danish-language) subcategories."""
    return (tag.has_attr("href")
            and tag.has_attr("title")
            and "Dansksprogede" in tag.get("title"))


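# BeautifulSoup's find_all also accepts a callable and keeps every tag
# for which it returns True, so the predicates above are used as e.g.
# soup.find_all(is_a_person) or soup.find_all(is_next_page).
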
def flatten(twodlist: list[list]) -> list:
    """Flatten a list by one dimension, e.g. [[1, 2], [3]] -> [1, 2, 3]."""
    return [x for xs in twodlist for x in xs]


def extract_authors(people,
                    authors: list[dict[str, str]],
                    name_list: list[str],
                    ) -> tuple[list[dict[str, str]], list[str]]:
    """Fetch each author's page on da.wikipedia.org, extract birth and
    death years, and append one row per author not yet seen."""
    for i in people:
        author_name = i.get("title")
        author_link = i.get("href")
        if author_name not in name_list:
            author_page = requests.get(f"https://da.wikipedia.org{author_link}")
            author_soup = BeautifulSoup(author_page.content, "html.parser")

            birth, death = find_wiki_birth_death(author_soup)

            author_row = {
                "link": f"https://da.wikipedia.org{author_link}",
                "name": author_name,
                "born": birth,
                "died": death,
                "name_yr": f"{author_name} ({birth}-{death})",
            }
            authors.append(author_row)
            name_list.append(author_name)
    return authors, name_list


def extract_authors_commons(people,
                            authors: list[dict[str, str]],
                            name_list: list[str],
                            ) -> tuple[list[dict[str, str]], list[str]]:
    """Same as extract_authors, but reads birth and death years from the
    information table on commons.wikimedia.org category pages."""
    for i in people:
        author_name = i.get_text()
        author_link = i.get("href")
        if author_name not in name_list:
            author_page = requests.get(f"https://commons.wikimedia.org{author_link}")
            author_soup = BeautifulSoup(author_page.content, "html.parser")

            boxes = author_soup.find_all("table")
            try:
                box = [b for b in boxes if "Date of birth" in str(b)][0]
                try:
                    death = re.findall(r"(?<=Date of death).*?[^\d]*(\d{4})", str(box))[0]
                except IndexError:
                    death = None
                try:
                    birth = re.findall(r"(?<=Date of birth).*?[^\d]*(\d{4})", str(box))[0]
                except IndexError:
                    birth = None
            except IndexError:  # no table mentions "Date of birth"
                birth = None
                death = None

            birth = none_to_q(birth)
            death = none_to_q(death)

            author_row = {
                "link": f"https://commons.wikimedia.org{author_link}",
                "name": author_name,
                "born": birth,
                "died": death,
                "name_yr": f"{author_name} ({birth}-{death})",
            }
            authors.append(author_row)
            name_list.append(author_name)
    return authors, name_list


def is_next_page(tag):
    """Match the "næste side" (next page) pagination link."""
    return tag.get_text() == "næste side"


def main():
    authors = []
    name_list = []

    print("https://da.wikipedia.org/wiki/Kategori:Dansksprogede_forfattere")
    page = requests.get("https://da.wikipedia.org/wiki/Kategori:Dansksprogede_forfattere")
    soup = BeautifulSoup(page.content, "html.parser")

    # find_all takes the predicate directly and returns all matching tags
    people = list(soup.find_all(is_a_person))
    authors, name_list = extract_authors(people, authors, name_list)

    sub_c = soup.find_all(is_a_subcategory)
    for i in sub_c:
        if "Danmark" not in i.get("title"):
            new_link = f"https://da.wikipedia.org{i.get('href')}"
            page = requests.get(new_link)
            soup = BeautifulSoup(page.content, "html.parser")
            people = list(soup.find_all(is_a_person))
            authors, name_list = extract_authors(people, authors, name_list)
            print(f"DONE: {i.get('title')}")
        else:
            # the Denmark subcategory is too large for a single page, so
            # walk it letter by letter via the &from= index parameter
            print("Processing Authors from Denmark (alphabetic order)...\n")
            alphabet_list = "A B C D E F G H I J K L M N O P Q R S T U V W X Y Z Æ Ø Å".split()
            for letter in tqdm(alphabet_list):
                page = requests.get(f"https://da.wikipedia.org/w/index.php?title=Kategori:Dansksprogede_forfattere_fra_Danmark&from={letter}")
                soup = BeautifulSoup(page.content, "html.parser")
                people = list(soup.find_all(is_a_person))
                authors, name_list = extract_authors(people, authors, name_list)

print(f"Processing https://commons.wikimedia.org/wiki/Category:Writers_from_Denmark_by_name") |
|
|
|
abc_list = "A B C D E F G H I J K L M N O P Q R S T U V W X Y Z".split() |
|
for abc in tqdm(abc_list): |
|
page = requests.get(f"https://commons.wikimedia.org/w/index.php?title=Category:Writers_from_Denmark_by_name&from={abc}") |
|
soup = BeautifulSoup(page.content, 'html.parser') |
|
people = list(soup.find_all("ul" and "li" and "a" and is_a_person_commons)) |
|
authors, name_list = extract_authors_commons( |
|
people, |
|
authors, |
|
name_list |
|
) |
|
    p_counter = 0
    print("Processing https://da.wikipedia.org/wiki/Kategori:Personer_i_Dansk_Biografisk_Leksikon")

    page = requests.get("https://da.wikipedia.org/wiki/Kategori:Personer_i_Dansk_Biografisk_Leksikon")
    soup = BeautifulSoup(page.content, "html.parser")

    # follow the "næste side" (next page) link until it disappears; the
    # while/else runs one final scrape on the last page, which has no
    # such link
    while len(soup.find_all(is_next_page)) > 0:
        people = list(soup.find_all(is_a_person))
        authors, name_list = extract_authors(people, authors, name_list)
        p_counter += 1
        new_page = soup.find_all(is_next_page)[0]["href"]
        new_link = f"https://da.wikipedia.org{new_page}"
        page = requests.get(new_link)
        soup = BeautifulSoup(page.content, "html.parser")
        print(f"Scraped page {p_counter}/~30...")
    else:
        print("Scraping last page...")
        people = list(soup.find_all(is_a_person))
        authors, name_list = extract_authors(people, authors, name_list)

    ds = Dataset.from_list(authors)
    ds.to_parquet(os.path.join(".", "da_people_large.parquet"))


if __name__ == "__main__":
    main()
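# The resulting parquet holds one row per author with the columns link,
# name, born, died, and name_yr, e.g. "H.C. Andersen (1805-1875)".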