Modalities: Image, Text
Formats: arrow
Libraries: Datasets
License:
File size: 11,182 bytes
Commit: 89834ca
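
Once the script below has produced da_people_large.parquet, the table can be read back with the datasets library. A minimal sketch, assuming the script was run from the repository root so the parquet file sits in the current directory:

from datasets import Dataset

# load the scraped author table (columns: link, name, born, died, name_yr)
authors = Dataset.from_parquet("./da_people_large.parquet")
print(authors.num_rows)
print(authors[0]["name_yr"])  # formatted as "<name> (<birth>-<death>)"
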
import os
import re
import requests
from tqdm import tqdm
from datasets import Dataset
from bs4 import BeautifulSoup
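
# Scrape names and birth/death years of Danish-language authors from Danish
# Wikipedia category pages, Wikimedia Commons and the
# "Personer i Dansk Biografisk Leksikon" category, and save the result to
# da_people_large.parquet.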

# try to find birth/death years in the page's first embedded <script> tag
def from_wiki_script(author_soup):
    try:
        text = str(author_soup.find_all("script")[0])
    except IndexError:
        return None, None

    try:
        birth = re.findall(r"Født i \d{4}", text)[0]
        birth = re.findall(r"\d{4}", birth)[0]
    except IndexError:
        birth = None

    try:
        death = re.findall(r"Døde i \d{4}", text)[0]
        death = re.findall(r"\d{4}", death)[0]
    except IndexError:
        death = None
    return birth, death

def from_infobox(author_soup):
    # look in the right-hand "biography" infobox table
    boxes = author_soup.find_all("table")
    try:
        box = [i for i in boxes if "biography" in i.get("class", [])][0]
    except IndexError:
        return None, None

    try:
        # value is the first 4-digit number after the label
        death = re.findall(r"(?<=Død).*?[^\d]*(\d{4})", str(box))[0]
    except IndexError:
        death = None
    try:
        birth = re.findall(r"(?<=Født).*?[^\d]*(\d{4})", str(box))[0]
    except IndexError:
        birth = None

    return birth, death

# last resort: take the first two 4-digit numbers in the first paragraph
def from_wiki_text(author_soup):
    try:
        text = author_soup.find_all("p")[0].get_text()
    except IndexError:
        return None, None

    years = re.findall(r"\d{4}", text)
    birth = years[0] if len(years) > 0 else None
    death = years[1] if len(years) > 1 else None

    return birth, death

def none_to_q(val: str) -> str:
    """If the value is None, replace it with "?"."""
    if val is None:
        val = "?"
    return val

def find_wiki_birth_death(author_soup):
    birth, death = from_wiki_script(author_soup)
    if birth is None and death is None:
        birth, death = from_infobox(author_soup)
    if birth is None and death is None:
        birth, death = from_wiki_text(author_soup)

    birth = none_to_q(birth)
    death = none_to_q(death)

    return birth, death

# filter for links to people (plain article links carrying only href and title)
def is_a_person(tag):
    return (tag.has_attr("href") and
            tag.has_attr("title") and
            len(tag.attrs) == 2 and
            "index" not in tag.get("href") and
            ":" not in tag.get("href"))
# filter for author category links on Wikimedia Commons
def is_a_person_commons(tag):
    return (tag.has_attr("href") and
            tag.has_attr("title") and
            len(tag.attrs) == 2 and
            "index" not in tag.get("href") and
            "Writers" not in tag.get("title") and
            "ategories" not in tag.get("title") and
            "Denmark" not in tag.get("title") and
            "Category" in tag.get("title"))

# filter for "Dansksprogede" (Danish-language) author subcategories
def is_a_subcategory(tag):
    return (tag.has_attr("href") and
            tag.has_attr("title") and
            "Dansksprogede" in tag.get("title"))

def flatten(twodlist: list[list]) -> list:
    """Flatten a list by one dimension."""
    onedlist = [x for xs in twodlist for x in xs]
    return onedlist

def extract_authors(people,
                    authors: list[dict[str, str]],
                    name_list: list[str]
                    ) -> tuple[list[dict[str, str]], list[str]]:
    for i in people:
        author_name = i.get("title")
        author_link = i.get("href")
        if author_name not in name_list:
            # fetch the author's page and look up their birth/death years
            author_page = requests.get(f"https://da.wikipedia.org{author_link}")
            author_soup = BeautifulSoup(author_page.content, 'html.parser')

            birth, death = find_wiki_birth_death(author_soup)

            author_row = {
                "link": f"https://da.wikipedia.org{author_link}",
                "name": author_name,
                "born": birth,
                "died": death,
                "name_yr": f"{author_name} ({birth}-{death})",
            }
            authors.append(author_row)
            name_list.append(author_name)
    return authors, name_list

def extract_authors_commons(people,
                            authors: list[dict[str, str]],
                            name_list: list[str]
                            ) -> tuple[list[dict[str, str]], list[str]]:
    for i in people:
        author_name = i.get_text()
        author_link = i.get("href")
        if author_name not in name_list:
            # fetch the Commons category page and look up birth/death years
            author_page = requests.get(f"https://commons.wikimedia.org{author_link}")
            author_soup = BeautifulSoup(author_page.content, 'html.parser')

            boxes = author_soup.find_all("table")
            try:
                box = [b for b in boxes if "Date of birth" in str(b)][0]
                try:
                    # value is the first 4-digit number after the label
                    death = re.findall(r"(?<=Date of death).*?[^\d]*(\d{4})", str(box))[0]
                except IndexError:
                    death = None
                try:
                    birth = re.findall(r"(?<=Date of birth).*?[^\d]*(\d{4})", str(box))[0]
                except IndexError:
                    birth = None
            except IndexError:
                birth = None
                death = None

            birth = none_to_q(birth)
            death = none_to_q(death)

            author_row = {
                "link": f"https://commons.wikimedia.org{author_link}",
                "name": author_name,
                "born": birth,
                "died": death,
                "name_yr": f"{author_name} ({birth}-{death})",
            }
            authors.append(author_row)
            name_list.append(author_name)

    return authors, name_list

def is_next_page(tag):
    return (tag.get_text() == "næste side")
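
# Overall flow in main(): scrape the "Dansksprogede forfattere" category and its
# subcategories, then the Commons "Writers from Denmark by name" category, then
# the paginated "Personer i Dansk Biografisk Leksikon" category, de-duplicating
# authors by name along the way.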

def main():
    authors = []
    name_list = []
    # authors from the main category page
    print("https://da.wikipedia.org/wiki/Kategori:Dansksprogede_forfattere")
    page = requests.get("https://da.wikipedia.org/wiki/Kategori:Dansksprogede_forfattere")
    soup = BeautifulSoup(page.content, 'html.parser')
    # 1. collect all author links on the page
    people = list(soup.find_all(is_a_person))
    authors, name_list = extract_authors(
        people,
        authors,
        name_list
    )
    ##### go into subcategories
    sub_c = soup.find_all(is_a_subcategory)
    for i in sub_c:

        if "Danmark" not in i.get("title"):
            # authors not from Denmark (more pages, fewer people per page)
            new_link = f"https://da.wikipedia.org{i.get('href')}"
            page = requests.get(new_link)
            soup = BeautifulSoup(page.content, 'html.parser')
            people = list(soup.find_all(is_a_person))
            authors, name_list = extract_authors(
                people,
                authors,
                name_list
            )
            print(f"DONE: {i.get('title')}")

        elif "Danmark" in i.get("title"):
            # authors from Denmark (fewer pages, more people), paged by first letter
            print("Processing Authors from Denmark (alphabetic order)...\n")
            # alphabet_list = "A B".split()  # smaller list for testing
            alphabet_list = "A B C D E F G H I J K L M N O P Q R S T U V W X Y Z Æ Ø Å".split()
            for letter in tqdm(alphabet_list):
                page = requests.get(f"https://da.wikipedia.org/w/index.php?title=Kategori:Dansksprogede_forfattere_fra_Danmark&from={letter}")
                soup = BeautifulSoup(page.content, 'html.parser')
                people = list(soup.find_all(is_a_person))
                authors, name_list = extract_authors(
                    people,
                    authors,
                    name_list
                )
    ### other source: Wikimedia Commons, paged by first letter
    print("Processing https://commons.wikimedia.org/wiki/Category:Writers_from_Denmark_by_name")
    # abc_list = "A B".split()  # smaller list for testing
    abc_list = "A B C D E F G H I J K L M N O P Q R S T U V W X Y Z".split()
    for abc in tqdm(abc_list):
        page = requests.get(f"https://commons.wikimedia.org/w/index.php?title=Category:Writers_from_Denmark_by_name&from={abc}")
        soup = BeautifulSoup(page.content, 'html.parser')
        people = list(soup.find_all(is_a_person_commons))
        authors, name_list = extract_authors_commons(
            people,
            authors,
            name_list
        )
    
    # another source: the Dansk Biografisk Leksikon category
    p_counter = 0
    print("Processing https://da.wikipedia.org/wiki/Kategori:Personer_i_Dansk_Biografisk_Leksikon")

    page = requests.get("https://da.wikipedia.org/wiki/Kategori:Personer_i_Dansk_Biografisk_Leksikon")
    soup = BeautifulSoup(page.content, 'html.parser')
    # scrape a page, follow the "næste side" link, repeat until there is no next page
    while len(soup.find_all(is_next_page)) > 0:

        people = list(soup.find_all(is_a_person))
        authors, name_list = extract_authors(
            people,
            authors,
            name_list
        )
        p_counter += 1
        new_page = soup.find_all(is_next_page)[0]["href"]
        new_link = f"https://da.wikipedia.org{new_page}"
        page = requests.get(new_link)
        soup = BeautifulSoup(page.content, 'html.parser')
        print(f"Scraped page {p_counter}/~30...")

    # last page (no "næste side" link left)
    print("Scraping last page...")
    people = list(soup.find_all(is_a_person))
    authors, name_list = extract_authors(
        people,
        authors,
        name_list
    )

    ds = Dataset.from_list(authors)
    ds.to_parquet(os.path.join(".", "da_people_large.parquet"))


if __name__ == "__main__":
    main()