Commit e2a3756
Parent(s): 86100db
demo scraping recent WikiNews

Files changed:
- 20240519_qa_public.jsonl +5 -0
- README.md +2 -1
- scrape_morerecent_with_openai.py +116 -0
20240519_qa_public.jsonl ADDED
@@ -0,0 +1,5 @@
+{"question_date": "2024/05/14", "question_url": "https://en.wikinews.org/wiki/Police_identify_man%27s_body_found_in_a_canal_in_Miami_Gardens,_Florida", "question_sentence": "Where was Christopher Lee Henry's body found?", "links": ["https://www.cbsnews.com/miami/news/police-id-body-of-man-found-shot-multiple-times-bound-along-miami-gardens-canal/", "https://www.msn.com/en-us/news/other/police-id-man-found-shot-to-death-in-miami-gardens-with-hands-tied-and-bag-on-his-head/ar-BB1mkmUi?ocid=BingNewsSearch"], "choices": ["In a park", "Along a canal bank", "In a shopping mall", "In a forest"], "answer_text": "Along a canal bank", "answer": [1], "question_id": "20240519_0", "question_source": "WikiNews"}
+{"question_date": "2024/05/14", "question_url": "https://en.wikinews.org/wiki/Police_identify_man%27s_body_found_in_a_canal_in_Miami_Gardens,_Florida", "question_sentence": "What was notable about the condition in which Christopher Lee Henry was found?", "links": ["https://www.cbsnews.com/miami/news/police-id-body-of-man-found-shot-multiple-times-bound-along-miami-gardens-canal/", "https://www.msn.com/en-us/news/other/police-id-man-found-shot-to-death-in-miami-gardens-with-hands-tied-and-bag-on-his-head/ar-BB1mkmUi?ocid=BingNewsSearch"], "choices": ["He was wearing a suit", "His hands were bound behind his back", "He was unscathed", "He was holding a note"], "answer_text": "His hands were bound behind his back", "answer": [1], "question_id": "20240519_1", "question_source": "WikiNews"}
+{"question_date": "2024/05/14", "question_url": "https://en.wikinews.org/wiki/Police_identify_man%27s_body_found_in_a_canal_in_Miami_Gardens,_Florida", "question_sentence": "What did the police offer for information leading to an arrest?", "links": ["https://www.cbsnews.com/miami/news/police-id-body-of-man-found-shot-multiple-times-bound-along-miami-gardens-canal/", "https://www.msn.com/en-us/news/other/police-id-man-found-shot-to-death-in-miami-gardens-with-hands-tied-and-bag-on-his-head/ar-BB1mkmUi?ocid=BingNewsSearch"], "choices": ["$500", "$1,000", "$5,000", "$10,000"], "answer_text": "$5,000", "answer": [2], "question_id": "20240519_2", "question_source": "WikiNews"}
+{"question_date": "2024/05/14", "question_url": "https://en.wikinews.org/wiki/Police_identify_man%27s_body_found_in_a_canal_in_Miami_Gardens,_Florida", "question_sentence": "How long has Angie Ardavin, the witness, lived in the area?", "links": ["https://www.cbsnews.com/miami/news/police-id-body-of-man-found-shot-multiple-times-bound-along-miami-gardens-canal/", "https://www.msn.com/en-us/news/other/police-id-man-found-shot-to-death-in-miami-gardens-with-hands-tied-and-bag-on-his-head/ar-BB1mkmUi?ocid=BingNewsSearch"], "choices": ["5 years", "10 years", "15 years", "20 years"], "answer_text": "15 years", "answer": [2], "question_id": "20240519_3", "question_source": "WikiNews"}
+{"question_date": "2024/05/14", "question_url": "https://en.wikinews.org/wiki/Police_identify_man%27s_body_found_in_a_canal_in_Miami_Gardens,_Florida", "question_sentence": "What was the method of murder used on Christopher Lee Henry?", "links": ["https://www.cbsnews.com/miami/news/police-id-body-of-man-found-shot-multiple-times-bound-along-miami-gardens-canal/", "https://www.msn.com/en-us/news/other/police-id-man-found-shot-to-death-in-miami-gardens-with-hands-tied-and-bag-on-his-head/ar-BB1mkmUi?ocid=BingNewsSearch"], "choices": ["Stabbing", "Strangulation", "Shooting", "Poisoning"], "answer_text": "Shooting", "answer": [2], "question_id": "20240519_4", "question_source": "WikiNews"}
README.md CHANGED
@@ -22,6 +22,7 @@ Prerequisites: `pip install openai lxml cssselector requests xmltodict` and Open
 I've added two scripts:
 - scrape.py : base script to load plain text from the latest WikiNews articles
 - scrape_with_openai.py : pass scraped text to OpenAI's GPT-4o to generate questions and answers for each article
+- scrape_morerecent_with_openai.py : scrape recent, not-yet-published articles from WikiNews's `Category:May 2024`, then use OpenAI's GPT-4o for Q&A
 
 An LLM evaluated on this Q&A could read the WikiNews summary, the links collected from the story, or do its own web searches.
 
@@ -48,4 +49,4 @@ year={2023},
 eprint={2207.13332},
 url={https://openreview.net/forum?id=HfKOIPCvsv}
 }
-```
+```
scrape_morerecent_with_openai.py ADDED
@@ -0,0 +1,116 @@
+# pip install openai lxml cssselect requests xmltodict
+
+from datetime import date, datetime, timedelta
+import json
+import lxml.html
+from lxml.cssselect import CSSSelector
+import time
+
+from openai import OpenAI
+import requests
+import xmltodict
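+
+# keyword blocklist: used below to skip articles nominated for deletion
+# or covering topics too sensitive for LLM question generation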
+tabooWords = ['deletion', 'rape', 'rapist', 'abuse', 'minor']
+
+# load article links from English WikiNews for recent days (not yet published)
+recentArticles = []
+dateForArticle = {}
+today = date.today()
+for days in range(5, 12):
+    tdate = (today - timedelta(days=days))
+    # daily category pages are titled like "May_14,_2024"; the day is not zero-padded
+    tstamp = f"{tdate.strftime('%B')}_{tdate.day},_{tdate.year}"
+    r = requests.get(f"https://en.wikinews.org/wiki/Category:{tstamp}")
+    contents = lxml.html.fromstring(r.content)
+    selAnchor = CSSSelector('a')
+    for linkEl in selAnchor(contents):
+        link = str(linkEl.get('href'))
+        if link[:6] == '/wiki/' and '/Special:' not in link and '/Category:' not in link and 'Main_Page' not in link and 'Help:' not in link and 'Wikinews:' not in link and 'File:' not in link:
+            recentArticles.append(link)
+            dateForArticle[link] = tdate.strftime("%Y/%m/%d")
+    time.sleep(1)
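+
+# generate multiple-choice Q&A for each scraped article with GPT-4o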
+client = OpenAI()
+
+outputs = []
+for article in recentArticles:
+    print(article)
+    r = requests.get(f"https://en.wikinews.org{article}")
+    contents = lxml.html.fromstring(r.content)
+
+    # gather body paragraphs, skipping review/amendment status notices
+    selMain = CSSSelector('.mw-body-content p')
+    plaintxt = ""
+    for para in selMain(contents):
+        c = para.text_content()
+        if 'pre-publication review' in c or 'last amended' in c:
+            continue
+        plaintxt += c + "\n"
+    # cut everything from the "Have an opinion..." reader-comment footer onward
+    if 'Have an opinion' in plaintxt:
+        plaintxt = plaintxt[:plaintxt.index('Have an opinion')]
+
+    plaintxt = plaintxt.strip()
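+
+    # drop any article that trips the taboo-keyword filter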
+    block = False
+    for taboo in tabooWords:
+        if taboo in plaintxt.lower():
+            block = True
+    if block:
+        print("Article is marked for deletion or covers a subject too sensitive for AI summarization")
+        continue
+
+    dt = dateForArticle[article]
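+
+    # collect the article's external source links (rel="nofollow" anchors),
+    # skipping wiki-internal, share-button, mail, and license links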
+    selAnchor = CSSSelector('a[rel="nofollow"]')
+    foundElements = selAnchor(contents)
+    articleLinks = []
+    for el in foundElements:
+        link = el.get('href')
+
+        linkblocks = ['/wiki/', '.com/intent/tweet', 'creativecommons.org/licenses', 'facebook.com/sharer.php', 'mailto:', 'reddit.com/submit', 'linkedin.com/shareArticle']
+        block = False
+        for blocker in linkblocks:
+            if blocker in link.lower():
+                block = True
+        if block:
+            continue
+
+        articleLinks.append(link)
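+
+    # ask GPT-4o for 3-5 multiple-choice questions, returned as a JSON array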
+    qs = []
+    response = client.chat.completions.create(
+        model="gpt-4o",
+        messages=[
+            {
+                "role": "system",
+                "content": "You will be provided with an article from today's news. Provide 3-5 multiple choice questions based on the content of the article, especially newly-introduced facts or knowledge. Don't make the correct answer any more specific, numeric, or realistic compared to the others.\n Respond in JSON format: [{ question: 'Who was elected president of Sesame Street?', choices: ['Big Bird', 'Donald Duck'], answer: 'Big Bird' }]",
+            },
+            {
+                "role": "user",
+                "content": f"Here's the article: \n{plaintxt}",
+            },
+        ],
+    )
+    reply = response.choices[0].message.content
+    # strip any prose or code fences around the reply: keep only the outermost JSON array
+    reply = reply[reply.index('[') : reply.rindex(']') + 1]
+    qs = json.loads(reply)
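+
+    # keep only questions whose answer appears among the listed choices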
+    for q in qs:
+        if q["answer"] not in q["choices"]:
+            continue
+
+        outputs.append({
+            "question_date": dt,
+            "question_url": f"https://en.wikinews.org{article}",
+            "question_sentence": q["question"],
+            "links": articleLinks,
+            "choices": q["choices"],
+            "answer_text": q["answer"],
+            "answer": [ q["choices"].index(q["answer"]) ],
+        })
+    time.sleep(1)
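+
+# write the questions out as JSONL (one JSON object per line)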
+tstamp = datetime.now().strftime("%Y%m%d")
+with open(f"./{tstamp}_qa_public.jsonl", "w") as fi:
+    for idx, op in enumerate(outputs):
+        op["question_id"] = f"{tstamp}_{idx}"
+        op["question_source"] = "WikiNews"
+        fi.write(json.dumps(op) + "\n")