# relive-qa / scrape_with_openai.py
# Author: monsoon-nlp
# Last commit: "rework prompt and meaningful links" (f77f672)
# pip install openai lxml cssselector requests xmltodict
from datetime import datetime
import json
import lxml.html
from lxml.cssselect import CSSSelector
from openai import OpenAI
import requests
import xmltodict
# OpenAI client — picks up OPENAI_API_KEY from the environment.
client = OpenAI()

# Fetch the five most recent published articles from English WikiNews
# as an Atom feed, then parse the XML into nested dicts.
FEED_URL = "https://en.wikinews.org/w/index.php?title=Special:NewsFeed&feed=atom&categories=Published&count=5"
feed_response = requests.get(FEED_URL)
feed_doc = xmltodict.parse(feed_response.content)

outputs = []
entries = feed_doc["feed"]["entry"]
# The anchor selector is loop-invariant — compile it once, not per entry.
# WikiNews marks the external source links in each summary with rel="nofollow".
selAnchor = CSSSelector('a[rel="nofollow"]')

# Social-share / boilerplate link fragments that are never real sources.
SHARE_LINK_MARKERS = (
    '.com/intent/tweet',
    'facebook.com/sharer.php',
    'mailto:',
    'reddit.com/submit',
    'linkedin.com/shareArticle',
)

# xmltodict collapses a feed with exactly one <entry> into a single dict
# instead of a list; normalize so the loop always sees a list.
if isinstance(entries, dict):
    entries = [entries]

for en in entries:
    # Feed timestamps look like "2024-01-31T12:34:56Z"; keep only the date
    # for the question record (e.g. "2024/01/31").
    dtme = datetime.strptime(en["updated"], "%Y-%m-%dT%H:%M:%SZ")
    dt = dtme.strftime("%Y/%m/%d")

    # The summary field is HTML: pull out candidate source links and the
    # plain article text.
    summ = lxml.html.fromstring(en["summary"]["#text"])
    articleLinks = []
    for el in selAnchor(summ):
        link = el.get('href')
        # href can be absent (None); skip it and any share-widget link.
        if link is None or any(marker in link for marker in SHARE_LINK_MARKERS):
            continue
        articleLinks.append(link)

    plaintxt = summ.text_content()
    # Trim the reader-comment boilerplate WikiNews appends to every article.
    cutoff = plaintxt.find('Have an opinion on this story?')
    if cutoff != -1:
        plaintxt = plaintxt[:cutoff]

    # Ask the model for multiple-choice questions about the article.
    response = client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {
                "role": "system",
                "content": "You will be provided with an article from today's news. Provide 3-5 multiple choice questions based on the content of the article, especially newly-introduced facts or knowledge. Don't make the correct answer any more specific, numeric, or realistic compared to the others.\n Respond in JSON format: [{ question: 'Who was elected president of Sesame Street?', choices: ['Big Bird', 'Donald Duck'], answer: 'Big Bird' }]",
            },
            {
                "role": "user",
                "content": f"Here's the article: \n{plaintxt}",
            },
        ],
    )
    reply = response.choices[0].message.content

    # The model may wrap the JSON in prose or code fences; keep only the
    # outermost [...] span. If the reply has no such span or is not valid
    # JSON, skip this entry instead of crashing the whole scrape
    # (str.index/str.rindex raise ValueError; json.JSONDecodeError is a
    # ValueError subclass).
    try:
        reply = reply[reply.index('[') : reply.rindex(']') + 1]
        qs = json.loads(reply)
    except ValueError:
        continue

    for q in qs:
        # Drop hallucinated answers that are not among the offered choices.
        if q["answer"] not in q["choices"]:
            continue
        outputs.append({
            "question_date": dt,
            "question_url": en["link"]["@href"],
            "question_sentence": q["question"],
            "links": articleLinks,
            "choices": q["choices"],
            "answer_text": q["answer"],
            "answer": [ q["choices"].index(q["answer"]) ],
        })
# Write the collected questions as JSONL, one object per line, into a file
# stamped with today's date (e.g. ./20240131_qa_public.jsonl).
# Encoding is pinned to UTF-8: the platform default (e.g. cp1252 on Windows)
# is otherwise used, which makes the output machine-dependent.
tstamp = datetime.now().strftime("%Y%m%d")
with open(f"./{tstamp}_qa_public.jsonl", "w", encoding="utf-8") as fi:
    for idx, op in enumerate(outputs):
        # Attach stable per-run identifiers before serializing.
        op["question_id"] = f"{tstamp}_{idx}"
        op["question_source"] = "WikiNews"
        fi.write(json.dumps(op) + "\n")