# relive-qa/scrape.py
# pip install lxml cssselect requests xmltodict
from datetime import datetime
import json
import lxml.html
from lxml.cssselect import CSSSelector
import requests
import xmltodict
# Fetch the latest published articles from the English Wikinews Atom feed
r = requests.get("https://en.wikinews.org/w/index.php?title=Special:NewsFeed&feed=atom&categories=Published&count=5")
r.raise_for_status()
data = xmltodict.parse(r.content)
outputs = []
entries = data["feed"]["entry"]
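# Defensive guard: xmltodict collapses a single <entry> element into a dict
# rather than a one-item list, so normalize before iterating (the request
# above asks for 5 entries, but the feed may return fewer)
if isinstance(entries, dict):
    entries = [entries]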
# Match the external source links in each article's summary HTML
selAnchor = CSSSelector('a[rel="nofollow"]')

for en in entries:
    # Other useful fields: en["title"] (headline), en["summary"]["#text"] (HTML body)
    dtme = datetime.strptime(en["updated"], "%Y-%m-%dT%H:%M:%SZ")
    dt = dtme.strftime("%Y/%m/%d")
    summ = lxml.html.fromstring(en["summary"]["#text"])
    foundElements = selAnchor(summ)
    for el in foundElements:
        print(el.get("href"))
    # Trim the boilerplate footer that Wikinews appends to each summary
    plaintxt = summ.text_content()
    if 'Have an opinion on this story?' in plaintxt:
        plaintxt = plaintxt[:plaintxt.find('Have an opinion on this story?')]
    # print(plaintxt)
    outputs.append({
        "question_date": dt,
        "question_url": en["link"]["@href"],
    })
# Write one JSON record per article to a JSONL file keyed by today's date
tstamp = datetime.now().strftime("%Y%m%d")
with open(f"./{tstamp}_qa_public.jsonl", "w") as fi:
    for idx, op in enumerate(outputs):
        op["question_id"] = f"{tstamp}_{idx}"
        op["question_source"] = "WikiNews"
        fi.write(json.dumps(op) + "\n")
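# Running `python scrape.py` prints the source links found in each summary
# and writes a file such as ./20240501_qa_public.jsonl, one record per line.
# An illustrative (not actual) output record:
# {"question_date": "2024/05/01", "question_url": "https://en.wikinews.org/wiki/...", "question_id": "20240501_0", "question_source": "WikiNews"}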