import json
from datetime import datetime

import lxml.html
import requests
import xmltodict
from lxml.cssselect import CSSSelector
|
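# Fetch the five most recently published WikiNews articles as an Atom feed.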
r = requests.get(
    "https://en.wikinews.org/w/index.php"
    "?title=Special:NewsFeed&feed=atom&categories=Published&count=5"
)
r.raise_for_status()  # surface HTTP errors instead of parsing an error page
data = xmltodict.parse(r.content)
|
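# Collect one output record per feed entry.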
outputs = []
entries = data["feed"]["entry"]
# xmltodict collapses a single-entry feed into a bare dict rather than a
# one-item list, so normalize before iterating.
if isinstance(entries, dict):
    entries = [entries]

# Matches the external source links that WikiNews tags with rel="nofollow".
selAnchor = CSSSelector('a[rel="nofollow"]')

for en in entries:
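    # Atom "updated" stamps look like 2024-01-31T12:34:56Z; keep only the date.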
    dtme = datetime.strptime(en["updated"], "%Y-%m-%dT%H:%M:%SZ")
    dt = dtme.strftime("%Y/%m/%d")
|
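    # The summary field carries embedded HTML; parse it so links can be extracted.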
    summ = lxml.html.fromstring(en["summary"]["#text"])

    # Print each cited source URL for quick inspection.
    foundElements = selAnchor(summ)
    for el in foundElements:
        print(el.get("href"))
|
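    # Drop the "Have an opinion on this story?" call-to-action footer that
    # WikiNews appends to every summary.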
    plaintxt = summ.text_content()
    if 'Have an opinion on this story?' in plaintxt:
        plaintxt = plaintxt[:plaintxt.find('Have an opinion on this story?')]

    outputs.append({
        "question_date": dt,
        "question_url": en["link"]["@href"],
    })
|
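# Write one JSON object per line (JSONL), keyed by today's date.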
tstamp = datetime.now().strftime("%Y%m%d")
with open(f"./{tstamp}_qa_public.jsonl", "w") as fi:
    for idx, op in enumerate(outputs):
        op["question_id"] = f"{tstamp}_{idx}"
        op["question_source"] = "WikiNews"
        fi.write(json.dumps(op) + "\n")