import re

import requests
from lxml import etree


# Browser-like User-Agent header, sent with requests so the target site
# serves normal pages instead of rejecting obvious script traffic.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36"
}


def geturls():
    """Return the article URLs listed on the issue index page.

    Fetches the 2019-01 Qiushi issue index and extracts every anchor
    href that points to a ``/dukan/`` article page.

    Returns:
        list[str]: the matched href values, in page order.
    """
    target = "http://www.qstheory.cn/dukan/qs/2014/2019-01/01/c_1123924172.htm"
    # Send the browser User-Agent so the site does not reject the request.
    response = requests.get(target, headers=headers).content.decode('UTF-8')
    # re.findall already returns a list; the original identity
    # comprehension around it added nothing.  NOTE: this also fixes a
    # NameError — `re` was used here without being imported.
    return re.findall(r'<a href="(.+/dukan/.+\.htm)"', response)


def parsepage(urls):
    """Fetch each article URL and extract its metadata and body text.

    Args:
        urls: iterable of article page URLs (as returned by ``geturls``).

    Returns:
        list[dict]: one dict per URL, populated with whichever of the
        keys ``'imgurl'`` (list of image ``src`` values), ``'title'``,
        ``'author'`` and ``'content'`` were found on that page.
    """
    mainbody = []   # collected news items, one dict per page
    # ********** Begin ********** #
    for url in urls:
        news = {}
        mainbody.append(news)
        # Send the browser User-Agent so the site does not reject us.
        response = requests.get(url, headers=headers).content.decode('UTF-8')
        tree = etree.HTML(response)  # type: etree.ElementBase

        # Image URLs: collect the src attribute of every <img> in the article.
        for element in tree.xpath(
                r"//section[@class='container']//img"):  # type: etree.ElementBase
            news.setdefault('imgurl', []).append(element.get('src'))

        # Title, with leading/trailing whitespace removed.
        for element in tree.xpath(
                r"//section[@class='container']//h1"):  # type: etree.ElementBase
            news['title'] = element.text.strip()

        # Author, with leading/trailing whitespace removed.
        for element in tree.xpath(
                r"//section[@class='container']//span[@class='appellation']"):  # type: etree.ElementBase
            news['author'] = element.text.strip()

        # Body text: string() concatenates all descendant text nodes.
        for element in tree.xpath(
                r"//section[@class='container']//div[@class='clipboard_text']"):  # type: etree.ElementBase
            news['content'] = element.xpath('string()').strip()
    # ********** End ********** #

    return mainbody



if __name__ == "__main__":
    # Scrape the issue index, then parse every article page it links to.
    parsepage(geturls())
