from urllib.parse import urljoin

import requests
from lxml import etree


# Shared HTTP headers for all requests in this script.
# A desktop Chrome User-Agent is sent — presumably so the site serves the
# normal page instead of rejecting the default `requests` UA (TODO confirm).
header = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36'
    }
def getUrls():
    """Fetch the news index page and collect article links.

    :return: list of up to five absolute URLs of news detail pages,
             in the order they appear on the index page
    """
    homeUrl = 'http://news.jiangnan.edu.cn/yw.htm'

    # Decode the raw bytes explicitly as UTF-8 (the page's encoding).
    html = requests.get(url=homeUrl, headers=header).content.decode('utf-8')

    selector = etree.HTML(html)

    # The headline list is the first <ul class="nobt"> element on the page.
    newsPart = selector.xpath("//ul[@class='nobt']")[0]
    newsUrls = newsPart.xpath(".//a/@href")[0:5]  # keep only the first 5 items

    # urljoin resolves relative hrefs against the index page, handling
    # "../", "/"-rooted and already-absolute hrefs correctly — the original
    # plain string concatenation produced broken URLs for those cases.
    return [urljoin(homeUrl, href) for href in newsUrls]


# NOTE(review): removed a commented-out earlier draft of getNews() here; the
# live implementation below supersedes it. (The draft was also broken: it
# iterated over `getUrls().__iter__` — the method object — instead of calling
# it or iterating the list directly.)
def getNews():
    """Download and parse every article returned by getUrls().

    :return: list with one dict per article; each dict has the keys
             'title', 'pubTime' (date string, e.g. '2018-06-15') and
             'content' (paragraphs joined with newlines, each indented)
    """
    # Accumulates one dict per successfully parsed article.
    News = []
    for url in getUrls():
        # Fetch the detail page; decode explicitly as UTF-8.
        page = requests.get(url=url, headers=header).content.decode('utf-8')
        selector = etree.HTML(page)
        article = selector.xpath("//form[@name='_newscontent_fromname']")[0]

        # Title and publication date live in the con_title header block.
        # The span text looks like "发布日期：2018-06-15 ..." — strip and take
        # the first whitespace-separated token to isolate the date part.
        con_title = article.xpath("./div[@class='con_title']")[0]
        title = str(con_title.xpath("./h3/text()")[0])
        pubTime = str(con_title.xpath("./span/text()")[0]).strip().split()[0]

        # Body paragraphs; centered <p> elements are excluded — presumably
        # image/caption paragraphs (same filter as the original).
        newsPart = article.xpath(".//div[@class='v_news_content']")[0]
        paragraphs = newsPart.xpath(".//p[not(@style='text-align: center;')]/text()")

        # Indent each paragraph ("first-line indent") and join with newlines.
        # (Replaces a manual append loop; also fixes the original's rebinding
        # of `content` from page HTML to paragraph list, and of `news` from
        # lxml element to dict, which made the flow hard to follow.)
        content = "\n".join("     " + p for p in paragraphs)

        News.append({
            'title': title,
            'pubTime': pubTime,
            'content': content,
        })
    print("获取到新闻列表")
    return News
