import requests  # crawls down to level-3 pages (extremely unstable)
from lxml import etree
from fake_useragent import UserAgent
import os
import time

# Shared HTTP request headers for all fetches below; a fixed Chrome UA string
# is used instead of a random one (the fake_useragent line is kept for reference).
headers = {
    # "User-Agent": UserAgent().random
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36"
}


def FileSave(save_path, filename, results):
    """Write 2-field records to <save_path>/<filename>.txt, tab-separated, one per line.

    Args:
        save_path: directory to write into; created (with parents) if missing.
        filename:  base name of the output file, without extension.
        results:   iterable of indexable records; fields 0 and 1 are written.
    """
    # os.mkdir raised if any parent directory was missing; makedirs with
    # exist_ok covers both the "exists" and "missing parents" cases.
    os.makedirs(save_path, exist_ok=True)
    path = os.path.join(save_path, filename + ".txt")
    # 'w' suffices: the file is only written, never read back here.
    with open(path, 'w', encoding='utf-8') as fp:
        for i in results:
            fp.write("%s\t%s\n" % (i[0], i[1]))


def FileSave2(save_path, filename, results):
    """Write 3-field records to <save_path>/<filename>.txt, tab-separated, one per line.

    Args:
        save_path: directory to write into; created (with parents) if missing.
        filename:  base name of the output file, without extension.
        results:   iterable of indexable records; fields 0, 1 and 2 are written.
    """
    # os.mkdir raised if any parent directory was missing; makedirs with
    # exist_ok covers both the "exists" and "missing parents" cases.
    os.makedirs(save_path, exist_ok=True)
    path = os.path.join(save_path, filename + ".txt")
    # 'w' suffices: the file is only written, never read back here.
    with open(path, 'w', encoding='utf-8') as fp:
        for i in results:
            fp.write("%s\t%s\t%s\n" % (i[0], i[1], i[2]))


def Page_Level(myPage):  # level-1 page: channel names and channel URLs
    """Extract (channel name, channel url) pairs from the ranking page HTML."""
    dom = etree.HTML(myPage)
    names = dom.xpath('//div[@class="titleBar"]/h2/text()')
    # e.g. ['news', 'entertainment', ..., 'sports']
    urls = dom.xpath('//div[@class="more"]/a/@href')
    # e.g. ['url', 'url', ..., 'url']
    # Pair name k with url k by index (not zip) so a shorter url list still
    # raises IndexError, exactly like the original accumulation loop did.
    return [(names[k], urls[k]) for k in range(len(names))]


def Page_Level2(chennel_html):  # level-2 page: extract each news item's title, URL and text
    """For every news link in a channel's ranking table, fetch the article
    and collect its body text (including paginated continuation pages).

    Returns:
        zip of (title, url, text) 3-tuples, one per news item.
    """
    dom = etree.HTML(chennel_html)
    news_titles = dom.xpath('//tr/td/a/text()')
    news_urls = dom.xpath('//tr/td/a/@href')
    # Titles and hrefs come from the same <a> nodes, so counts must match.
    assert (len(news_titles) == len(news_urls))
    texts = []
    for url in news_urls:
        # stream=True only defers the download; .content still pulls the body.
        page = requests.get(url, headers=headers, stream=True,
                            timeout=10).content.decode("gbk", "ignore")
        body = Page_Level3(page)   # text of the first page
        extra = More_Pages(page)   # text of any follow-up pages
        texts.append(body + extra)
    # BUG FIX: the third column used to be a whole (title, url, text) tuple,
    # so FileSave2 wrote a tuple repr into the file instead of the article text.
    return zip(news_titles, news_urls, texts)


def Page_Level3(page):  # extract the body text of one news article
    """Return the article body text of a single news page, spaces removed.

    Matches attribute-less <p> paragraphs inside the two NetEase article
    containers ("post_text" div or "endText" div) and concatenates them.
    """
    dom = etree.HTML(page)
    content = dom.xpath('//div[@class="post_text"]/p[not(@*)]/text() | //div[@id="endText"]/p[not(@*)]/text()')
    # BUG FIX (idiom): str.strip() never returns None, so the original
    # `is not None` test was always true; a truthiness test skips
    # whitespace-only lines, and joining empty strings was a no-op anyway,
    # so the produced text is unchanged.
    parts = [line.strip().replace(' ', '') for line in content if line.strip()]
    return "".join(parts)


def More_Pages(page):
    """Fetch and concatenate the body text of an article's follow-up pages.

    Detects pagination via the marker string in the page HTML; returns the
    extra text, or '' when the article is a single page.
    """
    if '分页' not in page:
        # BUG FIX: the original returned the literal string 'None', which the
        # caller concatenated straight into the article text.
        return ''
    dom = etree.HTML(page)
    urls = dom.xpath('//tr/td/a[position()<last()-1]/following-sibling::a[1]/@href')
    text = ''
    for url in urls:
        print(url)
        # Use the shared headers and a timeout, consistent with the other fetches.
        page2 = requests.get(url, headers=headers, timeout=10).content.decode("gbk", 'ignore')
        text = text + ''.join(Page_Level3(page2))
    return text


def spider(url, save_path='H:\\python作业\\爬虫\\'):
    """Crawl the NetEase ranking page and save results to numbered text files.

    Writes one "channel list" file, then one file per channel with that
    channel's (title, url, text) records.

    Args:
        url:       the ranking-page URL to start from.
        save_path: output directory; was hard-coded, now a parameter with the
                   original value as default so existing callers are unaffected.
    """
    print("downloading ", url)
    myPage = requests.get(url, headers=headers, timeout=10).content.decode("gbk", "ignore")
    page_level_results = Page_Level(myPage)
    file_index = 1
    # File 1: the channel-name/channel-url listing ("新闻频道.txt").
    FileSave(save_path, str(file_index) + "_" + u"新闻频道", page_level_results)
    file_index += 1
    for name, channel_url in page_level_results:  # channel_url is the channel's page
        # The original omitted the shared headers on this fetch; add them
        # (and a timeout) for consistency with the entry-page request.
        page = requests.get(channel_url, headers=headers, timeout=10).content.decode("gbk", 'ignore')
        FileSave2(save_path, str(file_index) + "_" + name, Page_Level2(page))
        file_index += 1


if __name__ == "__main__":
    start = time.time()  # wall-clock start, for the elapsed-time report below
    print('start......')
    start_url = "http://news.163.com/rank/"  # NetEase news ranking page: crawl entry point
    spider(start_url)
    print('end')
    end = time.time()
    # Message reads "crawler run time: %.4f seconds"
    print('爬虫运行时间为%.4f秒' % (end - start))
