import requests  # 可爬取至二级页面
from lxml import etree
from fake_useragent import UserAgent
import os
import time

# Shared HTTP request headers: a random browser User-Agent string so the
# crawler looks less like a bot. NOTE: `UserAgent().random` is evaluated once
# at import time, so every request in a single run reuses the same UA value.
headers = {
    "User-Agent": UserAgent().random
}


def FileSave(save_path, filename, results):
    """Write (left, right) pairs to ``<save_path>/<filename>.txt``.

    Each item of *results* is an iterable of two values; they are written
    as one tab-separated line per pair, UTF-8 encoded.

    :param save_path: target directory; created (including parents) if missing
    :param filename: file name without the ``.txt`` extension
    :param results: iterable of 2-tuples, e.g. (title, url)
    """
    # makedirs handles nested paths (os.mkdir would fail if the parent is
    # missing) and exist_ok avoids a race between the check and the create.
    os.makedirs(save_path, exist_ok=True)
    path = os.path.join(save_path, filename + ".txt")
    # Plain 'w' is enough: the file is only written, never read back here.
    with open(path, 'w', encoding='utf-8') as fp:
        for left, right in results:
            fp.write(f"{left}\t{right}\n")


def Page_Level(myPage):  # first-level page: channel names and channel URLs
    """Parse the rank page and pair each channel name with its URL.

    :param myPage: decoded HTML text of the first-level (rank) page
    :return: list of (channel_name, channel_url) tuples

    ``zip`` pairs the two xpath result lists and stops at the shorter one,
    so a channel missing its "more" link no longer raises IndexError (the
    original indexed both lists by position). Returning a list keeps the
    result re-iterable, as before.
    """
    dom = etree.HTML(myPage)
    # e.g. ['新闻', '娱乐', ..., '体育']
    channel_names = dom.xpath('//div[@class="titleBar"]/h2/text()')
    # e.g. ['url', 'url', ...]
    channel_urls = dom.xpath('//div[@class="more"]/a/@href')
    return list(zip(channel_names, channel_urls))


def Page_Level2(chennel_html):  # second-level page: news titles and URLs per channel
    """Extract every news headline and its URL from a channel page.

    :param chennel_html: decoded HTML text of one channel's rank page
    :return: list of (title, url) tuples

    The original returned a bare ``zip`` object, which in Python 3 is a
    one-shot iterator (empty after a single pass); materializing it as a
    list lets callers iterate the result more than once.
    """
    dom = etree.HTML(chennel_html)
    news_titles = dom.xpath('//tr/td/a/text()')
    news_urls = dom.xpath('//tr/td/a/@href')
    # The two lists can differ in length on malformed rows; zip simply
    # truncates to the shorter one.
    return list(zip(news_titles, news_urls))


def spider(url):
    """Crawl the NetEase rank page and save channel + headline listings.

    Downloads the first-level page at *url*, writes its (channel name, URL)
    pairs to one index file, then fetches every channel page and writes its
    (headline, URL) pairs to a numbered per-channel file.

    :param url: URL of the first-level rank page (GBK-encoded HTML)
    """
    print("downloading ", url)
    # The site serves GBK; "ignore" skips any bytes that are not valid GBK
    # instead of raising UnicodeDecodeError mid-crawl.
    myPage = requests.get(url, headers=headers).content.decode("gbk", "ignore")
    page_level_results = Page_Level(myPage)  # crawl the first-level page
    save_path = 'H:\\新建文件夹\\网易新闻\\'  # output directory
    filename = u"新闻频道"
    # Write the (channel name, channel url) pairs to "新闻频道.txt".
    FileSave(save_path, filename, page_level_results)
    # One file per channel, prefixed with a 1-based index; enumerate
    # replaces the original hand-maintained counter. The loop variable is
    # named channel_url so it no longer shadows the `url` parameter.
    for i, (name, channel_url) in enumerate(page_level_results, start=1):
        # Send the same User-Agent as the first request (the original
        # omitted headers here, making the two requests inconsistent).
        page = requests.get(channel_url, headers=headers).content.decode("gbk", "ignore")
        newPageReults = Page_Level2(page)  # crawl the second-level page
        page_name = str(i) + '_' + name
        FileSave(save_path, page_name, newPageReults)  # save the listing


if __name__ == "__main__":
    start = time.time()
    print('start......')
    start_url = "http://news.163.com/rank/"
    spider(start_url)
    print('end')
    end = time.time()
    print('爬虫运行时间为%.4f秒' % (end - start))
