# -*- coding: utf-8 -*-
from selenium import webdriver
from lxml import etree
import os
import time

# Global configuration
save_path = 'D:\\桌面'  # directory where result files are written (Windows path)
filename = "结果集"  # default output basename; overwritten by user input in __main__


# 保存为TXT文档
def FileSave(results):
    """Append each entry of *results* as a line to <save_path>/<filename>.txt.

    The file is opened in append mode with UTF-8 encoding, so repeated
    runs accumulate results instead of overwriting them.
    """
    # makedirs with exist_ok=True creates missing parent directories too and
    # avoids the check-then-create race that os.path.exists + os.mkdir had.
    os.makedirs(save_path, exist_ok=True)
    # os.path.join handles the separator portably instead of hand-concatenating.
    path = os.path.join(save_path, filename + ".txt")
    with open(path, 'a+', encoding='utf-8') as fp:
        for item in results:
            fp.write("%s\n" % item)


# 一级页面爬取
# First-level page scraping
def Page_Level(myPage):
    """Parse one Baidu search-result page into a list of text entries.

    Each result div under #content_left contributes one entry combining its
    ``data-tools`` attribute (title/link metadata) and its abstract text.
    Result containers without ``data-tools`` (ads, widgets) are skipped.
    """
    dom = etree.HTML(myPage)
    results = []
    # Each direct <div> child of #content_left is one search-result container.
    for channel in dom.xpath('//*[@id="content_left"]/div'):
        summary = channel.xpath('div[@class="c-abstract"]/text()')
        title_link = channel.xpath('div[@class="f13"]/div[1]/@data-tools')
        # Explicit guard instead of the original bare `except: pass`, which
        # silently swallowed *every* error, not just missing attributes.
        if not title_link:
            continue
        print(title_link[0])
        # Join the abstract's text nodes; str(summary) wrote the Python list
        # repr (brackets and quotes) into the saved output.
        results.append("【" + title_link[0] + "\n" + "".join(summary) + "】\n\n")
    return results


# 爬虫
# Scraper entry point for a single page
def spider(myPage):
    """Extract results from one page's HTML source and persist them."""
    # Parse first, then hand the extracted entries to the chosen storage backend.
    FileSave(Page_Level(myPage))


# 运行
# Entry point: prompt for a query, crawl N result pages, save extracted text.
if __name__ == "__main__":
    key_world = input("请输入您想要搜索的内容：")
    num = int(input("请输入您想要爬取的页数："))
    filename = input("请输入您想要存储的文件名：")
    # One URL per requested page; Baidu paginates 10 results per page via pn=.
    urls = [
        "https://www.baidu.com/s?wd=" + key_world + "&pn=" + str(
            i * 10) + "&oq=" + key_world + "&ie=utf-8&rsv_pq=da6a000c0001b80d&rsv_t=d7f5m3K7D2ij87xFqs1%2FBxTHWIIxqW6xVfayx6TdZRvpTSctGF1ObM1gLKs"
        for i in range(num)
    ]

    driver = webdriver.Firefox()
    try:
        for u in urls:
            driver.get(u)
            myPage = driver.page_source  # rendered HTML source of the page
            spider(myPage)
            time.sleep(2)  # throttle between requests
    finally:
        # Always release the browser process, even if a page load or parse
        # raises — the original leaked a Firefox instance on any error.
        driver.quit()
