# -*- coding: utf-8 -*-
import random
from urllib.request import quote
from time import sleep
import os
import requests
from lxml import etree
from retrying import retry


def get_ua():
    """Build a randomized desktop-Chrome User-Agent string."""
    platforms = (
        '(Windows NT 6.1; WOW64)',
        '(Windows NT 10.0; WOW64)',
        '(X11; Linux x86_64)',
        '(Macintosh; Intel Mac OS X 10_12_6)',
    )
    # Randomize the Chrome version components so repeated runs vary.
    major = random.randint(55, 62)
    build = random.randint(0, 3200)
    patch = random.randint(0, 140)
    version = 'Chrome/{}.0.{}.{}'.format(major, build, patch)
    return 'Mozilla/5.0 {} AppleWebKit/537.36 (KHTML, like Gecko) {} Safari/537.36'.format(
        random.choice(platforms), version)


# Randomized User-Agent, generated once per run.
UA = get_ua()

# Default headers sent with every Baidu search request.
headers = {
    "User-Agent": UA,
    "Accept-Encoding": "gzip, deflate, br",
    "Host": "www.baidu.com",
    "Upgrade-Insecure-Requests": "1",
}


def make_dir(path):
    """Create directory *path*, creating missing parents as needed.

    Fix over the original bare ``os.mkdir``: does not raise when the
    directory already exists (``exist_ok=True``) and can create
    intermediate directories.

    :param path: directory path to create.
    """
    os.makedirs(path, exist_ok=True)

@retry(stop_max_attempt_number=5)
def parse_url(url):
    """Fetch *url* and return the parsed lxml HTML tree.

    Retries up to 5 times (via ``retrying``) on any exception; sleeps 3s
    before re-raising on a non-200 status to avoid hammering the server.

    :param url: absolute URL to request.
    :return: ``lxml.etree`` element tree of the response body.
    :raises Exception: when the response status is not 200 (after retries).
    """
    resp = requests.get(url, headers=headers, timeout=30)
    if resp.status_code != 200:
        sleep(3)
        # Fix: include the status code so retry failures are diagnosable
        # (the original raised a bare, message-less Exception).
        raise Exception("request failed with status {}".format(resp.status_code))
    html = etree.HTML(resp.content.decode())
    return html


def parse_data(search_url, wd, path, i):
    """Fetch one Baidu result page and save each result to a text file.

    Each result is appended to ``path/{i}_{rank}.txt`` containing the
    result URL (when present), the title, and the snippet text.

    :param search_url: URL of the result page to fetch.
    :param wd: search keyword (unused here; kept for interface compatibility).
    :param path: directory where the result files are written.
    :param i: 1-based page number, used in output file names.
    :return: the parsed lxml tree, or None when the request failed.
    """
    try:
        # Send the request (retried internally by parse_url)
        html = parse_url(search_url)
    except Exception:  # fix: was a bare except that also caught KeyboardInterrupt
        print("百度请求失败,请重新运行")
        return None
    # Each result lives in one <div> under #content_left
    contents = html.xpath("//div[@id='content_left']/div")
    # Fix: enumerate replaces the O(n) contents.index(con) lookup per item
    for rank, con in enumerate(contents, start=1):
        title_list = con.xpath("./h3/a//text()")
        if not title_list:
            continue
        href = con.xpath("./h3/a/@href")
        out_file = os.path.join(path, "{}_{}.txt".format(i, rank))
        with open(out_file, "a+", encoding="utf-8-sig") as f:
            # Save the result URL first, when present
            if href:
                f.write(href[0] + "\n")
            f.write("".join(title_list).strip() + "\n")
            # Fix: query the snippet relative to this result node instead of
            # re-selecting the same <div> from the document root by position.
            snippet = con.xpath(".//div[@class='c-span18 c-span-last']//text()")
            f.write("".join(snippet))
    return html


def run(search_url, wd, path):
    """Crawl up to 10 Baidu result pages, following the next-page link.

    :param search_url: URL of the first result page.
    :param wd: search keyword, forwarded to parse_data.
    :param path: output directory for result files.
    """
    for page in range(1, 11):
        html = parse_data(search_url, wd, path, page)
        # Fix: parse_data returns None when the request failed; the original
        # then crashed with AttributeError on html.xpath. Stop cleanly instead.
        if html is None:
            break
        # Follow the "next page" link; stop when there is none.
        next_link = html.xpath("//a[text()='下一页>']/@href")
        if not next_link:
            break
        search_url = "https://www.baidu.com" + next_link[0]
        sleep(5)  # be polite between page fetches


# 运行入口
# Script entry point
if __name__ == '__main__':
    search_url = "https://www.baidu.com/s?wd={}"
    # Read the search keyword from the user
    wd = input("请输入关键词:")
    # Create the output directory next to this script
    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    path = os.path.join(BASE_DIR, "{}(百度搜索)".format(wd))
    try:
        os.mkdir(path)
    except FileExistsError:
        # Fix: was a bare except hiding all errors (e.g. permission denied).
        # An existing directory is fine — files are opened in append mode.
        pass
    # Build the first result-page URL with the percent-encoded keyword
    search_url = search_url.format(quote(wd))
    run(search_url, wd, path)
