#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author  : hu_cl
# @Contact : 760730895@qq.com 
# @Date    : 2020/9/27 17:12
# @File    : houweidong.py
import random
from multiprocessing import Pool
import requests
from bs4 import BeautifulSoup
import time

req_url_base = 'https://www.libaiwu.com/'  # base URL of the novel site
req_url = req_url_base + "hwd/"  # table-of-contents URL for this particular novel
# HTTP request headers captured from a real browser session; a random
# User-Agent from user_agent_list is added at request time.
# NOTE(review): the cookie values are session-specific and presumably
# expired — verify they are still needed/valid before relying on them.
req_header = {
    "authority": "www.libaiwu.com",
    "method": "GET",
    "scheme": "https",
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,"
              "image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
    "accept-encoding": "gzip, deflate, br",
    "accept-language": "zh-CN,zh;q=0.9",
    "cache-control": "max-age=0",
    "sec-fetch-dest": "document",
    "sec-fetch-mode": "navigate",
    "sec-fetch-site": "same-origin",
    "sec-fetch-user": "?1",
    "upgrade-insecure-requests": "1",
    "cookie": "__cfduid=df1f2afb8899068b5edf38f28c590f32e1601197273;"
              " Hm_lvt_98f94e2ffe6382db2718ef2e438d6a91=1601197286; "
              "__gads=ID=198c7ba7d95c8e9e:T=1601197299:S=ALNI_MbZIG_EUDs3q7IrFm2_089qmCelrQ;"
              " Hm_lpvt_98f94e2ffe6382db2718ef2e438d6a91=1601197762",
    }

# Pool of desktop browser User-Agent strings; one is picked at random per
# request to look less like an automated client.
user_agent_list = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)",
    "Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
]


def down_chart(key, value, i):
    """Download one chapter page and hand its paragraphs to ``mycallback``.

    Args:
        key: Chapter title (used as the section heading in the output file).
        value: Absolute URL of the chapter page.
        i: 1-based chapter index, used only for progress messages.
    """
    print(f"{key}开始下载，请稍等。。。。{i}")
    name, link = key, value
    # Bounded retry loop: the original retried by recursing into itself on
    # every exception, which could recurse without limit on a persistently
    # failing URL and exhaust the stack.
    for _attempt in range(5):
        try:
            # Rotate the User-Agent per request.
            req_header['User-Agent'] = random.choice(user_agent_list)
            # BUG FIX: the header dict must be passed as `headers=`, not
            # `params=` — `params` appends the dict to the URL query string
            # and sends the request with no custom headers at all.
            r_chart = requests.get(link, headers=req_header)
            r_chart.encoding = 'utf-8'
            soup_chart = BeautifulSoup(r_chart.text, "html.parser")
            section_text = soup_chart.select('.entry-text p')
            # The first <p> is page boilerplate, not chapter text; guard so
            # an empty selection doesn't raise IndexError.
            if section_text:
                section_text.pop(0)
            mycallback(name, section_text, i)
            return
        except Exception as e:
            print(f'错误信息为{e},{name},{link},{i}')
            # Back off before retrying so a transient block can clear.
            time.sleep(8)


def mycallback(key, section_text, i):
    """Append one chapter's title and paragraphs to the output text file.

    Args:
        key: Chapter title, written as a heading before the body.
        section_text: Sequence of BeautifulSoup ``<p>`` tags with the body.
        i: Chapter index (unused here; kept for interface compatibility).
    """
    # Raw string so the backslashes in the Windows path are unambiguous
    # (byte-identical to the original literal, where \小 was not a valid
    # escape and the backslash was kept as-is).
    title = r'E:\小说\侯卫东官场笔记(官路风流).txt'
    # BUG FIX: use a context manager so the file handle is closed even if a
    # write raises; the original open()/close() pair leaked it on error.
    with open(title, 'a+', encoding='utf8') as f:
        f.writelines("\n" + key + "\n\n")
        for tag in section_text:
            # Skip empty <p> tags.
            if len(tag) > 0:
                f.writelines(tag.get_text() + "\n")
    print(f"{key}下载完毕，正在继续下载下一章")
    # Brief pause between chapters to avoid hammering the server.
    time.sleep(1)
    print('------------------------------------')

if __name__ == '__main__':
    # A single worker process keeps chapters downloading (and appending to
    # the output file) strictly in order.
    pool = Pool(1)
    req_header['User-Agent'] = random.choice(user_agent_list)
    # BUG FIX: pass the header dict as `headers=`, not `params=` (which
    # would serialize it into the URL query string instead).
    r = requests.get(req_url, headers=req_header)
    r.encoding = 'utf-8'
    soup = BeautifulSoup(r.text, "html.parser")
    # All chapter links in the table of contents.
    section_links = soup.select('.main .entry-text.clearfix .xsbox a[href]')
    # Map chapter title -> chapter URL, preserving page order.
    chart = {}
    for data in section_links:
        chart[data.get('title')] = data.get('href')
    # Drop a known-bad duplicate entry and any link that had no title.
    # Supplying a default keeps this from raising KeyError if the site's
    # listing changes and the entry is absent.
    chart.pop('侯卫东官场笔记 1223章 1222章以后 1224 1225', None)
    chart.pop(None, None)
    # Resume from chapter 1003 (earlier chapters were already downloaded);
    # enumerate supplies the 1-based chapter index.
    for i, (key, value) in enumerate(chart.items(), start=1):
        if i >= 1003:
            pool.apply_async(down_chart, (key, value, i))
    pool.close()
    pool.join()
    print("小说下载完毕")
