from concurrent.futures import ProcessPoolExecutor, as_completed
import csv
import os
import re

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

# Random Firefox User-Agent string, chosen once at import time.
ua = UserAgent().firefox

# proxies = {
#     "http": "http://121.4.103.166:7890"
# }
# Request headers shared by every scraping call in this module.
headers = {
    'User-Agent': ua
}


def transfer_date(str):
    """Zero-pad single-digit day and hour fields in an 'M-D H:MM'-style stamp.

    E.g. ``"3-15 7:30"`` becomes ``"03-15 07:30"`` so update times line up
    and sort consistently. The original computed exactly these two length
    checks and discarded the results, returning the input unchanged.

    NOTE(review): the builtin-shadowing parameter name ``str`` is kept to
    preserve the public signature.
    """
    # Pad the leading field (before the first '-') to two digits.
    if len(str.split('-')[0]) == 1:
        str = '0' + str
    # Pad a single-digit hour (between the space and the ':').
    # Guarded: the original did findall(...)[0] unconditionally and would
    # raise IndexError on strings without an " H:" segment.
    hour = re.findall(" (.*?):", str)
    if hour and len(hour[0]) == 1:
        str = str.replace(' ', ' 0', 1)
    return str


def save_to_local(list, folder_path, filename):
    """Write scraped book records to ``folder_path/filename`` as CSV.

    Each record is a dict with keys 'name', 'last_sentence', 'author' and
    'update_time'; missing keys are written as empty cells via ``dict.get``.

    NOTE(review): the builtin-shadowing parameter name ``list`` is kept to
    preserve the public signature.
    """
    print("准备保存到本地...")

    # Create the folder up front so open() cannot fail on a missing path.
    os.makedirs(folder_path, exist_ok=True)

    # newline='' is required by the csv module; without it every row is
    # followed by a blank line on Windows. os.path.join replaces fragile
    # string concatenation of the path parts.
    with open(os.path.join(folder_path, filename), 'w', encoding='utf-8', newline='') as file:
        csv_writer = csv.writer(file)
        # Header row.
        csv_writer.writerow(["小说名称", "最新章节标题", "作者", "更新时辰"])
        # Body rows.
        for o in list:
            csv_writer.writerow([o.get('name'), o.get('last_sentence'), o.get('author'), o.get('update_time')])
    print("保存成功!")
    return


def get_target_urls():
    """Return the category-listing URLs for pages 1 through 94."""
    return ['https://www.qb5.tw/fenlei/5_{}/'.format(page) for page in range(1, 95)]


def concurrent_get_books():
    """Fetch every category page concurrently, then save all books to one CSV.

    NOTE(review): this workload is network-bound, so a thread pool would be
    much cheaper than 50 processes — left as a process pool here to avoid
    changing runtime semantics in this review.
    """
    all_books = []
    urls = get_target_urls()
    with ProcessPoolExecutor(max_workers=50) as pool:
        futures = [pool.submit(get_books_cf, url) for url in urls]
        # Consume results inside the with-block so they stream in as they
        # complete. The original iterated as_completed() only after the
        # pool's blocking shutdown, so every future was already finished.
        for f in as_completed(futures):
            all_books.extend(f.result())

    # Single write once everything is collected.
    save_to_local(all_books, "../../resources/csv/", "wanbenbooks.csv")


def get_books_cf(target):
    """Scrape one category page and return its books as a list of dicts.

    Each dict carries 'name', 'last_sentence' (latest chapter title),
    'author' and 'update_time' (normalized via transfer_date).
    """
    # timeout keeps a pool worker from hanging forever on a dead connection.
    res = requests.get(target, headers=headers, timeout=10)
    soup = BeautifulSoup(res.text, 'lxml')

    # The original also re-parsed each <li> into a fresh BeautifulSoup
    # ("sequence") and never used it — dropped as dead, wasteful work.
    books = []
    for li in soup.select('#tlist>ul>li'):
        books.append({
            'name': li.select(".zp>a")[0].get("title"),
            'last_sentence': li.select(".zz>a")[0].get("title"),
            'author': li.select('.author')[0].get_text(),
            'update_time': transfer_date(li.select('.sj')[0].get_text()),
        })

    print("目标网址: {} 爬取结束".format(target))
    return books


if __name__ == '__main__':
    # Script entry point: crawl every category page, then write one CSV.
    concurrent_get_books()
