import csv
import os
import re

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

# Randomized Firefox User-Agent string, regenerated on each run to reduce
# the chance of the target site blocking repeated scripted requests.
ua = UserAgent().firefox

# Optional HTTP proxy configuration, currently disabled.
# proxies = {
#     "http": "http://121.4.103.166:7890"
# }
# Request headers shared by every page fetch in this script.
headers = {
    'User-Agent': ua
}


def transfer_date(str):
    """Zero-pad the leading date field and the hour field of a date string.

    Examples: '3-15 7:30' -> '03-15 07:30'; already-padded strings such as
    '12-31 23:59' are returned unchanged. Strings without a ' H:' time part
    only get the date field padded (the original code crashed on those with
    an IndexError).

    The original body computed the two single-digit checks but never used
    them and returned the input untouched; this completes that evidently
    intended normalization.
    """
    # NOTE(review): parameter name shadows the builtin `str`; kept so any
    # keyword-argument caller stays compatible.
    result = str
    # Pad the first '-'-separated field (e.g. the month in 'M-D H:MM').
    if len(result.split('-')[0]) == 1:
        result = '0' + result
    # Pad a single-digit hour; guard against inputs with no time component.
    hours = re.findall(r" (.*?):", result)
    if hours and len(hours[0]) == 1:
        result = result.replace(' ' + hours[0] + ':', ' 0' + hours[0] + ':', 1)
    return result


def save_to_local(list, folder_path, filename):
    """Write the scraped book records to a CSV file under *folder_path*.

    Parameters:
        list: sequence of dicts with keys 'name', 'last_sentence',
              'author', 'update_time' (missing keys are written as empty).
        folder_path: target directory; created if it does not exist.
        filename: name of the CSV file to create inside folder_path.
    """
    # NOTE(review): parameter name shadows the builtin `list`; kept so any
    # keyword-argument caller stays compatible.
    print("准备保存到本地...")

    # Create the folder up front so open() cannot fail on a missing path;
    # exist_ok avoids the check-then-create race of the original code.
    os.makedirs(folder_path, exist_ok=True)

    # newline='' is required by the csv module (otherwise blank rows appear
    # on Windows); os.path.join works whether or not folder_path has a
    # trailing separator.
    with open(os.path.join(folder_path, filename), 'w', encoding='utf-8', newline='') as file:
        csv_writer = csv.writer(file)
        # Header row
        csv_writer.writerow(["小说名称", "最新章节标题", "作者", "更新时辰"])
        # Body rows; .get() tolerates records with missing keys
        for o in list:
            csv_writer.writerow([o.get('name'), o.get('last_sentence'), o.get('author'), o.get('update_time')])
    print("保存成功!")
    return


def get_bookes():
    """Crawl pages 1..94 of the qb5.tw finished-books category and save
    every book's name, latest chapter, author and update time to a CSV.
    """
    print("开始")

    books = []
    # Pages run 1 through 94 (range upper bound 95 is exclusive).
    for i in range(1, 95):
        target = 'https://www.qb5.tw/fenlei/5_{}/'.format(i)
        # timeout so a single stalled page cannot hang the whole crawl
        res = requests.get(target, headers=headers, timeout=30)
        soup = BeautifulSoup(res.text, 'lxml')

        # Each <li> under #tlist>ul holds one book row.
        for li in soup.select('#tlist>ul>li'):
            books.append({
                'name': li.select(".zp>a")[0].get("title"),
                'last_sentence': li.select(".zz>a")[0].get("title"),
                'author': li.select('.author')[0].get_text(),
                'update_time': transfer_date(li.select('.sj')[0].get_text()),
            })
        print("目标网址: {} 爬取结束".format(target))
    # Dump the collected records to CSV.
    save_to_local(books, "../../resources/csv/", "wanbenbooks.csv")


# Script entry point: run the crawl only when executed directly,
# not when imported as a module.
if __name__ == '__main__':
    get_bookes()
