# -*- coding: utf-8 -*-
import re

import requests
from bs4 import BeautifulSoup as sp
import os
import threading
from concurrent.futures import ThreadPoolExecutor, as_completed
import time
from urllib.parse import urljoin
import pymysql


def connectDB():
    """Open and return a new PyMySQL connection to the local `text` database."""
    return pymysql.connect(
        host="localhost",    # server address
        port=3306,           # default MySQL port
        user="root",         # account name
        password="123456",   # account password
        database="text",     # schema name
        charset="utf8",      # connection character set
    )

# Create (recreate) the database table used by the crawler.
def createTale():
    """Drop any existing `novel` table and create a fresh, empty one.

    Raises whatever pymysql raises on failure, after printing a short
    diagnostic message.
    """
    conn = connectDB()  # acquire connection
    try:
        # Acquire the cursor inside its own try/finally so a failure in
        # conn.cursor() cannot leave `cursor` undefined when cleanup runs
        # (the original referenced cursor in `finally` before it was bound).
        cursor = conn.cursor()
        try:
            cursor.execute("drop table if exists novel")  # remove stale table
            cursor.execute("create table novel("
                           "id int(6) not null primary key,"
                           "title varchar(30),"
                           "content TEXT)")
            conn.commit()  # persist the DDL
        finally:
            cursor.close()  # always release the cursor
    except Exception:
        print("建表失败")
        raise
    finally:
        conn.close()  # always release the connection

# Fetch a page and return its HTML.
def getHTMLText(url, timeout=3000) -> "str | None":
    """Fetch *url* and return its decoded HTML text, or None on any request error.

    NOTE(review): requests interprets `timeout` in SECONDS, so the default
    of 3000 is ~50 minutes — it was likely intended as milliseconds. Kept
    unchanged for backward compatibility; callers should pass a sane value.
    """
    try:
        r = requests.get(url, timeout=timeout)
        r.raise_for_status()  # raise on 4xx/5xx
        # The site misreports its charset; trust the content-sniffed encoding
        # so Chinese text decodes correctly.
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException as e:
        print("Request Error: ", format(e))
        return None


# Scrape the novel's index page for chapter names and links.
def getUrls(baseUrl='https://www.kanunu8.com/book3/6633/', timeout=30) -> list:
    """Return a list of (chapter_title, absolute_url) tuples from the index page.

    Returns an empty list when the page cannot be fetched or the expected
    chapter table is missing (the original crashed with AttributeError in
    that case).
    """
    text = getHTMLText(baseUrl, timeout)
    if text is None:
        return []
    soup = sp(text, 'lxml')
    # The chapter list lives in one specifically-styled table on this site.
    table = soup.find('table', {'cellspacing': 1, 'bgcolor': '#d4d0c8'})
    if table is None:
        return []  # layout changed or unexpected page content
    return [(a.get_text().strip(), urljoin(baseUrl, a.attrs['href']))
            for a in table.findAll('a')]


# Parse one chapter page into (title, body text).
def parse(text: str) -> tuple:
    """Extract the chapter title and body text from a chapter's HTML.

    NOTE(review): tightly coupled to this site's fixed layout — assumes the
    first <p> holds the body and the first <font> holds the title; verify
    against the live pages if the site changes.
    """
    title = ''
    context = ''
    soup = sp(text, 'lxml')
    # Children of the first <p>: a mix of text nodes and tags (e.g. <br>).
    tag = soup.p.contents
    # Joining a Tag iterates its string children — builds the title text.
    title = ''.join(soup.find('font'))
    # len(x) > 5 keeps long text fragments and drops short tags/strings
    # (for a Tag, len() is its child count; for a string, its length).
    context = ''.join([x for x in tag if len(x) > 5])
    return (title, context)


# Thread-pool worker entry point.
def MyPare(url, n):
    """Fetch one chapter page, parse it, and insert it into the `novel` table.

    Args:
        url: chapter page URL.
        n: sequence number, used as the row id when no digits appear in the title.

    Returns:
        The chapter title on success, ``url + ' Failed '`` when the fetch
        fails, or '' when parsing fails.
    """
    title = ''
    text = getHTMLText(url)
    if text is None:
        # Fetch failed — no DB resources were opened yet, nothing to clean up.
        # (The original opened the connection before this check and leaked it.)
        return url + ' Failed '
    conn = connectDB()
    cursor = conn.cursor()
    try:
        title, content = parse(text)
        # Prefer the chapter number embedded in the title; fall back to n.
        ids = re.findall(r"\d+", title)
        chapter_id = ids[0] if ids else n  # avoid shadowing builtin `id`
        try:
            # Parameterized query: the driver quotes/escapes the values,
            # fixing the SQL-injection / broken-quoting bug in the original
            # %-formatted statement.
            cursor.execute(
                "insert into novel(id,title,content) values (%s,%s,%s)",
                (chapter_id, title, content))
            conn.commit()
        except Exception as e:
            print(title + '写入失败', format(e))
    except Exception as e:
        print('Error : ' + url, format(e))
    finally:
        # Always release DB resources, even when parse() raises
        # (the original only closed them on the insert path).
        cursor.close()
        conn.close()
    return title


def main(baseUrl):
    """Fetch every chapter of the novel concurrently and store it in MySQL.

    Submits one MyPare task per chapter to a bounded thread pool and prints
    per-task and total elapsed times.
    """
    begin = time.time()
    # Use ONE context-managed pool. The original created a second
    # ThreadPoolExecutor(8), submitted all work to it, and never shut it
    # down, while the `with`-managed pool sat unused.
    with ThreadPoolExecutor(max_workers=8) as executor:
        futures = [executor.submit(MyPare, url, n)
                   for n, (title, url) in enumerate(getUrls(baseUrl), start=1)]

        print("所有线程开始: ", time.time() - begin)
        for f in as_completed(futures):
            print(f.result(), " Done: ", time.time() - begin)

    print("ALL Done: ", time.time() - begin)


if __name__ == '__main__':
    # Rebuild the destination table, then crawl the whole novel.
    createTale()
    # Index page of the novel to download.
    main(r'https://www.kanunu8.com/book3/6633/')
