import time
from multiprocessing.dummy import Pool
from urllib.parse import urljoin

import lxml.etree
import lxml.html
import pymysql
import requests



# Index page of the novel on kanunu8.com.
# NOTE(review): load_artcle() defines its own local copy of this URL, so this
# module-level value appears unused — confirm before removing.
url = r"https://www.kanunu8.com/book3/6633/"

def get_html_source(url):
    """Fetch *url* and return its HTML source decoded from GBK.

    The target site (kanunu8.com) serves GBK-encoded pages, so the raw
    bytes are decoded explicitly instead of trusting requests' guess.

    Raises:
        requests.HTTPError: if the server answers with an error status.
        requests.Timeout:   if the server does not respond within 10s.
    """
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:83.0)"}
    # timeout keeps one hung connection from stalling the whole crawl;
    # raise_for_status surfaces HTTP errors instead of silently decoding
    # an error page as if it were a chapter.
    response = requests.get(url, headers=headers, timeout=10)
    response.raise_for_status()
    return response.content.decode("GBK")

def get_chapter_html(html, url):
    """Extract the chapter links from the book's index page.

    Parameters:
        html: HTML source of the index page.
        url:  base URL of the book, used to resolve the hrefs.

    Returns:
        List of absolute chapter URLs, in page order.
    """
    selector = lxml.etree.HTML(html, lxml.etree.HTMLParser())
    # The chapter table on this site is the one with cellpadding="8";
    # its hrefs are relative filenames like "116007.html".
    hrefs = selector.xpath('//tr/td/table[@cellpadding="8"]/tbody/tr/td/a/@href')
    # urljoin resolves relative AND absolute hrefs correctly, unlike the
    # plain string concatenation it replaces (identical result for the
    # bare-filename hrefs this site emits).
    return [urljoin(url, href) for href in hrefs]

#
def get_chapter(html):
    """Parse one chapter page into its title and body paragraphs.

    Parameters:
        html: HTML source of a single chapter page.

    Returns:
        (chap_title, text): the text of the first <font> element (the
        chapter heading on this site), or "" if no <font> exists, and
        the list of text fragments from all <p> elements.
    """
    selector = lxml.etree.HTML(html, lxml.etree.HTMLParser())
    titles = selector.xpath('//font/text()')
    # Guard against pages without a <font> heading — the original code
    # raised IndexError here on such pages.
    chap_title = titles[0] if titles else ""
    text = selector.xpath('//p/text()')
    return chap_title, text

def create_db():
    """(Re)create the `qiuzhuang` table in the local `study` database.

    Drops any existing table first so repeated runs start from a clean
    slate.

    NOTE(review): credentials are hard-coded — fine for a local
    experiment, but move them to config/env for anything else.
    """
    sql_create = """CREATE TABLE `qiuzhuang`  (
                    `chapter_id` int(12) NOT NULL AUTO_INCREMENT,
                    `chapter_title` varchar(255) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT NULL,
                    `contents` text CHARACTER SET utf8 COLLATE utf8_general_ci NULL,
                    PRIMARY KEY (`chapter_id`) USING BTREE
                    )ENGINE = InnoDB AUTO_INCREMENT = 34 CHARACTER SET = utf8 COLLATE = utf8_general_ci ROW_FORMAT = Dynamic;"""

    my_db = pymysql.connect(host="localhost", user="root", password="123456", db="study")
    try:
        # Cursor context manager closes the cursor even on failure.
        with my_db.cursor() as cursor:
            cursor.execute("DROP TABLE IF EXISTS qiuzhuang")
            cursor.execute(sql_create)
        my_db.commit()
    finally:
        # The original leaked the connection if either execute raised.
        my_db.close()

def intoSql(chapter_url_list):
    """Download every chapter in *chapter_url_list* and insert it into MySQL.

    Chapter ids are assigned from the 1-based position in the list, so
    the list order defines the stored chapter order.

    Parameters:
        chapter_url_list: absolute chapter URLs, in book order.
    """
    my_db = pymysql.connect(host="localhost", user="root", password="123456", db="study")
    print("连接成功")
    # Hoisted out of the loop: the statement text never changes.
    sql = "INSERT into `qiuzhuang` (chapter_id,chapter_title,contents) values (%s,%s,%s)"
    try:
        with my_db.cursor() as cursor:
            for num_id, chapter_url in enumerate(chapter_url_list, start=1):
                html = get_html_source(chapter_url)
                chap_title, text = get_chapter(html)
                # xpath results may be lists of fragments; flatten to strings.
                title_str = "".join(chap_title)
                txt_str = "".join(text)
                # One row per statement, so plain execute() replaces the
                # original's single-element executemany().
                cursor.execute(sql, (num_id, title_str, txt_str))
                # Commit per chapter: already-stored chapters survive if a
                # later download fails (matches original behavior).
                my_db.commit()
        print("导入成功")
    finally:
        # The original leaked the connection on any mid-loop exception.
        my_db.close()


#
#
def load_artcle():
    """Entry point: scrape the whole book and load it into MySQL.

    NOTE(review): pool.map receives a single-element list, so all chapters
    are processed by ONE worker sequentially — the 6-thread pool adds no
    real parallelism. Splitting the URL list across workers would collide
    on intoSql's position-based chapter ids, so the single-task mapping is
    kept as-is and only the pool lifecycle is fixed.
    """
    url = r"https://www.kanunu8.com/book3/6633/"
    html = get_html_source(url)
    chapter_url_list = get_chapter_html(html, url)
    start = time.time()
    # `with` closes and joins the pool; the original leaked its worker threads.
    with Pool(6) as pool:  # 定义6个线程
        pool.map(intoSql, [chapter_url_list])
    end = time.time()
    print(f"The time for all work(With thread):{end-start}")



#
if __name__ == '__main__':
    create_db()  # drop & recreate the destination table from scratch
    load_artcle()  # main entry: scrape the book and load it into MySQL
