import re
from multiprocessing import Pool
import pymysql
import requests
from lxml import etree

def download_url(url):
    """Fetch *url* and return the page parsed as an lxml HTML tree.

    The target site serves GBK-encoded pages, so the response encoding
    is forced to "gbk" before the text is decoded.
    """
    headers = {
        'User-Agent':
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
            'Chrome/81.0.4044.122 Safari/537.36'
    }
    # BUG FIX: the original `requests.get(url, headers)` passed the dict as
    # the *params* positional argument (query string), so the User-Agent
    # header was never actually sent.  It must go through `headers=`.
    html = requests.get(url, headers=headers)
    html.encoding = "gbk"
    return etree.HTML(html.text)

def judge_links(url, link_list):
    """Resolve each raw href in *link_list* against the page *url*.

    Hrefs beginning with '/' are joined at the point where *url*'s
    '/index.html' suffix sits (or simply appended when there is none);
    other hrefs are treated as siblings of the index page.
    Returns the list of absolute links, in input order.
    """
    resolved = []
    has_index = '/index.html' in url
    for href in link_list:
        if href.startswith("/"):
            target = url.replace('/index.html', href) if has_index else url + href
        else:
            target = url.replace('index.html', href) if has_index else url + '/' + href
        resolved.append(target)
    return resolved

def chapters_title(url):
    """Scrape chapter titles and absolute chapter links from a catalogue
    page in the old table-based site layout.

    Returns (titles, links) as two parallel lists.
    """
    page = download_url(url)
    titles = page.xpath('//table[2]/tbody/tr/td/a/text()')
    raw_links = page.xpath('//table[2]/tbody/tr/td/a/@href')
    return titles, judge_links(url, raw_links)

def chapters_title2(url):
    """Scrape chapter titles and absolute chapter links from a catalogue
    page in the newer div/dl-based site layout.

    Returns (titles, links) as two parallel lists.
    """
    page = download_url(url)
    titles = page.xpath('//div[2]/div/dl/dd/a/text()')
    raw_links = page.xpath('//div[2]/div/dl/dd/a/@href')
    return titles, judge_links(url, raw_links)

def novel_collect(url):
    """Scrape an author page for their novels.

    Returns (novel_names, catalogue_links); links whose href contains
    "book" are resolved against the site root.
    """
    page = download_url(url)
    names = page.xpath('//tr/td/table/tbody/tr/td/strong/a/font/text()')
    raw_links = page.xpath(
        '//tr/td/table/tbody/tr/td/strong/a[contains(@href,"book")]/@href')
    return names, judge_links("https://www.kanunu8.com", raw_links)

def authors(url):
    """Walk the paginated writer index (.../files/writer/18-1.html,
    18-2.html, ...) until an empty page, collecting author names and
    their absolute page links.

    Returns (names, links) as two parallel lists.
    """
    raw_links = []
    names = []
    page_no = 1
    while True:
        page = download_url(url + "/files/writer/18-{}.html".format(page_no))
        hrefs = page.xpath('//tr[1]/td/table/tr/td/table/tr/td/a/@href')
        texts = page.xpath('//tr[1]/td/table/tr/td/table/tr/td/a/text()')
        raw_links.extend(hrefs)
        names.extend(texts)
        page_no += 1
        # An index page with no author links marks the end of pagination.
        if not hrefs:
            break
    return names, judge_links(url, raw_links)


def regular(url):
    """Extract (chapter_title, body_text) from a chapter page.

    Tries the old table-based layout first, then falls back to the newer
    div-based layout.  '\xa0\xa0' indentation runs are stripped from the
    title.  On any scraping error the exception is printed and None is
    returned (callers unpack the tuple and catch the resulting error).
    """
    page = download_url(url)
    try:
        title_old = page.xpath('/html/body/div/table[4]/tr[1]/td/strong/font/text()')
        title_new = page.xpath('//div[2]/div[2]/div/h1/text()')
        body_old = ''.join(page.xpath('/html/body/div/table[5]/tr/td[2]/p/text()'))
        body_new = ''.join(page.xpath('//div[2]/div[2]/div/div/p/text()'))
        if title_old:
            return title_old[0].replace('\xa0\xa0', ''), body_old
        return title_new[0].replace('\xa0\xa0', ''), body_new
    except Exception as e:
        print("爬取失败", str(e))

def create_tables():
    """(Re)create the three scraper tables in the `xiaoshuo` database.

    Drops any existing copy of each table first so the whole script can
    be re-run from scratch.  The connection and cursor are always closed.
    """
    db = pymysql.connect(host="localhost", user="root", password="root", database="xiaoshuo", charset='utf8')
    cur = db.cursor()
    try:
        cur.execute("drop table if exists authors;")
        sql1 = """CREATE TABLE authors (
                id  int(255)  primary key,
                author varchar(255) NOT NULL,
                novels text );"""
        cur.execute(sql1)
        cur.execute("drop table if exists Work_collection;")
        sql2 = """CREATE TABLE Work_collection (
                novel varchar(255) ,
                author varchar(255) NOT NULL,
                chapter varchar(255) ,
                content LONGTEXT);"""
        cur.execute(sql2)
        # BUG FIX: the original never dropped author_novels, so a second
        # run of the script failed with "table already exists".
        cur.execute("drop table if exists author_novels;")
        sql3 = """CREATE TABLE author_novels (
                author varchar(255) NOT NULL,
                novel varchar(255));"""
        cur.execute(sql3)
        db.commit()
    finally:
        cur.close()
        db.close()

def insert_authors(authors_name, author_links):
    """Insert one row per author into `authors`.

    Each row is (sequential id starting at 1, author name, the author's
    novel titles joined with '||').  Novel titles come from scraping the
    author's page via `novel_collect`.
    """
    db = pymysql.connect(host="localhost", user="root", password="root", database="xiaoshuo", charset='utf8')
    cur = db.cursor()
    try:
        for i, author_name in enumerate(authors_name):
            cata_names, cata_links = novel_collect(author_links[i])
            # FIX: use a parameterized query — scraped names can contain
            # quotes that would break (or inject into) a formatted string.
            sql = "INSERT IGNORE INTO authors VALUES (%s, %s, %s)"
            try:
                cur.execute(sql, (i + 1, author_name, '||'.join(cata_names)))
                db.commit()
                print('插入{}完成'.format(author_name))
            except Exception as e:
                print("插入失败", str(e))
    finally:
        cur.close()
        db.close()

def insert_novels(authors_name, author_links):
    """Insert one (author, novel) row into `author_novels` for every
    novel found on each author's page."""
    db = pymysql.connect(host="localhost", user="root", password="root", database="xiaoshuo", charset='utf8')
    cur = db.cursor()
    try:
        for i, author_name in enumerate(authors_name):
            cata_names, cata_links = novel_collect(author_links[i])
            for cata_name in cata_names:
                # FIX: parameterized query instead of string formatting —
                # scraped titles may contain quotes (SQL breakage/injection).
                sql = "INSERT IGNORE INTO author_novels VALUES (%s, %s)"
                try:
                    cur.execute(sql, (author_name, cata_name))
                    db.commit()
                    print('插入{}完成'.format(author_name))
                except Exception as e:
                    print("插入失败", str(e))
    finally:
        cur.close()
        db.close()

def insert_content(authors_name, author_links):
    """Scrape every chapter of every novel of every author and insert
    (novel, author, chapter_title, content) rows into `Work_collection`.

    Catalogue pages whose link ends in index.html use the old table
    layout (`chapters_title`); others use the newer layout
    (`chapters_title2`).  Failures are logged and skipped so one bad
    page does not abort the crawl.
    """
    db = pymysql.connect(host="localhost", user="root", password="root", database="xiaoshuo", charset='utf8')
    cur = db.cursor()
    try:
        for i, author_name in enumerate(authors_name):
            cata_names, cata_links = novel_collect(author_links[i])
            for j, cata_name in enumerate(cata_names):
                if 'index.html' in cata_links[j]:
                    chap_titles, new_chap_links = chapters_title(cata_links[j])
                else:
                    chap_titles, new_chap_links = chapters_title2(cata_links[j])
                if not chap_titles:
                    print("爬取{x}失败...\n已跳过".format(x=cata_name))
                    continue
                for n in range(len(chap_titles)):
                    try:
                        # regular() returns None on failure, so unpacking
                        # raises TypeError, caught below.
                        title, content = regular(new_chap_links[n])
                        if title:
                            # FIX: parameterized query — chapter text is full
                            # of quotes that broke the formatted SQL string.
                            sql = "INSERT IGNORE INTO Work_collection VALUES (%s, %s, %s, %s);"
                            try:
                                cur.execute(sql, (cata_name, author_name, title, content))
                                db.commit()
                                print('插入{}完成'.format(author_name))
                            except Exception as e:
                                print("插入失败", str(e))
                        else:
                            # BUG FIX: the original printed str(e) here, but
                            # `e` was undefined in this branch (NameError).
                            print("爬取失败", "标题为空")
                    except Exception as e:
                        print("爬取失败", str(e))
    finally:
        cur.close()
        db.close()


if __name__ == '__main__':
    # Each worker function opens (and now closes) its own MySQL
    # connection, so the extra module-level connection the original
    # script opened here was never used for any query — removed.
    authors_name, author_links = authors("https://www.kanunu8.com")
    create_tables()
    insert_authors(authors_name, author_links)
    insert_content(authors_name, author_links)
    insert_novels(authors_name, author_links)