import re

import lxml.etree
import lxml.html
import pymysql
import requests
import tornado.ioloop
import tornado.web
from concurrent.futures import ThreadPoolExecutor, as_completed
from multiprocessing.dummy import Pool


def get_html(url):
    """Fetch *url* and return its text decoded with the sniffed encoding.

    A browser-like User-Agent is sent because sites like this commonly
    reject the default requests UA.  ``apparent_encoding`` is used since
    the pages may not declare a correct charset — presumably they are
    GBK-encoded Chinese pages (TODO confirm).  A timeout is set so a
    stalled request cannot hang the whole crawl.
    """
    headers = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:87.0) Gecko/20100101 Firefox/87.0"}
    response = requests.get(url, headers=headers, timeout=30)
    response.encoding = response.apparent_encoding
    return response.text


# Collect the URLs of all paginated writer-index pages.
def get_writer_url(url):
    """Return a list of every writer-index page URL on the site.

    Reads the pager links from the first index page, takes the page number
    out of the last link (links look like ``.../18-<N>.html``), and builds
    one URL per page from 1 to N.
    """
    text = get_html(url)
    selector = lxml.etree.HTML(text, lxml.etree.HTMLParser())
    url_list = selector.xpath('/html/body/div/table/tr/td[2]/table[2]/tr/td/a/@href')
    # raw string for the regex: '-(\d+)' without r'' is an invalid escape
    last_url_num = re.findall(r'-(\d+)', url_list[-1])
    return [
        'https://www.kanunu8.com/files/writer/18-{}.html'.format(page)
        for page in range(1, int(last_url_num[0]) + 1)
    ]


# Collect every work-collection name and URL from a writer-index page.
def get_url_list(homePageUrl):
    """Return ``(urls, names)`` for the author entries on *homePageUrl*.

    ``urls`` are absolute links to each author's work-collection page and
    ``names`` are the author names from the same anchors, in document
    order (the two lists are parallel).
    """
    text = get_html(homePageUrl)
    selector = lxml.etree.HTML(text, lxml.etree.HTMLParser())
    url_list = selector.xpath('/html/body/div/table/tr/td/table/tr/td/table/tr/td/table/tr[1]/td[2]/a/@href')
    # author names sit in the same anchors as the hrefs above
    name_list = selector.xpath('/html/body/div/table/tr/td/table/tr/td/table/tr/td/table/tr[1]/td[2]/a/text()')
    # hrefs are site-relative; prefix the host to make them absolute
    urlss = ['https://www.kanunu8.com' + href for href in url_list]
    return (urlss, name_list)


# Collect the work names and URLs inside one author's collection page.
def get_Work_collection_list(Work_collection_url):
    """Return ``(urls, names)`` for the works on a collection page.

    ``urls`` are absolute links to each work's chapter index; ``names``
    are the titles wrapped as ``《title》|`` — the trailing ``|`` is the
    separator the web front-end later splits on.

    Returns ``None`` when the page cannot be fetched or parsed, so
    callers must check before indexing the result.
    """
    try:
        text = get_html(Work_collection_url)
        selector = lxml.etree.HTML(text, lxml.etree.HTMLParser())
        url_list = selector.xpath('//td[@class="p10-24"]/strong/a/@href')
        # some collection pages use a different cell class; fall back to it
        if not url_list:
            url_list = selector.xpath('//td[@class="p10-21"]/strong/a/@href')
        name_list = selector.xpath('//td/strong/a/font/text()')
        name_list1 = ['《{}》|'.format(name) for name in name_list]
        Work_collection_list = ['https://www.kanunu8.com{}'.format(href) for href in url_list]
        return (Work_collection_list, name_list1)
    except Exception:
        # narrowed from a bare except (which also caught KeyboardInterrupt);
        # best-effort crawl: print a marker and fall through to None
        print("pass1")


#  Parse one work's chapter links and chapter names.
def get_works(works_url):
    """Return ``(chapter_urls, chapter_names)`` for the work at *works_url*.

    Chapter hrefs on the index page are relative, so each one is appended
    to *works_url* to build an absolute link.  Returns ``None`` when the
    page layout does not match (either list empty) or the fetch/parse
    fails, so callers must check the result.
    """
    try:
        text = get_html(works_url)
        selector = lxml.etree.HTML(text, lxml.etree.HTMLParser())
        hrefs = selector.xpath('/html/body/div/div/div/dl/dd/a/@href')
        title_url = [works_url + href for href in hrefs]
        chap_title_name = selector.xpath('/html/body/div/div/div/dl/dd/a/text()')
        if not title_url or not chap_title_name:
            print("网页匹配内容为空——pass")
        else:
            return (title_url, chap_title_name)
    except Exception:
        # narrowed from a bare except (which also caught KeyboardInterrupt)
        print("pass2")


# Fetch one chapter page and extract its title and body text.
def get_txt(url):
    """Return ``(title, body)`` for the chapter page at *url*.

    Tries the current page layout first; when either part comes back
    empty, falls back to the legacy table-based layout.  Returns
    ``('', '')`` on any fetch/parse failure so callers never crash.
    """
    try:
        text = get_html(url)
        selector = lxml.etree.HTML(text, lxml.etree.HTMLParser())
        # chapter title in the current layout
        title = "".join(selector.xpath('/html/body/div[2]/div[2]/div/h1/text()'))
        # body paragraphs in the current layout
        body = "".join(selector.xpath('/html/body/div[2]/div[2]/div/div/p/text()'))
        if title == '' or body == '':
            # legacy table-based layout used by older pages
            title = "".join(selector.xpath('/html/body/div/table[4]/tr[1]/td/h2/font/text()'))
            body = "".join(selector.xpath('/html/body/div/table[5]/tr/td[2]/p/text()'))
        return (title, body)
    except Exception:
        # narrowed from a bare except; keep the documented empty-pair contract
        return ('', '')


def jianbiao1():
    """(Re)create the ``contents`` table that stores chapter text.

    Drops any existing table first so a re-run of the crawler starts from
    a clean slate.  The cursor is used as a context manager and the
    connection closed in ``finally`` so neither leaks on an error.
    """
    connect = pymysql.connect(host='localhost', port=3306, user='root', password='123456', database='frist_db')
    try:
        with connect.cursor() as cursor:
            cursor.execute('set sql_notes = 0')  # 关闭警告信息 (silence "table doesn't exist" notes)
            cursor.execute('DROP TABLE IF EXISTS contents')
            cursor.execute("""CREATE TABLE contents(
                name VARCHAR(225),
                title VARCHAR(225) NOT NULL,
                content LONGTEXT )""")
    finally:
        connect.close()


def jianbiao2():
    """(Re)create the ``authors`` table (author name + '|'-joined works).

    Drops any existing table first so a re-run of the crawler starts from
    a clean slate.  The cursor is used as a context manager and the
    connection closed in ``finally`` so neither leaks on an error.
    """
    connect = pymysql.connect(host='localhost', port=3306, user='root', password='123456', database='frist_db')
    try:
        with connect.cursor() as cursor:
            cursor.execute('set sql_notes = 0')  # 关闭警告信息 (silence "table doesn't exist" notes)
            cursor.execute('DROP TABLE IF EXISTS authors')
            cursor.execute("""CREATE TABLE authors (
                author VARCHAR(225) not null ,
                works VARCHAR (1000))""")
    finally:
        connect.close()


# Persist one author row into the database.
def insert_data1(author, works):
    """Insert a single ``authors`` row.

    *works* is the '|'-joined string of that author's work titles.  A
    single row needs plain ``execute`` (the original wrapped one tuple in
    a list for ``executemany``); cleanup is guaranteed via ``finally``.
    """
    connect = pymysql.connect(host='localhost', port=3306, user='root', password='123456', database='frist_db')
    try:
        with connect.cursor() as cursor:
            cursor.execute("INSERT INTO authors(author,works) VALUES(%s,%s)", (author, works))
        connect.commit()
    finally:
        connect.close()


def insert_data2(author, works, content=''):
    """Insert a single chapter row into ``contents``.

    Bug fix: the original bound only two values to a three-placeholder
    INSERT, so every call raised.  The chapter text is now an explicit
    third parameter; it defaults to ``''`` so any existing two-argument
    call keeps working instead of crashing.
    """
    connect = pymysql.connect(host='localhost', port=3306, user='root', password='123456', database='frist_db')
    try:
        with connect.cursor() as cursor:
            cursor.execute(
                "INSERT INTO contents(name,title,content) VALUES(%s,%s,%s)",
                (author, works, content),
            )
        connect.commit()
    finally:
        connect.close()

# Small-scale end-to-end test: crawl one writer-index page into MySQL.
def crawling_test(url):
    """Rebuild both tables, then store every author found at *url*.

    Robustness fix: ``get_Work_collection_list`` returns ``None`` when a
    fetch/parse fails; such authors are now skipped instead of crashing
    on ``list2[1]`` with a TypeError.
    """
    jianbiao1()
    jianbiao2()
    collection_urls, author_names = get_url_list(url)
    for collection_url, raw_name in zip(collection_urls, author_names):
        author = "".join(raw_name)
        collection = get_Work_collection_list(collection_url)
        if collection is None:
            continue  # fetch/parse failed for this author; skip it
        insert_data1(author, "".join(collection[1]))


class MenuHandler(tornado.web.RequestHandler):
    """Render the chapter menu page for one work."""

    def get(self, workid):
        # Pull every chapter title stored for this work and hand the
        # list to the menu template as one-element rows.
        connection = pymysql.connect(host='localhost', user='root', password='123456', db='novel', port=3306, charset='utf8')
        cursor = connection.cursor()
        cursor.execute("SELECT title FROM contents where name = %s", (workid,))
        items = [[row[0]] for row in cursor]
        cursor.close()
        connection.close()
        self.render("menu.html", title=workid, items=items)


class ChapterHandler(tornado.web.RequestHandler):
    """Render one chapter: its title and body paragraphs."""

    def get(self, chapid):
        connection = pymysql.connect(host='localhost', user='root', password='123456', db='novel', port=3306, charset='utf8')
        cursor = connection.cursor()
        cursor.execute("SELECT title,content FROM contents WHERE title = %s ", (chapid,))
        chapinfo = {}
        for title, body in cursor:
            chapinfo["title"] = title
            # stored text uses CRLF between paragraphs; split it back out
            # so the template can emit one element per paragraph
            chapinfo["content"] = body.split("\r\n")
        cursor.close()
        connection.close()
        self.render("content.html", item=chapinfo)


class worksHandler(tornado.web.RequestHandler):
    """Render the list of works belonging to one author."""

    def get(self, autherid):
        connection = pymysql.connect(host='localhost', user='root', password='123456', db='novel', port=3306, charset='utf8')
        cursor = connection.cursor()
        cursor.execute("SELECT author,works FROM authors WHERE author = %s ", (autherid,))
        chapinfo = {}
        for author, works in cursor:
            chapinfo["author"] = author
            # works were stored as a single '|'-separated string
            chapinfo["works"] = works.split("|")
        cursor.close()
        connection.close()
        self.render("works.html", item=chapinfo)


class authorsHandler(tornado.web.RequestHandler):
    """Render the front page: the list of all crawled authors."""

    def get(self):
        # Consistency fix: use the ``password=`` keyword like every other
        # handler in this file (``passwd`` is a legacy pymysql alias).
        cnx = pymysql.connect(host='localhost', user='root', password='123456', db='novel', port=3306, charset='utf8')
        cursor = cnx.cursor()
        cursor.execute("SELECT author FROM authors  ")
        # one-element rows, matching what authors.html iterates over
        items = [[row[0]] for row in cursor]
        cursor.close()
        cnx.close()
        self.render("authors.html", title="", items=items)


def make_app():
    """Build the tornado application with all URL routes."""
    routes = [
        (r"/menu/(.*)", MenuHandler),
        (r"/chap/(.*)", ChapterHandler),
        (r"/works/(.*)", worksHandler),
        (r"/", authorsHandler),
    ]
    return tornado.web.Application(routes)


def main():
    """Start the novel-reader web server on port 7778 (blocks forever).

    The crawl entry point (``crawling_test``) is run separately to
    populate the database before serving; dead commented-out crawl code
    was removed from this function.
    """
    app = make_app()
    app.listen(7778)
    tornado.ioloop.IOLoop.current().start()


# Start the server only when executed as a script, not on import.
if __name__ == "__main__":
    main()