import re
import urllib.request
import urllib
import pymysql
import tornado.ioloop
import tornado.web
#爬取的部分主体分为四个步骤：爬取小说类、提取作家名称以及小说、获取小说章节名、获取章节内容
# 获取主网页
def writer():
    """Fetch the kanunu8.com front page and return the author-category links.

    Returns:
        list[tuple[str, str]]: ``(relative_url, category_name)`` pairs scraped
        from entries shaped like ``---- <a href="author2.html">...</a>``.
    """
    # Context manager closes the HTTP response; the original leaked the socket.
    with urllib.request.urlopen("https://www.kanunu8.com", timeout=30) as resp:
        html = resp.read().decode("gbk")  # site is GBK-encoded
    pattern = re.compile(' ---- <a href="(.*?)">(.*?)</a>')
    return pattern.findall(html)

# 以及作家分类列表链接
def writer_devide(urls=None):  # glue relative paths onto the site root, e.g. ('https://www.kanunu8.com/author2.html', '外国作家')
    """Turn relative category links into absolute URLs.

    Args:
        urls: optional list of ``(relative_path, name)`` pairs. When ``None``
            (the default, preserving the original no-argument behavior), the
            pairs are fetched via ``writer()``.

    Returns:
        list[tuple[str, str]]: ``(absolute_url, name)`` pairs.
    """
    if urls is None:
        urls = writer()
    # Original built this with an index loop; a comprehension says the same thing.
    return [('https://www.kanunu8.com' + '/' + str(path), name) for path, name in urls]

def connect():
    """Open and return a pymysql connection to the local ``text`` database."""
    return pymysql.connect(
        host="localhost",
        user="root",
        password="123456",
        database="text",
    )
def creat_db():
    """(Re)create the ``AllNovel`` table, dropping any previous copy.

    Raises:
        Exception: re-raises whatever pymysql raised after printing a
        failure message, so the caller sees the real error.
    """
    conn = connect()
    cursor = conn.cursor()
    try:
        cursor.execute("drop table if exists AllNovel")
        cursor.execute("create table AllNovel("
                       "auther_name TEXT,"
                       "book_name TEXT,"
                       "chapter_name TEXT,"
                       "novel TEXT)")
        conn.commit()
        print('建表成功')
    except Exception:  # narrowed from bare except; still re-raised below
        print('建表失败')
        raise
    finally:
        # Close the cursor BEFORE its connection; the original closed the
        # connection first, leaving the cursor to be closed on a dead handle.
        cursor.close()
        conn.close()

def writer_name():  # extract author names and novels, then scrape and store every chapter
    """Crawl category -> author -> book -> chapter pages and insert each
    chapter's text into the AllNovel MySQL table.

    NOTE(review): the index ``i`` is reused by several *nested* loops, the
    local ``writer`` result shadows the module-level ``writer()`` function,
    and the inner ``for i in range(len(con2))`` loop only leaves the LAST
    ``(url, name)`` pair bound before the print below — confirm this is the
    intended crawl order before restructuring.
    """
    conn = connect()
    cursor = conn.cursor()

    con1 = writer_devide()
    # print(con1)
    for i in range(len(con1)):
        big_writer_url = con1[i][0]
        big_writer_name = con1[i][1]
        # print(big_writer_url)
        big_writer_html = urllib.request.urlopen(big_writer_url).read()  # category page HTML
        big_writer_html = big_writer_html.decode("gbk")
        # print(big_writer_html)
        reg = '<p><a href="(.*?)">(.*?)</a></p>'
        big_writer_reg = re.compile(reg, re.S)
        writer = re.findall(big_writer_reg, big_writer_html)
        # print(writer)
        con2 = []  # join relative author links onto the site root
        for i in range(len(writer)):
            ht = 'https://www.kanunu8.com' + '/' + str(writer[i][0])
            name = writer[i][1]
            tup = (ht, name)
            con2.append(tup)
            # print(con2)
            # This inner loop rebinds auther_url/auther_name for every entry
            # collected so far; only the last pair survives past the loop.
            for i in range(len(con2)):
                auther_url = con2[i][0]
                auther_name = con2[i][1]
                '''作家名称'''
            print("作者：", auther_name)
            try:
                '''作家小说集源代码，爬取作家小说链接和名字'''
                auther_html = urllib.request.urlopen(auther_url).read()
                auther_html = auther_html.decode("gbk")
                reg1 = '<a target="_blank" href="(.*?)"><font color="#dc143c" size="4">(.*?)</font></a>'
                auther_reg = re.compile(reg1, re.S)
                writer_chapter = re.findall(auther_reg, auther_html)

                '''正则表达式判断格式1艾萨克·阿西莫夫作品集'''
                # Fallback pattern for author pages laid out with <strong> tags.
                if writer_chapter == []:
                    reg2 = '<strong>.*?<a target="_blank" href="(.*?)">(.*?)</a>.*?</strong>'
                    auther_reg = re.compile(reg2, re.S)
                    writer_chapter = re.findall(auther_reg, auther_html)

                '''排除过往代码里两种异常情况'''
                # Mutate a copy while iterating the original so removals
                # don't skip elements.
                writer_chapter2 = writer_chapter[:]
                for (url, bookName) in writer_chapter:
                    '''删除过长的匹配'''
                    if len(url) > 50:
                        writer_chapter2.remove((url, bookName))
                    """在这里为提取的name名字过多，做再次正则表达式提取所需要的信息"""
                    if len(bookName) > 30:
                        reg_in = '<img .* alt="(.*?)" width=.*?'
                        auther_reg = re.compile(reg_in, re.S)
                        contentNeed = re.findall(auther_reg, bookName)
                        # print(contentNeed)
                        writer_chapter2.remove((url, bookName))
                        writer_chapter2.append((url, contentNeed[0]))
                writer_chapter = writer_chapter2

                # print(writer_chapter)
                con3 = []  # join relative book links onto the site root
                for i in range(len(writer_chapter)):
                    ht = 'https://www.kanunu8.com' + '/' + str(writer_chapter[i][0])
                    name = writer_chapter[i][1]
                    tup = (ht, name)
                    con3.append(tup)
                for i in range(len(con3)):
                    book_url = con3[i][0]
                    book_name = con3[i][1]
                    '''书籍名称'''
                    print(book_name)

                    '''作家小说集源代码，爬取个体小说章节的链接和名称'''
                    book_html = urllib.request.urlopen(book_url,timeout=30).read()
                    book_html = book_html.decode("gbk")
                    reg1 = '<td><a href="(.*?)">(.*?)/a></td>'
                    book_reg = re.compile(reg1, re.S)
                    book_chapter = re.findall(book_reg, book_html)
                    # print(book_chapter)
                    # join chapter links onto the book URL
                    con4 = []
                    for i in range(len(book_chapter)):
                        ht = book_url + '/' + str(book_chapter[i][0])
                        name = book_chapter[i][1]
                        tup = (ht, name)
                        con4.append(tup)
                    # print(con4)
                    for i in range(len(con4)):
                        chapter_url = con4[i][0]
                        chapter_name = con4[i][1]
                        '''章节名称'''
                        # print(chapter_name)
                        '''爬取小说内容'''
                        # print(chapter_url)
                        chapter_html = urllib.request.urlopen(chapter_url).read()  # chapter page HTML
                        chapter_html = chapter_html.decode("gbk")
                        reg3 = '&nbsp;&nbsp;&nbsp;&nbsp;(.*?)<br />'
                        chapter_reg = re.compile(reg3, re.S)
                        novel = re.findall(chapter_reg, chapter_html)[0]
                        print(chapter_name, '爬取成功')
                        # print(novel)
                        try:
                            # NOTE(review): %-formatted SQL — breaks on quotes in the
                            # scraped text and is injection-prone; prefer
                            # cursor.execute(sql, params) parameter binding.
                            cursor.execute("insert into AllNovel(auther_name,book_name,chapter_name,novel) values ('%s','%s','%s','%s')" % (auther_name, book_name, chapter_name, novel))
                            conn.commit()
                            print('存入MySQL成功')
                        except:
                            print('存入MySQL失败')
                            raise
            except:
                # Best-effort crawl: any failure for this author is swallowed
                # and the crawl moves on. NOTE(review): bare except also hides
                # coding bugs — consider logging the exception.
                pass
            continue
# Run the full scrape at import time: rebuild the table, then crawl the site.
# NOTE(review): these module-level side effects fire on *any* import of this
# module — consider guarding them with `if __name__ == "__main__":`.
creat_db()
writer_name()

class MenuHandler(tornado.web.RequestHandler):
    """Serve the chapter-menu page for one work (``/menu/<workid>``)."""

    def get(self, workid):
        # Fetch every chapter title belonging to this work, then render.
        cnx = pymysql.connect(host='localhost', user='root', password='123456', db='text', port=3306, charset='utf8')
        cursor = cnx.cursor()
        cursor.execute("SELECT title FROM contents where name = %s", (workid,))
        # menu.html expects a list of one-element lists.
        items = [[row[0]] for row in cursor]
        cursor.close()
        cnx.close()
        self.render("menu.html", title=workid, items=items)


class ChapterHandler(tornado.web.RequestHandler):
    """Serve one chapter's text, split into paragraphs (``/chap/<chapid>``)."""

    def get(self, chapid):
        cnx = pymysql.connect(host='localhost', user='root', password='123456', db='text', port=3306, charset='utf8')
        cursor = cnx.cursor()
        cursor.execute("SELECT title,content FROM contents WHERE title = %s ", (chapid,))
        chapinfo = {}
        for title, body in cursor:
            chapinfo["title"] = title
            # Stored text uses Windows line endings; one paragraph per line.
            chapinfo["content"] = body.split("\r\n")
        cursor.close()
        cnx.close()
        self.render("content.html", item=chapinfo)


class worksHandler(tornado.web.RequestHandler):
    """Serve the list of works by one author (``/works/<autherid>``)."""

    def get(self, autherid):
        cnx = pymysql.connect(host='localhost', user='root', password='123456', db='text', port=3306, charset='utf8')
        cursor = cnx.cursor()
        cursor.execute("SELECT author,works FROM authors WHERE author = %s ", (autherid,))
        chapinfo = {}
        for row_author, row_works in cursor:
            chapinfo["author"] = row_author
            # Works are stored as a single '|'-delimited string.
            chapinfo["works"] = row_works.split("|")
        cursor.close()
        cnx.close()
        self.render("works.html", item=chapinfo)


class authorsHandler(tornado.web.RequestHandler):
    """Serve the author index page (site root ``/``)."""

    def get(self):
        # Use password= for consistency with the other handlers; `passwd`
        # is pymysql's deprecated legacy alias.
        cnx = pymysql.connect(host='localhost', user='root', password='123456', db='text', port=3306, charset='utf8')
        cursor = cnx.cursor()
        cursor.execute("SELECT author FROM authors  ")
        # authors.html expects a list of one-element lists.
        items = [[row[0]] for row in cursor]
        cursor.close()
        cnx.close()
        self.render("authors.html", title="", items=items)


def make_app():
    """Build the Tornado Application wiring the four site routes."""
    routes = [
        (r"/menu/(.*)", MenuHandler),
        (r"/chap/(.*)", ChapterHandler),
        (r'/works/(.*)', worksHandler),
        (r"/", authorsHandler),
    ]
    return tornado.web.Application(routes)