#!/usr/bin/python3
# -*- coding: utf-8 -*-

from bs4 import BeautifulSoup
import sys, html5lib, time, random
from DB.MysqlDB import *
from NOVEL.MODEl.UnifyBook import *
from NOVEL.MODEl.UnifyBookCapter import *
from REQUEST.RequestUtil import *
from NOVEL.MODEl.UnifyBookCatalog import *
from UTIL.ZlibUtil import *
import operator as op
import re, time, pymysql
class Novel():
    """Crawler that copies one novel from biqukan.com into a MySQL database.

    Pipeline: fetch the catalog page -> store the book's metadata row ->
    store the (new) catalog entries -> optionally download each chapter body.
    """

    def __init__(self, indexUrl, catalogUrl):
        self.novelUrl = indexUrl    # site root, used to absolutize relative hrefs
        self.bookUrl = catalogUrl   # chapter-catalog page URL for the book
        self.novelName = None
        self.DB = MysqlDB()
        self.request = RequestUtil()
        self.isFirstSave = 1        # kept for backward compatibility; not read here

    def __del__(self):
        # Guarded: __init__ may have failed before self.DB existed, and
        # exceptions raised from __del__ are only printed by the interpreter.
        try:
            self.DB.closeDB()
        except Exception:
            pass

    '''***************************************************书籍信息******************************************************'''

    def saveBookInfo(self, bs):
        """Parse the book's metadata from the catalog-page soup and persist it.

        Returns the existing row id when the title is already stored, the new
        row id after a successful insert, or 0 on any failure.
        """
        try:
            bookInfo = self.getBookInfo(bs)
            # If a book with this title is already in the DB, reuse its id.
            bookId = self.isNeedSaveBook(bookInfo)
            if bookId is not None:
                return bookId
            saveSql = bookInfo.getSaveSql()
            return self.DB.save(saveSql)
        except Exception as e:
            # Was `except BaseException`, which also swallowed KeyboardInterrupt.
            print(e)
            return 0

    def isNeedSaveBook(self, bookInfo):
        """Return the stored book id for this title, or None when absent."""
        querySql = bookInfo.getInfoByTitleSql(bookInfo.getTitle())
        results = self.DB.query(querySql)
        for row in results:
            return row[0]
        return None

    def getBookInfo(self, bs):
        """Extract title/author/category/... from the page soup into a UnifyBook.

        Raises on unexpected page structure (caller handles it in saveBookInfo).
        """
        bookInfo = UnifyBook()
        bookInfoSoup = BeautifulSoup(str(bs.find_all('div', class_='info')[0]), 'html5lib')
        title = bookInfoSoup.h2.text
        cover = self.novelUrl + bookInfoSoup.find_all('img')[0].get('src')
        print('cover is ', cover)
        actoreTextSoup = BeautifulSoup(str(bookInfoSoup.find_all('div', class_='small')[0]), 'html5lib')
        authorInfos = actoreTextSoup.find_all('span')
        # Each <span> is "<label>：<value>"; the slices strip the site's
        # fixed-length label prefixes (3 or 5 chars depending on the field).
        author = authorInfos[0].text[3:]
        category = authorInfos[1].text[3:]
        status = authorInfos[2].text[3:]
        words = authorInfos[3].text[3:]
        bookUpdateTime = authorInfos[4].text[5:]
        latestChapter = authorInfos[5].text[5:]
        # Description sits between the literal markers "简介：" and "作者：".
        descriptionText = bookInfoSoup.find_all('div', class_='intro')[0].text
        description = re.search(r'(?<=简介：)[\s\S]*?(?=作者：)', descriptionText).group(0)

        bookInfo.setAuthor(author)
        bookInfo.setCategoryName(category)
        bookInfo.setCover(cover)
        bookInfo.setDescription(description)
        bookInfo.setLatestChapter(latestChapter)
        bookInfo.setWords(words)
        bookInfo.setBookUpdateTime(bookUpdateTime)
        bookInfo.setGmtCreate(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()))
        bookInfo.setRefUrl(self.bookUrl)
        bookInfo.setStatus(status)
        bookInfo.setTitle(title)
        return bookInfo

    '''***************************************************目录信息******************************************************'''

    def saveCatalog(self, bs, bookId):
        """Store catalog entries for ``bookId`` that are newer than the latest
        one already in the DB, inserting them in ascending chapter order."""
        texts = bs.find_all('div', class_='listmain')
        chapterSoup = BeautifulSoup(str(texts[0]), 'html5lib')
        cahpterLists = chapterSoup.find_all('a')
        unifyBookCatalog = UnifyBookCatalog()
        # Most recently stored chapter title for this book (None if book is new).
        recentlyTitle = None
        recentlyInfoSql = unifyBookCatalog.getRecentlyInfoByBookId(bookId)
        queryResult = self.DB.query(recentlyInfoSql)
        while queryResult is None:  # retry until the query succeeds
            print("查询记录失败...")
            queryResult = self.DB.query(recentlyInfoSql)
        for row in queryResult:
            recentlyTitle = row[0]

        # Walk the list backwards, collecting chapters until we hit the one
        # already stored. The first 15 links are the page's "latest chapters"
        # box and duplicate real entries, so they are skipped.
        catalogList = []
        for chapterInfo in reversed(cahpterLists[15:]):
            title = chapterInfo.text.split('biqukan')[0]
            if recentlyTitle is not None and title == recentlyTitle:
                break
            catalogList.append(chapterInfo)
        # Reverse again so rows are inserted in ascending order.
        for chapter in reversed(catalogList):
            refUrl = self.novelUrl + chapter.get('href')
            chapterName = chapter.text.split('biqukan')[0]
            unifyBookCatalog.setTitle(chapterName)
            unifyBookCatalog.setRefUrl(refUrl)
            unifyBookCatalog.setBookId(bookId)
            unifyBookCatalog.setGmtCreate(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()))
            saveSql = unifyBookCatalog.getSaveSql()
            savedId = self.DB.save(saveSql)
            while savedId == 0:  # retry the insert until it succeeds
                time.sleep(3)
                savedId = self.DB.save(saveSql)
            print(chapterName, refUrl)

    '''***************************************************章节信息******************************************************'''

    def saveBookCapter(self, bookId):
        """Download and store every not-yet-downloaded chapter of ``bookId``.

        Returns 1 after the whole pending batch has been processed.
        """
        unifyBookCatalog = UnifyBookCatalog()
        undownloadSql = unifyBookCatalog.getUndownloadSql(bookId)
        results = self.DB.query(undownloadSql)
        for row in results:
            catalogChapterId = row[0]  # catalog primary key
            book_id = row[1]           # book id
            chapterName = row[2]       # chapter title
            chapterUrl = row[3]        # crawl URL of the chapter page
            print(chapterName, chapterUrl)
            executeResult = self.doSaveNovelChapter(chapterUrl, chapterName, catalogChapterId, book_id)
            while executeResult == 0:
                print("章节下载失败！")
                time.sleep(4)
                # BUG FIX: the retry previously omitted book_id, raising
                # TypeError on every failed download instead of retrying.
                executeResult = self.doSaveNovelChapter(chapterUrl, chapterName, catalogChapterId, book_id)
            # Mark the catalog row as downloaded, retrying on failure.
            updateIsDownLoadSql = unifyBookCatalog.getUpdateIsDownLoadSql(catalogChapterId)
            print(updateIsDownLoadSql)
            executeUpdateResult = self.DB.update(updateIsDownLoadSql)
            while executeUpdateResult == 0:
                print("章节下载完成，更新下载标识失败！")
                time.sleep(4)
                executeUpdateResult = self.DB.update(updateIsDownLoadSql)
        return 1

    def doSaveNovelChapter(self, chapterUrl, chapterName, catalogChapterId, bookId):
        """Fetch one chapter page, extract the body text and save a row.

        Returns the saved row id, or 0 on any failure (after dropping the
        current proxy IP, which may merely be rate limited).
        """
        unifyBookCapter = UnifyBookCapter()
        html = self.request.getHtml(url=chapterUrl).text
        bf = BeautifulSoup(html, 'html5lib')
        texts = bf.find_all('div', class_='showtxt')
        try:
            texts = texts[0].text.replace('\n', '\n\n')
            # The useful content ends where the trailing site URL begins.
            content = re.search(r'[\s\S]*?(?=https://www)', texts).group(0)
            unifyBookCapter.setTitle(chapterName)
            unifyBookCapter.setContent(MysqlDB.dealStr(content))
            unifyBookCapter.setRefUrl(chapterUrl)
            unifyBookCapter.setWords(len(content))
            unifyBookCapter.setGmtCreate(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()))
            unifyBookCapter.setCatalogId(catalogChapterId)
            unifyBookCapter.setBookId(bookId)
            saveSql = unifyBookCapter.getSaveSql()
            print(saveSql)
            # NOTE: the original also built a UnifyBookCatalog here and set
            # isDownload/id on it without ever executing an update (dead code,
            # removed); the flag is actually updated in saveBookCapter.
            return self.DB.save(saveSql)
        except Exception as e:
            self.request.reset_proxy()
            print(e)
            return 0

    '''*****************************************************处理入口******************************************************'''

    def copyNovel(self):
        """Crawl entry point: fetch the catalog page, save book info and catalog."""
        html = self.request.getHtml(self.bookUrl).text
        bs = BeautifulSoup(html, 'html5lib')
        bookId = self.saveBookInfo(bs)
        while bookId == 0:  # retry until the book row exists
            bookId = self.saveBookInfo(bs)
        self.saveCatalog(bs, bookId)
        # Chapter bodies are downloaded in a separate pass:
        # self.saveBookCapter(bookId)
        return

    def writer(self, name, path, text):
        """Append ``name`` as a heading line plus ``text`` to the UTF-8 file at ``path``."""
        with open(path, 'a', encoding='utf-8') as f:
            f.write(name + '\n')
            f.write(text)
            f.write('\n\n')

if __name__ == '__main__':
    # Entry point: crawl a single book from biqukan.com into the database.
    crawler = Novel('https://www.biqukan.com', 'https://www.biqukan.com/12_12154/')
    crawler.copyNovel()









