import re
import requests
import time
import sys
import threading
from openpyxl import load_workbook,Workbook
from spider_xia import SpiderManXia as smx

'''
目录url = https://www.miaobige.com/read/12245/
内容url = https://www.miaobige.com/read/12245/5646550.html
'''
class BookLoad:
    """Scraper helper for a novel hosted on miaobige.com.

    Attributes:
        url:     catalogue (table-of-contents) page URL
        regex:   regex extracting the first-chapter link from the catalogue page
        charset: character set used to decode fetched pages
        first_chapter_url: the first chapter's URL (string), resolved in __init__
    """

    def __init__(self,
                 url='https://www.miaobige.com/read/12245/',
                 regex='<div class="border-line"></div><li>'
                       '<a href="(.*?)" target="_blank">.*?</a>',
                 pingjie=None,
                 charset='gbk'):
        """
        url:     catalogue page URL
        regex:   regex for the first-chapter link within the catalogue page
        pingjie: prefix joined onto the extracted link -- None keeps the link
                 as extracted; the literal '书目连接' ("catalogue link")
                 prepends the catalogue URL itself; any other string is used
                 verbatim as the prefix
        charset: page encoding passed through to the fetcher
        """
        self.url = url
        self.regex = regex
        self.charset = charset
        # BUG FIX: the original assigned the result over the method of the
        # same name, shadowing it after the first call. The lookup now lives
        # in a private helper; the public attribute keeps its original name
        # and value so existing callers (which read the attribute) still work.
        self.first_chapter_url = self._resolve_first_chapter_url(pingjie)

    def _resolve_first_chapter_url(self, pingjie=None):
        """Return the first chapter's URL scraped from the catalogue page."""
        page = smx(self.url, charset=self.charset)
        link = page.find_info(self.regex)[0]
        if pingjie is None:  # 'is None', not '== None'
            return link
        if pingjie == '书目连接':  # join onto the catalogue URL itself
            return self.url + link
        return pingjie + link

    def get_info(self, url, *regex):
        """Fetch *url* and collect matches for each regex.

        url:    page URL to download
        *regex: regexes to run against the page
        return: list with one find_info() result list per regex, in order
        """
        page = smx(url, charset=self.charset)
        return [page.find_info(r) for r in regex]
                
def save_txt(
        charset,
        book_name,
        book_author,
        域名='https://www.miaobige.com',
        url='https://www.miaobige.com/read/12245/',
        regex='<div class="border-line"></div><li><a href="(.*?)" target="_blank">.*?</a>',
        pingjie='https://www.miaobige.com',
        regex_next_url='标记书签</a> <a href="(.*?)" >下一章</a></div>',
        regex_content='<div id="content">(.*?)</div>',
        regex_title='<h1>(.*?)</h1>',
        ):
    """Download a whole novel chapter by chapter into a local .txt file.

    charset:        page encoding
    book_name:      book title (used in the output filename)
    book_author:    author name (used in the output filename)
    域名:           site domain, stripped from `url` to detect the last chapter
    url:            catalogue (table-of-contents) page URL
    regex:          regex for the first-chapter link on the catalogue page
    pingjie:        prefix joined onto links extracted from the catalogue
    regex_next_url: regex for the next-chapter link inside a chapter page
    regex_content:  regex for the chapter body
    regex_title:    regex for the chapter title
    return:         '下载完毕' on normal completion; None if an error was
                    logged to Error_logo.txt
    """
    # Pre-bind fc so the except handler can always report a URL, even when
    # BookLoad() itself raises (the original handler hit a NameError then).
    fc = url
    try:
        book = BookLoad(url, regex, pingjie=pingjie, charset=charset)
        fc = book.first_chapter_url
        # Loop invariants, hoisted: the catalogue-page suffix marks the
        # "next chapter" link of the final chapter; the output filename.
        catalogue_suffix = url.replace(域名, '')
        out_name = '书名-{}-作者-{}.txt'.format(book_name, book_author)
        while True:
            print(fc)
            # info[0]: next-chapter link matches, info[1]: body, info[2]: title
            info = book.get_info(fc,
                                 regex_next_url,
                                 regex_content,
                                 regex_title,
                                 )

            with open(out_name, 'a', errors='ignore') as ff:
                ff.write(info[2][0])
                ff.write('\n\n')
                # Strip the simple HTML markup the site uses in chapter bodies.
                ff.write(info[1][0].replace('<p>', '    ')
                                   .replace('</p>', '\n\n')
                                   .replace('&nbsp;', ' ')
                                   .replace('<br />', '\n'))
                ff.write('\n\n\n')

            # BUG FIX: the original tested `info[0][0] != suffix or info[0] != []`,
            # which (a) indexes before checking emptiness and (b) uses `or`
            # where `and` is needed, so the loop could never finish normally.
            # Continue only while a next-chapter link exists and does not
            # point back at the catalogue page.
            if info[0] and info[0][0] != catalogue_suffix:
                # Swap the numeric page suffix of the current URL for the
                # newly extracted next-chapter link.
                fc = re.sub(r'\d+\.html', info[0][0], fc)
            else:
                return '下载完毕'
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C / SystemExit still work;
        # errors are logged best-effort rather than crashing the run.
        with open('Error_logo.txt', 'a') as ff:
            ff.write('\n错误出在链接：{}\n'.format(fc))
            ff.write(str(sys.exc_info()) + '\n' + '*' * 30)
                        


        
        
                
if __name__ == '__main__':
    # Site-specific scraping configuration for this particular novel.
    download_settings = dict(
        url='https://www.miaobige.com/read/12245/',
        regex='<div class="border-line"></div><li><a href="(.*?)" target="_blank">.*?</a>',
        pingjie='https://www.miaobige.com',
        regex_next_url='标记书签</a> <a href="(.*?)" >下一章</a></div>',
        regex_content='<div id="content">(.*?)</div>',
        regex_title='<h1>(.*?)</h1>',
    )
    # Download the whole book to a local text file.
    save_txt('utf-8', '八零后修道记', '钓鱼1哥', **download_settings)


































