from spider import Spider
import re
import requests

# Unused reference example: a sample chapter page on 23us.so and the
# regexes that would extract its content and title (kept for documentation).
'''
url = 'https://www.23us.so/files/article/html/13/13694/6167429.html'

regex_content = '<dd id="contents">([\s\S]*?)</dd>'

regex_chapter = '<h1> (.*?)</h1>'
'''



class BookInfo(Spider):
    """Spider for a book's index page.

    Maps the caller-supplied regexes onto the field names the base
    ``Spider`` expects (``book_name``, ``book_author``, ``chapter_url``),
    fetches the page eagerly, and exposes the parsed result as ``.info``.
    """

    def __init__(self, encode, url, regex_book_name, regex_book_author,
                 regex_chapter_url):
        super().__init__(
            url=url,
            encode=encode,
            book_name=regex_book_name,
            book_author=regex_book_author,
            chapter_url=regex_chapter_url,
        )
        # Parse immediately so callers can read .info right after construction.
        self.info = self.get_info()


class ChapterInfo(Spider):
    """Spider for a single chapter page.

    Maps the caller-supplied regexes onto the ``chapter_name`` and
    ``chapter_content`` fields of the base ``Spider``, fetches the page
    eagerly, and exposes the parsed result as ``.info``.
    """

    def __init__(self, encode, url, regex_chapter_name,
                 regex_chapter_content):
        super().__init__(
            encode=encode,
            url=url,
            chapter_name=regex_chapter_name,
            chapter_content=regex_chapter_content,
        )
        # Parse immediately so callers can read .info right after construction.
        self.info = self.get_info()
    
class Down():
    """Download a book: scrape the index page for metadata and chapter
    links, then fetch each chapter and append it to one text file named
    after the book and its author.

    Parameters
    ----------
    url : str
        URL of the book's index page.
    book_regex_dict : dict
        Keyword arguments forwarded to ``BookInfo`` (``regex_book_name``,
        ``regex_book_author``, ``regex_chapter_url``).
    chapter_regex_dict : dict
        Keyword arguments forwarded to ``ChapterInfo``
        (``regex_chapter_name``, ``regex_chapter_content``).
    joint_chapter_url : str or None
        Optional prefix joined onto each extracted chapter URL fragment
        when the index page only contains relative links.
    chapter_limit : int or None
        Maximum number of chapters to download. Defaults to 5, matching
        the previous hard-coded cap; pass ``None`` to download every
        chapter.
    """

    def __init__(self,
                 url,
                 book_regex_dict,
                 chapter_regex_dict,
                 joint_chapter_url=None,
                 chapter_limit=5,
                 ):
        self.url = url
        self.book_regex_dict = book_regex_dict
        self.chapter_regex_dict = chapter_regex_dict
        self.joint_chapter_url = joint_chapter_url
        self.chapter_limit = chapter_limit

    def down_land(self):
        """Fetch book metadata, then download and append each chapter.

        Writes to '书名：{book}-作者：{author}.txt' in the working
        directory, separating chapters with a line of asterisks, and
        prints each chapter name as a progress indicator.
        """
        book_info = BookInfo(encode='utf-8',
                             url=self.url,
                             **self.book_regex_dict).info

        # list[:None] yields the whole list, so chapter_limit=None means
        # "no cap".
        chapter_url_list = book_info['chapter_url'][:self.chapter_limit]
        book_name = book_info['book_name'][0]
        book_author = book_info['book_author'][0]

        # Compute the output filename once, outside the loop.
        out_name = '书名：{}-作者：{}.txt'.format(book_name, book_author)

        for chapter_url in chapter_url_list:
            # `is not None` — identity test, not equality (was `!= None`).
            if self.joint_chapter_url is not None:
                chapter_url = self.joint_chapter_url + chapter_url

            chapter_info = ChapterInfo(encode='utf-8',
                                       url=chapter_url,
                                       **self.chapter_regex_dict
                                       ).info
            chapter_name = chapter_info['chapter_name'][0]
            chapter_content = chapter_info['chapter_content'][0]

            # Explicit utf-8: the platform default encoding may not be
            # able to represent the Chinese text on every system.
            with open(out_name, 'a', encoding='utf-8') as f:
                f.write(chapter_name)
                f.write('\n\n')
                f.write(chapter_content)
                f.write('\n\n')
                f.write('*' * 20)
                f.write('\n\n')

            print(chapter_name)
                

if __name__ == '__main__':

    # Raw strings so backslash sequences such as \s reach the regex engine
    # untouched — a non-raw '\s' is an invalid escape that raises a
    # SyntaxWarning (and eventually an error) in modern Python.
    book_regex_for_23usso = dict(
        regex_book_name=r'<h1 class="Title">(.*?)</h1>',
        regex_book_author=r'<div class="Author">作者：'
                          r'<a target="_blank" href=".*?">(.*?)</a></div>',
        regex_chapter_url=r'<a target="_blank" href="/chapter/(.*?)"',
    )

    chapter_regex_for_23usso = dict(
        regex_chapter_name=r'<h1>([\s\S]*?)</h1>',
        regex_chapter_content=r'<div class="p">([\s\S]*?)<div class="author-say"></div>',
    )

    # NOTE(review): the dict names say 23us.so but these URLs point at
    # 17k.com — confirm the regexes actually match the 17k.com markup.
    url = 'http://www.17k.com/list/2856663.html'
    joint_chapter_url = 'http://www.17k.com/chapter/'

    downloader = Down(url,
                      book_regex_for_23usso,
                      chapter_regex_for_23usso,
                      joint_chapter_url)
    downloader.down_land()


    
    

                

        
        

    

    

    

    

            
    

























    



