import re
import requests
import os.path

class ZggdwxNovel:
    """Scraper for the classic-literature site www.zggdwx.com.

    Three operations:
      * get_book_info(url)          -- parse a book catalog page
      * get_chapter_info(url)       -- parse a single chapter reading page
      * get_search_info(keywords)   -- find books via a site-restricted Baidu search
    """

    host = 'http://www.zggdwx.com'

    # Seconds before an HTTP request is abandoned (original code had no
    # timeout and could hang indefinitely on a stalled connection).
    REQUEST_TIMEOUT = 10

    # Regexes applied to a book catalog page.
    # NOTE: the key 'book_intor' is a historical misspelling of "intro";
    # it is part of the returned dict's public shape, so it is kept.
    book_info_regex = dict(
        book_name = r'<h1>(.*?)</h1>',
        # book_author = r'<meta property="og:novel:author" content="(.*?)" />',
        book_intor = r'<meta name="description" content="(.*?)" />',
        book_title = r'<title>(.*?)</title>',
        book_chapter = r'<fieldset class="catalog">(.*?)</fieldset>',
    )

    # Regexes applied to a chapter reading page.
    chapter_info_regex = dict(
        chapter_name = r'<h1>(.*?)</h1>',
        # (?is:...) makes '.' span newlines only inside this group.
        chapter_content = r'<div class="content">((?is:.*?))</div>',
        next_chapter_url = r'<div class="leftbarspace"></div><a href="(.*?)">',
        pre_chapter_url = r'<div class="rightbarshow"><a href="(.*?)">',
    )

    def __init__(self):
        pass

    def _fetch_html(self, url, params=None, headers=None):
        """GET *url* and return its body decoded with the sniffed encoding.

        The site does not declare charsets reliably, so we trust
        ``apparent_encoding`` (chardet-based detection) over the header.
        """
        response = requests.get(
            url, params=params, headers=headers, timeout=self.REQUEST_TIMEOUT,
        )
        response.encoding = response.apparent_encoding
        return response.text

    def get_book_info(self, url):
        """Parse a book catalog page.

        url: URL of the book's catalog (table-of-contents) page.

        Returns a dict with keys:
            book_name    -- findall result list for the page <h1>
            book_intor   -- intro string (falls back to the <title> text, '' if absent)
            book_title   -- findall result list for <title>
            book_chapter -- {chapter title: chapter URL}
            host, author -- site host string, empty author list
        """
        html_str = self._fetch_html(url)

        book_info = {key: re.findall(pattern, html_str)
                     for key, pattern in self.book_info_regex.items()}

        # Collapse the intro to a single string; previously the fallback
        # branch assigned the raw findall *list*, giving an inconsistent type.
        if book_info['book_intor']:
            book_info['book_intor'] = book_info['book_intor'][0]
        elif book_info['book_title']:
            book_info['book_intor'] = book_info['book_title'][0]
        else:
            book_info['book_intor'] = ''

        book_info['host'] = self.host
        book_info['author'] = []  # pages on this site carry no author metadata

        # Map chapter title -> chapter URL.  Guard against a missing catalog
        # block: the original indexed [0] unconditionally and raised IndexError.
        chapter_links = []
        if book_info['book_chapter']:
            chapter_links = re.findall(
                r'<a href="(.*?)" target="_blank">(.*?)</a>',
                book_info['book_chapter'][0],
            )
        book_info['book_chapter'] = {title: link for link, title in chapter_links}

        return book_info

    def get_chapter_info(self, chapter_url):
        """Parse a chapter reading page.

        chapter_url: URL of the chapter page.

        Returns a dict:
        {
            "chapter_name": '',
            "chapter_content": "",
            "chapter_url": "",
            "next_chapter_url": "",
            "pre_chapter_url": "",
            "book_url": "",
            "host": "",
        }

        The first/last chapter has no prev/next link; those fields fall
        back to *chapter_url* itself.
        """
        html_str = self._fetch_html(chapter_url)

        info = {key: re.findall(pattern, html_str)
                for key, pattern in self.chapter_info_regex.items()}

        if not info['pre_chapter_url']:
            info['pre_chapter_url'] = [chapter_url]

        if not info['next_chapter_url']:
            info['next_chapter_url'] = [chapter_url]

        # Collapse each findall list to its first hit.  An absent field now
        # yields '' instead of raising IndexError (the "first chapter" bug
        # noted in the original TODO).
        for key in info:
            info[key] = info[key][0] if info[key] else ''

        info['chapter_url'] = chapter_url
        info['host'] = self.host
        # Catalog page lives one path level above the chapter page.
        info['book_url'] = os.path.dirname(chapter_url)

        return info

    def get_search_info(self, keywords):
        """Search for books by keyword via a site-restricted Baidu query.

        keywords: search phrase.

        Returns a dict:
        {
            "keywords": '',
            "host": "",
            "search_info": [
                {'book_name': "", "book_url": "", "book_author": "", "book_status": ""},
                ...
            ]
        }
        """
        header = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Connection': 'keep-alive',
            'Host': 'www.baidu.com',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.84 Safari/537.36',
        }
        search_url = 'https://www.baidu.com/s'
        keywords_site = 'site:zggdwx.com {}'.format(keywords)

        html = self._fetch_html(
            search_url,
            params={'wd': keywords_site},
            headers=header,
        )

        regex = '''data-tools='{"title":"(.*?)","url":"(.*?)"}'''

        # Keep at most the first two result entries.
        hits = re.findall(regex, html)[:2]
        candidates = [
            {'book_name': name, "book_url": url, "book_author": "", "book_status": ""}
            for name, url in hits
        ]

        search_info = []
        # Pattern for a *chapter* URL.  The dots are escaped here; the
        # original pattern left '.' as a wildcard before 'html'.
        chapter_url_regex = r'http://www\.zggdwx\.com/\w+?/\d+?\.html'
        for item in candidates:
            # Baidu returns redirect links; one request resolves the real URL.
            real_url = requests.get(
                item["book_url"], headers=header, timeout=self.REQUEST_TIMEOUT,
            ).url

            # Keep only catalog pages; drop links pointing at a single chapter.
            if not re.match(chapter_url_regex, real_url):
                item["book_url"] = real_url
                search_info.append(item)

        return {
            "keywords": keywords,
            "host": self.host,
            "search_info": search_info,
        }


if __name__ == "__main__":

    # Manual smoke test against the live site: run a Baidu-backed search.
    novel = ZggdwxNovel()
    print(novel.get_search_info("春秋公羊传"))

    # Homework: fix the bug where get_book_info crashes because the first
    # chapter has no "previous page" link.
    # print(novel.get_book_info('http://www.zggdwx.com/dbrblmdj.html'))

    # print(novel.get_chapter_info('http://www.zggdwx.com/gongyang/3.html'))