# coding:utf-8
__author__ = "Xuhao"

from lxml import  etree
from data.MongoHelper import MongoHelper
from spider.HtmlDownloader import Html_Downloader
from data.SqlHelper import SqlHelper

from config import RULES
from config import CONTENTFILTER
import re


class Html_Parser(object):
    """Parse novel listing pages, chapter-index pages and chapter content.

    XPath expressions come from the per-site rule dicts (``RULES`` in
    ``config``); each method takes the raw HTML text of a fetched page plus
    the rule dict for the source site.
    """

    def pares(self, response, parser):
        """Dispatch parsing according to the rule's declared type.

        :param response: raw HTML of the fetched page
        :param parser: rule dict; ``parser['type']`` selects the strategy
        :return: None — only the 'xpath' type is recognised and it is
                 currently a stub (kept as-is for interface compatibility)
        """
        if parser['type'] == 'xpath':
            pass  # TODO: delegate to XpathPraser once the rule shape settles

    def XpathPraser(self, response, rules):
        """Collect book name, author, URL and newest chapter from a listing page.

        :param response: raw HTML of the listing page
        :param rules: rule dict with 'pattern', 'position' and 'url' keys
        :return: list of novel dicts (possibly empty); entries that fail to
                 parse are logged and skipped
        """
        book_list = []
        root = etree.HTML(response)
        for book in root.xpath(rules['pattern']):
            try:
                book_name = book.xpath(rules['position']['bookName'])[0].text.strip()
                book_url = book.xpath(rules['position']['bookUrl'])[0]
                # Newest-chapter extraction is identical for every site, so it
                # is hoisted out of the per-site branches below.
                new_chapter = book.xpath(rules['position']['newChapterName'])[0].text.strip()
                new_chapter_url = book.xpath(rules['position']['newChapterUrl'])[0]
                if rules['url'] == 'http://www.suimeng.la//':
                    # suimeng.la embeds the author in a mixed text node like
                    # "...作者：<name>类型..."; try the regex first, then fall
                    # back to a nested <a> element.
                    try:
                        author_text = book.xpath(rules['position']['bookAuthor'])[0].text
                        book_author = re.findall(r'.*?作者：(.*?)类型', author_text, re.S)[0].strip()
                    except Exception:
                        try:
                            book_author = book.xpath(rules['position']['bookAuthor'] + '/a')[0].text
                        except Exception:
                            continue  # no usable author node — skip this book
                else:
                    book_author = book.xpath(rules['position']['bookAuthor'])[0].text.strip()

                # NOTE: the original dict listed 'bookNewChapter' twice; the
                # duplicate (dead) entry has been removed.
                book_list.append({
                    'bookName': book_name,
                    'bookAuthor': book_author,
                    'bookUrl': book_url,
                    'bookDataName': '',
                    'bookUpdate': '',
                    'bookCategory': 3,
                    'bookType': '',
                    'bookNewChapter': new_chapter,
                    'bookNewChapterUrl': new_chapter_url,
                })
            except Exception as e:
                print(e)
                continue
        return book_list

    def ChapterXpathPraser(self, url, response, rules):
        """Parse a chapter-index page into chapter name/URL pairs.

        :param url: URL of the index page (used when links are page-relative)
        :param response: raw HTML of the index page
        :param rules: rule dict with 'chapter_selector', 'chapter_position'
                      and 'urlConnectType' keys
        :return: list of {'chapterName', 'chapterUrl'} dicts
        """
        root = etree.HTML(response)
        chapter_list = []
        for item in root.xpath(rules['chapter_selector']):
            try:
                raw_link = item.xpath(rules['chapter_position']['chapterUrl'])[0]
                # How chapter links are assembled differs per site:
                if rules['urlConnectType'] == '2':    # prepend the site root
                    chapter_url = rules['url'] + raw_link
                elif rules['urlConnectType'] == '1':  # link is already absolute
                    chapter_url = raw_link
                elif rules['urlConnectType'] == '0':  # relative to current page
                    chapter_url = url + raw_link
                chapter_name = item.xpath(rules['chapter_position']['chapterName'])[0].text.strip()
                novel = {'chapterName': chapter_name,
                         'chapterUrl': chapter_url,
                         }
                print(novel)
                chapter_list.append(novel)
            except Exception as e:
                # BUG FIX: this handler previously printed `novels`, which is
                # unbound when the very first iteration fails (NameError);
                # report the actual exception instead.
                print(e)
                continue
        return chapter_list

    def contentXpathPraser(self, response, rules):
        """Extract and clean the chapter text from a content page.

        :param response: raw HTML of the chapter page
        :param rules: rule dict with 'content_selector' and 'filter' keys
        :return: cleaned chapter text, or None on any parsing failure
        """
        root = etree.HTML(response)
        try:
            pieces = root.xpath(rules['content_selector'])
            # Strip the global junk pattern from each fragment, then apply
            # the site-specific filter to the joined result. str.join avoids
            # the quadratic `s += ...` of the original.
            merged = "".join(re.sub(CONTENTFILTER, '', piece) for piece in pieces)
            return re.sub(rules['filter'], '', merged)
        except BaseException as e:
            print(e)  # best-effort: caller receives None on failure



if __name__ == "__main__":

    # Initialise the relational store; failure is reported but deliberately
    # non-fatal so the commented-out test snippets below can still be tried.
    try:
        sqlhelper = SqlHelper()
        sqlhelper.init_db()
        print('初始化成功')
    except Exception as e:
        print(e)

    html_parser = Html_Parser()
    # rule = RULES[0]
    # # Test: crawl book URLs from a listing page and upsert them via SqlHelper
    # for url in rule['urls']:
    #     print(url)
    #     resonse = Html_Downloader.download(url)
    #     if resonse is not None:
    #         bookList = html_parser.XpathPraser(resonse, rule)
    #         if bookList is not  None:
    #             for book in bookList:
    #                 # Check whether the book already exists before inserting
    #                 bookName = book['bookName']
    #                 bookAuthor = book['bookAuthor']
    #                 condition = {'bookName':bookName, 'bookAuthor':bookAuthor}
    #                 #print(condition)
    #                 checkBook = sqlhelper.select(conditions=condition)
    #                 if len(checkBook) > 0:
    #                     #print(checkBook[0].bookName)
    #                     print('更新小说:%s' % (book['bookName']))
    #                     sqlhelper.update(conditions=condition, value=book)
    #                 else:
    #                     print('添加小说:%s' % (book['bookName']))
    #                     sqlhelper.insert(book)
#http://www.biqukan.com/0_790/  http://www.66ip.cn/

# Test: crawl chapter list for one book
    # res = Html_Downloader.download('http://www.suimeng.la/files/article/html/12/12199/')
    # if res is not None:
    #     html_parser.ChapterXpathPraser(url='http://www.suimeng.la/files/article/html/12/12199/',response=res, rules=RULES[1])

# Test: crawl a single chapter's content
# res = Html_Downloader.download('http://www.suimeng.la/files/article/html/12/12199/16677596.html')
# if res is not None:
#     html_parser.contentXpathPraser(res, RULES[1])
#
#     from data.MongoHelper import MongoHelper as SqlHelper
#
#     sqlhelper = SqlHelper()
#
#     for i in ['http://www.suimeng.la/files/article/html/33/33665/']:
#         sqlhelper.init_db('绝世唐门+唐家三少')
#         html_parser = Html_Parser()
#         rule = RULES[1]
#         resonse = Html_Downloader.download(i)
#         if resonse is not  None:
#             lists = html_parser.ChapterXpathPraser(url=i,response=resonse,rules=rule)
#             count = 0;
#             print('一共 %s 章节'% len(lists))
#             for list in lists:
#                 chapterurl = list['chapterUrl']
#                 chaptername = list['chapterName']
#                 responsecontent = Html_Downloader.download(chapterurl)
#                 if responsecontent is not None:
#                     text = html_parser.contentXpathPraser(response=responsecontent, rules=rule)
#                     novels = {'chapterName': chaptername,
#                               'chapterUrl': chapterurl,
#                               'chapterContent': text,
#                               'chapterID': count}
#                     sqlhelper.insert(value=novels)
#                     print('存储第 %s 章: %s' % (count, chaptername))
#                     count += 1
#         else:
#             print('抓取失败')
