# coding:utf-8
__author__ = "Xuhao"

from spider.HtmlDownloader import Html_Downloader
from spider.HtmlPraser import Html_Parser
import time
from  config import RULES
from gevent import monkey
monkey.patch_all()
from gevent.pool import Pool
from multiprocessing import  Queue, Process
import gevent
import sys
from data.DataStore import store_data
from data.SqlHelper import SqlHelper
from data.MongoHelper import MongoHelper

def startCrawl(queue=None):
    """Process entry point: build a NovelCrawl and run the content crawler.

    :param queue: unused legacy parameter (was a multiprocessing.Queue handed
        over via ``Process(target=startCrawl, args=(q1,))``); kept positional
        and defaulted so both old and new call styles work.
        (Original signature named it ``object``, shadowing the builtin, and
        gave it no default — the function could not be called argument-free.)
    """
    crawl = NovelCrawl()
    # crawl.runCrawNovelInfo()
    crawl.runCrawNoveContent()

class NovelCrawl(object):
    """Crawler that keeps a SQL index of novels and per-novel chapter content in MongoDB.

    Relies on project helpers: SqlHelper (novel metadata index), MongoHelper
    (one collection per novel), Html_Downloader / Html_Parser (fetch + parse),
    and the site-specific rule sets in config.RULES.
    """
    def __init__(self):
        # NOTE(review): a queue used to be injected (see commented line);
        # the class now owns its DB helpers directly.
        #self.queue = queue
        self.sqlhelper = SqlHelper()
        self.sqlhelper.init_db()
        self.mongohelper = MongoHelper()
    def runCrawNoveContent(self):
        """Endlessly crawl chapter content for every novel recorded in SQL.

        For each novel: pick the parsing rule from its URL, download the chapter
        list, then either store every chapter (new novel) or only the chapters
        past the last stored chapterID (update).  Sleeps 20 minutes between
        passes; returns None when the SQL store yields no novels.
        """
        while True:
            # presumably returns up to 200 novel rows — confirm SqlHelper.select
            novelList = self.sqlhelper.select(count=200)
            if not  novelList:
                return None
            for novel in novelList:
                # one Mongo collection per novel, keyed "<name>+<author>"
                tablename = novel.bookName + '+' +novel.bookAuthor
                self.mongohelper.init_db(tablename)
                html_parser = Html_Parser()
                bookUrl = novel.bookUrl
                # RULES[1] handles suimeng.la; everything else uses RULES[0]
                if 'www.suimeng.la' in bookUrl:
                    rule = RULES[1]
                else:
                    rule = RULES[0]
                resonse = Html_Downloader.download(bookUrl)
                if resonse is not  None:
                    lists = html_parser.ChapterXpathPraser(url=bookUrl,response=resonse,rules=rule)
                    # No need to check here whether the novel's collection was created.
                    # Look up the highest chapter ID already stored for this novel.
                    tmp = self.mongohelper.selectLast()
                    if tmp is None:# nothing found: this novel has not been stored yet
                        chapterID = 0;
                        print('存储新小说: %s , 一共 %s 章' % (tablename, len(lists)))
                        # NOTE(review): loop variable shadows the builtin `list`
                        for list in lists:
                            chapterurl = list['chapterUrl']
                            chaptername = list['chapterName']
                            responsecontent = Html_Downloader.download(chapterurl)
                            if responsecontent is not None:
                                text = html_parser.contentXpathPraser(response=responsecontent, rules=rule)
                                novels = {'chapterName': chaptername,
                                          'chapterUrl': chapterurl,
                                          'chapterContent': text,
                                          'chapterID': chapterID}
                                self.mongohelper.insert(value=novels)
                                print('存储小说：%s，章节ID：%d/%d ,章节名称:%s' % (tablename, chapterID, len(lists),chaptername))
                                # NOTE(review): a failed download skips this increment,
                                # so stored chapterIDs drift out of step with `lists`
                                chapterID += 1
                    else:
                        maxID = tmp['chapterID']

                        # NOTE(review): chapterIDs start at 0, so "up to date" means
                        # maxID == len(lists) - 1; this test also lets that case
                        # through (harmless — the slice below is then empty).
                        if maxID < len(lists): # update needed if maxID is below the fetched chapter count
                            # take only the chapters positioned after maxID in lists
                            results = lists[(maxID + 1):]
                            chapterID = maxID + 1
                            for result in  results:
                                chapterurl = result['chapterUrl']
                                chaptername = result['chapterName']
                                responsecontent = Html_Downloader.download(chapterurl)
                                if responsecontent is not None:
                                    text = html_parser.contentXpathPraser(response=responsecontent, rules=rule)
                                    novels = {'chapterName': chaptername,
                                              'chapterUrl': chapterurl,
                                              'chapterContent': text,
                                              'chapterID': chapterID}
                                    self.mongohelper.insert(value=novels)
                                    print('更新小说：%s，章节ID：%d/%d ,章节名称:%s' % (tablename, chapterID, len(lists),chaptername))
                                    chapterID += 1

                else:
                    print('抓取失败')

            # wait 20 minutes before re-scanning every novel
            time.sleep(60 * 20)

    def runCrawNovelInfo(self):
        """Endlessly crawl novel metadata for every rule set, sleeping 30s between passes."""
        while True:
            for i in  RULES:
                self.crawlNovelInfo(i)

        # while True:
        #     str = '\r\nstart crawling...'
        #     sys.stdout.write(str + "\r\n")
        #     sys.stdout.flush()
        #     spanwns = []
        #     spanwns.append(gevent.spawn(self.crawlNovelInfo, RULES[0]))
        #     spanwns.append(gevent.spawn(self.crawlNovelInfo, RULES[1]))
        #     gevent.joinall(spanwns)
        #
            # NOTE(review): `str` shadows the builtin here
            str = '\r\nsleep 30s ...'
            sys.stdout.write(str + "\r\n")
            sys.stdout.flush()
            time.sleep(30)

    # def crawNovelContent(self, chapterurl,  chaptername, chapterID, rule):
    #     responsecontent = Html_Downloader.download(chapterurl)
    #     if responsecontent is not None:
    #         text = html_parser.contentXpathPraser(response=responsecontent, rules=rule)
    #         novels = {'chapterName': chaptername,
    #                   'chapterUrl': chapterurl,
    #                   'chapterContent': text,
    #                   'chapterID': chapterID}
    #         self.mongohelper.insert(value=novels)
    #         print('更新小说：%s，章节ID：%d/%d ,章节名称:%s' % (tablename, chapterID, len(lists), chaptername))
    #         chapterID += 1

    def crawlNovelInfo(self, rules):
        '''
        Crawl novel metadata from every listing URL in one rule set.
        :param rules: a site rule set from config.RULES; must provide 'urls'
        :return: None — parsed books are upserted into the SQL store
        '''
        html_parser = Html_Parser()
        for url in rules['urls']:
            print(url)
            resonse = Html_Downloader.download(url)
            if resonse is not None:
                bookList = html_parser.XpathPraser(resonse, rules)
                if bookList is not None:
                    for book in bookList:
                        bookName = book['bookName']
                        bookAuthor = book['bookAuthor']
                        condition = {'bookName': bookName, 'bookAuthor': bookAuthor}
                        # (name, author) identifies a book: update if present, insert otherwise
                        checkBook = self.sqlhelper.select(conditions=condition)
                        if len(checkBook) > 0:
                            print('更新小说:%s' % (book['bookName']))
                            self.sqlhelper.update(conditions=condition, value=book)
                        else:
                            print('添加小说:%s' % (book['bookName']))
                            self.sqlhelper.insert(book)

                            # if self.queue.full():
                            #     time.sleep(0.1)
                            # else:
                            #     self.queue.put(book)
                            #     break


if __name__ == "__main__":
    # Run the content crawler in-process.  (An earlier multiprocessing setup
    # dispatched startCrawl via Process; see startCrawl above.)
    crawl = NovelCrawl()
    # crawl.crawlNovelInfo(RULES[1])
    crawl.runCrawNoveContent()