# -*- coding: utf-8 -*-
"""
------------------------------------------------------------------------------
    File Name:  spiderman
    Author   :  wanwei1029
    Date     :  2018/10/9
    Desc     :  Multi-process spider. multiprocessing.Process is used without
                overriding __init__; per-process resources are built in run().
------------------------------------------------------------------------------
"""
import sys
sys.path.append("D:\\pycharmProjects\\python_samp")

import multiprocessing
import time
import traceback
import samp.spider.aikantxt.spider_utils as utils
from samp.spider.aikantxt.content_downloader import ContentDownloader
from samp.spider.aikantxt.content_downloader import ChapterListDownloader
from samp.spider.aikantxt.data2json import DataToJson
from samp.spider.aikantxt.spider_utils import RedisClient
import samp.samp_logging as sl

from cProfile import Profile
import os


BOOK_ID_LIST_NAME = "bookIdList"
EXIST_FLAG = "aikanFlag"
BASE_URL = "https://www.aikantxt.la/aikan"

logger = sl.get_logger("spiderman", os.path.join(utils.SAVE_DIR, "aikanspider.log"))


class MuSpiderMan(multiprocessing.Process):
    """Worker process that pops book ids from Redis and downloads their chapters.

    multiprocessing.Process is subclassed WITHOUT overriding __init__: all
    per-process resources (downloaders, converters) are created inside run(),
    i.e. in the child process, so nothing non-picklable crosses the process
    boundary when the worker is started.
    """

    def run(self):
        """Consume book ids from the Redis list until the list is empty or the
        stop flag is set, processing one book per iteration."""
        # Created here (in the child) on purpose — see class docstring.
        content_downloader = ContentDownloader()
        data2json = DataToJson()
        chapter_list_downloader = ChapterListDownloader()
        while True:
            flag = RedisClient.get_client().get(EXIST_FLAG)
            # NOTE(review): this comparison assumes the Redis client returns
            # str (decode_responses=True); with a raw client get() yields
            # bytes and would never equal "stop" — confirm client config.
            if flag == "stop":
                logger.info("flag is stop , break now")
                break
            book_id = RedisClient.get_client().lpop(BOOK_ID_LIST_NAME)
            if book_id:
                book_url = "%s%s" % (BASE_URL, book_id)
                logger.info("{0} start process book url {1}".format("_thread_"+str(self.name), book_url))
                self.process(book_url, book_id, content_downloader, data2json, chapter_list_downloader)
            else:
                # Empty list: this worker is done.
                logger.info("{0} start process, but no ids from redis, break".format("_thread_"+str(self.name)))
                break

    def process(self, bookurl, book_id, content_downloader, data2json, chapter_list_downloader):
        """Download every chapter of one book and feed each chapter to data2json.

        Any exception is logged with its traceback and swallowed, so a single
        bad book cannot kill the worker process.
        """
        # time.clock() was deprecated in 3.3 and REMOVED in Python 3.8;
        # perf_counter() is the documented replacement for interval timing.
        start = time.perf_counter()
        try:
            logger.info("start to bookurl:{0}".format(bookurl))
            chapter_list = chapter_list_downloader.download(bookurl, book_id)
            if chapter_list:
                logger.info("{0} has {1} chapters. start download content".format(bookurl, len(chapter_list)))
                for chapter_item in chapter_list:
                    chapter_url = chapter_item['url']
                    content = content_downloader.download(chapter_url)
                    if content:
                        chapter_item['content'] = content
                        data2json.process_data(chapter_item)
            else:
                logger.error("{0} do not have any chapters, maybe finished_downloaded ".format(bookurl))
        except Exception as e:
            logger.error("error process book_id: {0}".format(book_id))
            logger.error(str(e))
            logger.error(traceback.format_exc())
        logger.info("{0} finished, cost {1}".format(bookurl, (time.perf_counter()-start)))


if __name__ == '__main__':
    PROCESS_NUM = 10
    # Keep references so we can join(): the original printed "all done!"
    # immediately after start(), while the children were still running.
    workers = [MuSpiderMan() for _ in range(PROCESS_NUM)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()

    print("all done!")
