# -*- coding: utf-8 -*-
"""
------------------------------------------------------------------------------
    File Name:  spiderman
    Author   :  wanwei1029
    Date     :  2018/10/9
    Desc     : 本来想用scrapy，但是scrapy对于多线程支持不够好，所以写个支持多线程的spiderman
    这个爬虫遇到的问题：
    1：需要多线程跑。
    2：html解析，不能用lxml，会使有些文件内容丢失。
    3：以标题作为文件名，要去掉特殊字符
------------------------------------------------------------------------------
"""
# import sys
# sys.path.append("D:\\pycharmProjects\\python_samp")

import threading
import time
import os
import samp.spider.aikantxt.spider_utils as utils
from samp.spider.aikantxt.content_downloader import ContentDownloader
from samp.spider.aikantxt.content_downloader import ChapterListDownloader
from samp.spider.aikantxt.data2json import DataToJson
from samp.spider.aikantxt.spider_utils import RedisClient
import samp.samp_logging as sl


# Redis list key holding the queue of book ids still to be crawled.
BOOK_ID_LIST_NAME = "bookIdList"
# Redis key acting as a run/stop flag; workers exit when its value is "stop".
EXIST_FLAG = "aikanFlag"
# Book index pages live at BASE_URL + <book id> (see SpiderMan.run).
BASE_URL = "https://www.aikantxt.la/aikan"

# Module-level logger; writes to aikanspider.log under the configured save dir.
logger = sl.get_logger("spiderman", os.path.join(utils.SAVE_DIR, "aikanspider.log"))


class SpiderMan(threading.Thread):
    """Worker thread that pops book ids from a Redis queue, downloads every
    chapter of each book, and hands the content to the JSON writer.

    Start several instances to crawl in parallel; each one runs until the
    stop flag is set in Redis or the id queue is drained.
    """

    def __init__(self, thread_id):
        """Create a worker.

        :param thread_id: numeric id, used only to tag log messages.
        """
        threading.Thread.__init__(self)
        self.content_downloader = ContentDownloader()
        self.data2json = DataToJson()
        self.chapter_list_downloader = ChapterListDownloader()
        self.thread_id = thread_id

    def run(self):
        """Worker loop: pop book ids until told to stop or the queue is empty."""
        while True:
            # NOTE(review): comparing against the str "stop" assumes the Redis
            # client is created with decode_responses=True — confirm in
            # RedisClient; otherwise get() returns bytes and never matches.
            flag = RedisClient.get_client().get(EXIST_FLAG)
            if flag == "stop":
                logger.info("flag is stop , break now")
                break
            book_id = RedisClient.get_client().lpop(BOOK_ID_LIST_NAME)
            if book_id:
                # Book index URL is the base URL with the numeric id appended.
                book_url = "%s%s" % (BASE_URL, book_id)
                logger.info("{0} start process book url {1}".format("_thread_"+str(self.thread_id), book_url))
                self.process(book_url, book_id)
            else:
                # lpop returned nothing: the queue is drained, stop this worker.
                logger.info("{0} start process, but no ids from redis, break".format("_thread_"+str(self.thread_id)))
                break

    def process(self, bookurl, book_id):
        """Download one book: fetch its chapter list, then every chapter body.

        Each successfully downloaded chapter dict gets its 'content' field
        filled in and is passed to the JSON writer. Any exception is logged
        (with traceback) and swallowed so the worker loop keeps going.

        :param bookurl: full URL of the book's index page.
        :param book_id: id of the book, used for logging and downloader bookkeeping.
        """
        # time.clock() was removed in Python 3.8; perf_counter() is the
        # recommended monotonic clock for measuring elapsed time.
        start = time.perf_counter()
        try:
            logger.info("start to bookurl:{0}".format(bookurl))
            chapter_list = self.chapter_list_downloader.download(bookurl, book_id)
            if chapter_list:
                logger.info("{0} has {1} chapters. start download content".format(bookurl, len(chapter_list)))
                for chapter_item in chapter_list:
                    chapter_url = chapter_item['url']
                    content = self.content_downloader.download(chapter_url)
                    if content:
                        chapter_item['content'] = content
                        self.data2json.process_data(chapter_item)
            else:
                logger.error("{0} do not have any chapters, maybe finished_downloaded ".format(bookurl))
        except Exception:
            # logger.exception records the full traceback; the original
            # logger.error(str(e)) lost it.
            logger.exception("error process book_id: {0}".format(book_id))
        logger.info("{0} finished, cost {1}".format(bookurl, (time.perf_counter()-start)))

if __name__ == '__main__':
    # Debug entry point: crawl a single known book synchronously on the main
    # thread instead of spawning a pool of SpiderMan workers.
    spider = SpiderMan(1)
    spider.process("https://www.aikantxt.la/aikan18681/", 18681)

    print("all done!")
