'''
Program entry point: coordinates the spider, parser and database modules
to crawl all posts of a Tieba topic.
'''


import sys
import logging
from itertools import chain
from queue import Queue
from threading import Thread
from concurrent import futures
from typing import Iterable, Tuple, Optional

# Absolute imports are used here so this module can also serve as the
# program's entry point (run directly, not only as a package member).
import spider
import db
from spider import TopicInfo, TopicSpider, FloorSpider, download_pic
from tiebaparser import BaseTiebaParser, TopicPageParser, FloorPageParser
from db import TiebaDB, open_tiebadb


# Thread layout: topic spiders, floor spiders and picture downloaders are
# started in that order, with a shared upper bound on the total number of
# threads; the topic parser and the floor parser are started in order with
# an upper bound of 1 each; the database gets exactly one thread.
# topic spider:   out: puts fetched html on topic_q
# topic parser:   in : takes html from topic_q
#                 out: puts floor ids on floor_id_q, picture urls on pics_q,
#                      and post contents on db_q (database operations)
# floor spider:   in : takes floor ids from floor_id_q
#                 out: puts fetched html on floor_q
# floor parser:   in : takes html from floor_q
#                 out: puts picture urls on pics_q and post contents on db_q
# pic downloader: in : takes picture urls from pics_q and downloads them

# 全局变量
interrupt = False
log = logging.getLogger(__name__)


class SpiderThreads:
    '''Maintains the pool of threads responsible for crawling web pages.

    Pipeline overview:
      - topic spiders fetch main-topic pages and put their HTML on topic_q;
      - floor spiders take (floor_id, n_pages) from floor_id_q, fetch the
        floor-reply pages and put their HTML on floor_q;
      - picture downloaders take urls from pics_q and download the files.
    ``None`` is used as the end-of-stream sentinel on every queue.
    '''

    def __init__(self,
                 topic_info: "TopicInfo",
                 topic_q: Queue,
                 floor_id_q: Queue,
                 floor_q: Queue,
                 pics_q: Queue,
                 max_threads=10):

        self._topic_info = topic_info    # basic info of the topic to crawl
        self._topic_q = topic_q    # type: Queue[Optional[str]]  # html pages
        # NOTE: the spiders never interact with the database queue, so db_q
        # is deliberately not a parameter here.
        self._floor_id_q = floor_id_q  # type: Queue[Optional[Tuple[int, int]]]
        self._floor_q = floor_q  # type: Queue[Optional[str]]  # html pages
        self._pics_q = pics_q    # type: Queue[Optional[str]]  # picture urls
        self._max_threads = max_threads

        self.pic_dir = spider.PIC_DIR
        self.update_pics = spider.UPDATE_PICS
        self.executor = futures.ThreadPoolExecutor(
            max_workers=max_threads,
            thread_name_prefix="SpiderThread"
        )
        self._executor_thread = None    # type: Optional[Thread]

    @property
    def max_threads(self):
        return self._max_threads

    def topic_spider(self, pages: Iterable):
        '''Crawl the given main-topic pages; runs inside the thread pool.'''
        check_interrupt()
        # Named ``tspider`` so it does not shadow the ``spider`` module.
        tspider = TopicSpider(topic_info=self._topic_info)
        for i, webpage, code in tspider.get_topic_page(pages):
            log.info(f"第{i}页: state_code = {code}")
            if webpage is not None:
                self._topic_q.put(webpage.html)
            else:
                log.warning(f"第{i}页 爬取失败。")
            check_interrupt()

    def floor_spider(self):
        '''Crawl floor-reply pages; runs inside the thread pool.'''
        check_interrupt()
        while True:
            # XXX blocking here prevents the interrupt exception from firing
            id_and_npages = self._floor_id_q.get()
            if id_and_npages is None:
                # Put the sentinel back so the sibling workers can see it
                # too; otherwise some of them would block forever.
                self._floor_id_q.put_nowait(None)
                break
            else:
                id_, n_pages = id_and_npages

            # Named ``fspider`` so it does not shadow the ``spider`` module.
            fspider = FloorSpider(id_, topic_info=self._topic_info)
            for i, webpage, code in fspider.get_floor_page(range(1, n_pages+1)):
                log.info(f"楼中楼{id_} 第{i}页: state_code = {code}")
                if webpage is not None:
                    self._floor_q.put(webpage.html)
                else:
                    log.warning(f"楼中楼{id_} 第{i}页 爬取失败。")
                check_interrupt()

    def pic_downloader(self):
        '''Download pictures found in posts; runs inside the thread pool.'''
        check_interrupt()
        pics_q = self._pics_q
        pic_dir = self.pic_dir
        update_pics = self.update_pics
        while True:
            url = pics_q.get()    # XXX blocking prevents interrupt exception
            if url is None:
                # Put the sentinel back for the sibling downloader threads.
                pics_q.put_nowait(None)
                break
            if not download_pic(url, root_dir=pic_dir, update=update_pics):
                # BUGFIX: split on "/" to log just the file name —
                # str.split() splits on whitespace, so it logged the
                # entire url instead.
                log.warning(f"图片{url.split('/')[-1]}下载失败")
            check_interrupt()

    def _create_threads(self):
        '''Submit all crawling tasks to the pool (they start immediately).'''
        topic_info = self._topic_info
        max_threads = self._max_threads

        # Distribute pages evenly: the first ``r`` workers get one extra
        # page. ``int`` kept in case total_pages is not already an int.
        q, r = map(int, divmod(topic_info.total_pages, max_threads))
        page = 1
        pages_list = []
        for i in range(max_threads):
            page2 = page + (q + 1 if i < r else q)
            pages_list.append(range(page, page2))
            page = page2

        with self.executor as e:
            # Topic spider tasks, one page range each.
            topic_spider_futures = [e.submit(self.topic_spider, pages)
                                    for pages in pages_list]
            # Floor spider tasks.
            floor_spider_futures = [e.submit(self.floor_spider)
                                    for _ in range(max_threads)]
            # Picture downloader tasks.
            pic_downloader_futures = [e.submit(self.pic_downloader)
                                      for _ in range(max_threads)]

            def cancel_if_interrupted():
                '''Cancel every future that has not started running yet.

                (Renamed from ``check_interrupt`` so it no longer shadows
                the module-level function of the same name.)
                '''
                if interrupt:
                    for future in chain(topic_spider_futures,
                                        floor_spider_futures,
                                        pic_downloader_futures):
                        future.cancel()

            # Wait for each stage to finish, then push the None sentinel
            # into the corresponding downstream queue.
            futures.wait(topic_spider_futures)
            self._topic_q.put(None)
            cancel_if_interrupted()
            futures.wait(floor_spider_futures)
            self._floor_q.put(None)
            cancel_if_interrupted()
            futures.wait(pic_downloader_futures)

    def start(self):
        '''Start the background thread that drives the executor.'''
        log.info("开始运行。")
        self._executor_thread = Thread(target=self._create_threads,
                                       name="Spider Executor Thread")
        self._executor_thread.start()

    def join(self, per_loop=1):
        '''Wait for the executor thread, polling so Ctrl-C can be caught.'''
        try:
            while self._executor_thread.is_alive():
                self._executor_thread.join(per_loop)
        except KeyboardInterrupt:
            global interrupt
            interrupt = True
            self._executor_thread.join()
            raise


class ParserThread(Thread):
    '''Base thread that feeds HTML from a queue into a parser.

    Subclasses implement ``done`` (end-of-stream notification) and
    ``_put_items`` (dispatching parsed results to the output queues).
    '''

    def __init__(self, parser: "BaseTiebaParser",
                 html_q: Queue, pics_q: Queue, db_q: Queue):
        super().__init__()
        self._parser = parser
        self._html_q = html_q    # either topic_q or floor_q
        self._pics_q = pics_q
        self._db_q = db_q

    def run(self):
        # Consume HTML pages until the None sentinel arrives, then signal
        # completion downstream via done().
        for html in iter(self._html_q.get, None):
            log.info(f"    {self.name} 正在解析...")
            self._parser.feed(html)
            self._put_items()
        self.done()

    def join(self, per_loop=1):
        '''Join in short slices so KeyboardInterrupt can be handled.'''
        try:
            while self.is_alive():
                super().join(per_loop)
        except KeyboardInterrupt:
            global interrupt
            interrupt = True
            super().join(per_loop)
            raise

    def done(self):
        '''Announce end of parsing by putting None on the relevant queue.'''
        raise NotImplementedError("Method hasn't been implemented yet.")

    def _put_items(self):
        '''Move the parsed results onto the appropriate output queues.'''
        raise NotImplementedError("Method hasn't been implemented yet.")


class TopicParserThread(ParserThread):
    '''Thread that parses main-topic pages.'''

    def __init__(self, parser: "TopicPageParser", topic_q: Queue,
                 floor_id_q: Queue, pics_q: Queue, db_q: Queue,
                 **kwargs):
        super().__init__(parser, html_q=topic_q,
                         pics_q=pics_q, db_q=db_q)
        self.name = f"Topic Parser {self.name}"
        self._parser: "TopicPageParser"
        self._floor_id_q = floor_id_q

    def done(self):
        # Tell the floor spiders no more floor ids are coming.
        self._floor_id_q.put(None)
        log.info(f"{self.name}主题帖页面解析完成!!!!!!!!!!!!!!!!!")

    def _put_items(self):
        parser = self._parser
        # Hand every (floor_id, n_pages) pair to the floor spiders.
        for id_and_npages in parser.floor_id_npages:
            self._floor_id_q.put(id_and_npages)
        # Drain the parser's result deques into the downstream queues.
        for out_q, items in ((self._pics_q, parser.pics),
                             (self._db_q, parser.contents)):
            while items:
                out_q.put(items.popleft())


class FloorParserThread(ParserThread):
    '''Thread that parses floor-reply (楼中楼) pages.'''

    def __init__(self, parser: "FloorPageParser",
                 topic_parser_thread: "TopicParserThread",
                 floor_q: Queue, pics_q: Queue, db_q: Queue,
                 **kwargs):
        super().__init__(parser, html_q=floor_q,
                         pics_q=pics_q, db_q=db_q)
        # CONSISTENCY FIX: add the missing space so the thread name matches
        # TopicParserThread's "Topic Parser <name>" format.
        self.name = f"Floor Parser {self.name}"
        self._parser: "FloorPageParser"
        self._topic_parser_thread = topic_parser_thread

    def done(self):
        # The topic parser also feeds pics_q and db_q, so wait for it to
        # finish before announcing end-of-stream on those queues.
        self._topic_parser_thread.join()
        self._pics_q.put(None)
        self._db_q.put(None)
        log.info("楼中楼页面解析完成!!!!!!!!!!!!!!!!!!!!")

    def _put_items(self):
        parser = self._parser
        pics = parser.pics
        contents = parser.contents
        # Drain the parser's result deques into the downstream queues.
        while pics:
            self._pics_q.put(pics.popleft())
        while contents:
            self._db_q.put(contents.popleft())


class SqliteThread(Thread):
    '''The single thread that performs all sqlite database writes.'''

    def __init__(self, db_q: Queue, fname="tieba.db",
                 autocommit=False, update=False):
        super().__init__()
        self._db_q = db_q
        self.fname = fname
        self.autocommit = autocommit
        self.update = update

    def run(self):
        with open_tiebadb(self.fname) as database:
            database.autocommit = self.autocommit
            database.update = self.update
            # Write post contents until the None sentinel arrives.
            while True:
                content = self._db_q.get()
                if content is None:
                    break
                database.write(content)
            # Flush everything in one final commit.
            database.connect().commit()
        log.info("写数据库完成")

    def join(self, per_loop=1):
        '''Join in short slices so KeyboardInterrupt can be handled.'''
        try:
            while self.is_alive():
                super().join(per_loop)
        except KeyboardInterrupt:
            global interrupt
            interrupt = True
            super().join(per_loop)
            raise


def check_interrupt():
    '''Raise KeyboardInterrupt when the global ``interrupt`` flag is set.'''
    if not interrupt:
        return
    raise KeyboardInterrupt


def sync(topic_id: int = 1766018024, pages: Iterable = range(1, 4)):
    '''Single-threaded synchronous crawl (debugging helper).

    Generalized: the topic id and page range, previously hard-coded,
    are now parameters with the original values as defaults.

    :param topic_id: id of the topic to crawl
    :param pages: iterable of page numbers to fetch
    '''
    # Named ``sync_spider`` so it does not shadow the ``spider`` module.
    sync_spider = TopicSpider(topic_id)
    parser = TopicPageParser()
    for i, webpage, code in sync_spider.get_topic_page(pages):
        print(f"第{i}页: state_code = {code}")
        if webpage is not None:
            parser.feed(webpage.html)
        else:
            print(f"第{i}页 爬取失败。")


def config_log():
    '''Configure the module logger to emit INFO+ records to stdout.'''
    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(
        logging.Formatter("%(levelname)s:%(name)s:%(message)s"))
    log.setLevel(logging.INFO)
    log.addHandler(handler)


def main(topic_id: int = 3292783059, max_threads: int = 15):
    '''Entry point: wire up the queues and threads, then run the pipeline.

    Generalized: the topic id and thread count, previously hard-coded,
    are now parameters with the original values as defaults.

    :param topic_id: id of the topic to crawl
    :param max_threads: upper bound for each group of spider threads
    '''
    config_log()

    # Queues linking the pipeline stages; None is the end-of-stream sentinel.
    topic_q = Queue()
    db_q = Queue()
    floor_id_q = Queue()
    floor_q = Queue()
    pics_q = Queue()

    # Fetch the topic's metadata (total page count etc.) up front.
    topic_info = TopicInfo(topic_id=topic_id)
    if not topic_info.download_topic_info():
        log.error("获取帖子信息失败")
        sys.exit(1)

    # Spider threads.
    spider_threads = SpiderThreads(
        topic_info=topic_info,
        topic_q=topic_q,
        floor_id_q=floor_id_q,
        floor_q=floor_q,
        pics_q=pics_q,
        max_threads=max_threads
    )
    spider_threads.pic_dir = spider.PIC_DIR    # "data/pics"
    spider_threads.update_pics = spider.UPDATE_PICS    # False

    # Parser threads (one per parser kind).
    topic_parser_thread = TopicParserThread(
        TopicPageParser(),
        topic_q=topic_q,
        floor_id_q=floor_id_q,
        pics_q=pics_q,
        db_q=db_q
    )
    floor_parser_thread = FloorParserThread(
        FloorPageParser(),
        topic_parser_thread=topic_parser_thread,
        floor_q=floor_q,
        pics_q=pics_q,
        db_q=db_q
    )

    # The single database-writer thread.
    sqlite_thread = SqliteThread(db_q=db_q, update=False)
    sqlite_thread.fname = db.DEFAULT_PATH    # "data/tieba.db"
    sqlite_thread.update = db.UPDATE_DB    # False
    # Do not pass autocommit=True: committing after every write makes
    # sqlite very slow. See https://www.sqlite.org/faq.html#q19

    spider_threads.start()
    topic_parser_thread.start()
    floor_parser_thread.start()
    sqlite_thread.start()

    # Join in the order the pipeline drains.
    spider_threads.join()
    topic_parser_thread.join()
    floor_parser_thread.join()
    sqlite_thread.join()

    log.info("主线程完成。")


if __name__ == "__main__":
    main()
