# -*- coding: utf-8 -*-
__author__ = 'Yh'

import copy
import logging
import logging.handlers

import scrapy
from scrapy.spiders import CrawlSpider

from BBSSpider import settings
from BBSSpider.dto.db_utils import LectureUtils, ForumUtils
from BBSSpider.items import *
from BBSSpider.spiders import forums_spider_utils


class BbsSpider(CrawlSpider):
    """Crawls the XJTU BBS: lecture announcements and forum boards.

    Request flow for forums:
        start_requests -> parse_forums -> parse_category -> parse_article
        -> parse_article_detail -> build_replay / build_article
    Lecture flow (currently disabled in start_requests):
        parse_lecture -> get_content
    """

    name = 'BBSSpider'

    allowed_domains = [
        'xjtu.edu.cn'
    ]

    # No CrawlSpider link-extraction rules; all requests are built manually.
    rules = (

    )

    def start_requests(self):
        # Crawl lecture announcements (disabled):
        # yield scrapy.Request(urls_join(settings.START_URL_BASE, settings.LECTURE_URL), self.parse_lecture)
        yield scrapy.Request(urls_join(settings.START_URL_BASE, settings.LINK_LEFT), self.parse_forums)

    # --- lecture logging: configured once at class-definition time ---
    lecture_log_file = 'log/lecture.log'
    lecture_log_fmt = '%(asctime)s : %(name)s - %(message)s'

    logger = logging.getLogger('lecture_log')
    lecture_handler = logging.handlers.RotatingFileHandler(lecture_log_file,
                                                           maxBytes=1024 * 1024,
                                                           encoding='utf-8',
                                                           backupCount=5)
    lecture_handler.setFormatter(logging.Formatter(lecture_log_fmt))

    logger.addHandler(lecture_handler)
    logger.setLevel(logging.INFO)

    # `global` inside a class body makes the assignment below create a
    # module-level name instead of a class attribute; parse_lecture reads
    # `max_sequence` as a global.
    global max_sequence
    lecture_utils = LectureUtils()
    # NOTE(review): the instance is also passed as an explicit argument —
    # confirm the method signature in db_utils; this looks accidental.
    max_sequence = lecture_utils.get_lecture_max_sequence(lecture_utils)

    def parse_lecture(self, response, logger=logger):
        """Entry point for a lecture list page.

        Yields one detail request per table row (handled by get_content) and
        follows the previous-page link until an already-crawled sequence
        number is encountered.
        """
        # Whether older pages still need to be fetched.
        is_update = True

        # Skip the header row; iterate in reverse so the newest entries are
        # seen last and the stop condition below fires as early as possible.
        for content in reversed(response.selector.xpath("//td[@class='level1']/table/tbody/tr")[1:]):
            lecture_item = LectureItem()  # one item per table row

            item_list1 = content.xpath('./td/text()').extract()
            sequence = item_list1[0]
            lecture_item['sequence'] = int(sequence)
            lecture_item['date'] = item_list1[2]

            item_list2 = content.xpath("./td/a/text()").extract()
            lecture_item['author'] = item_list2[0]
            lecture_item['title'] = item_list2[1]

            lecture_item['image_urls'] = []

            detail_link = content.xpath("./td/a/@href").extract()[1]
            content_url = urls_join(settings.START_URL_BASE, detail_link)
            logger.info(' content url: %s', content_url)

            request = scrapy.Request(content_url, callback=self.get_content)
            request.meta['item'] = lecture_item
            yield request

            # A sequence below the stored maximum was crawled last time
            # already — no need to fetch older pages.
            if sequence and int(sequence) < int(max_sequence):
                is_update = False
                break

        pre_link_list = response.selector.xpath(u"//a[contains(@title,'上一页 ')]/@href").extract_first() \
                        or response.selector.xpath(u"//a[contains(@title,'第一页 ')]/@href").extract_first()

        if is_update and pre_link_list:
            logger.info(' pre_link url: %s', pre_link_list)
            yield scrapy.Request(urls_join(settings.START_URL_BASE, pre_link_list), self.parse_lecture)

    def parse_forums(self, response):
        """Set up the forums logger and fan out one request per left-menu board."""
        forums_log_file = 'log/forums.log'
        forums_log_fmt = '%(asctime)s : %(name)s - %(message)s'

        logger = logging.getLogger('forums_log')
        # Only attach the handler once: the original re-added it on every
        # call, which duplicates each log line.
        if not logger.handlers:
            forums_handler = logging.handlers.RotatingFileHandler(forums_log_file,
                                                                  maxBytes=1024 * 1024,
                                                                  encoding='utf-8',
                                                                  backupCount=5)
            forums_handler.setFormatter(logging.Formatter(forums_log_fmt))
            logger.addHandler(forums_handler)
        logger.setLevel(logging.INFO)

        for (link, category) in forums_spider_utils.get_left_item_and_href(response):
            logger.info('left_item_link[%s]: %s', category, link)
            request = scrapy.Request(link, callback=self.parse_category)
            request.meta['category'] = category
            yield request

    def parse_category(self, response):
        """Handle one board page.

        Recurses into second-level boards; for leaf boards, requests the
        article list only when the board's article count exceeds the highest
        sequence stored at the previous crawl.
        """
        logger = logging.getLogger('forums_log')
        category = response.meta['category']
        for (item, next_link, has_sub) in forums_spider_utils.get_formus_header(response):
            logger.info('sub_category_link: %s', next_link)
            if has_sub:  # second-level board: recurse with "parent:child" category
                # Bug fix: `category.join([':', name])` produced
                # ":<category><name>"; the intent is "<category>:<name>".
                yield scrapy.Request(next_link, callback=self.parse_category,
                                     meta={'category': ':'.join([category, item['forum_name']])})
            else:
                forums = copy.copy(item)
                forums['category'] = category

                # Highest article sequence stored after the last crawl; skip
                # boards whose article count has not grown since then.
                # NOTE(review): the class itself is passed as the first arg —
                # confirm against db_utils whether this should be an instance call.
                max_sequence = ForumUtils.get_popularity_by_sub_category(ForumUtils, category)
                if forums['article_num'] > max_sequence:
                    yield scrapy.Request(next_link, callback=self.parse_article,
                                         meta={'item': forums,
                                               'max_sequence': max_sequence})

    def parse_article(self, response):
        """Request the detail page of every new article/reply on a list page."""
        forums = response.meta['item']
        max_sequence = response.meta['max_sequence']

        for (item, next_link) in forums_spider_utils.get_article_replay_content(response, max_sequence):
            item['header'] = forums
            request = scrapy.Request(next_link, callback=self.parse_article_detail)
            request.meta['item'] = item
            request.meta['max_sequence'] = max_sequence

            yield request

    def parse_article_detail(self, response):
        """Build the full ArticleItem/ReplyItem for one detail page and
        dispatch the follow-up request that links articles and replies."""
        logger = logging.getLogger('forums_log')
        item = response.meta['item']
        max_sequence = response.meta['max_sequence']
        # The item built in whichever branch runs; used for the
        # previous-page request below.
        detail = None

        if item and isinstance(item, ArticleItem):
            article = ArticleItem()
            # next_link points to the same-topic list page, used to collect
            # the replies under this post.
            article, next_link = forums_spider_utils.get_article_replay_detail(response, article)
            article['sequence'] = item['sequence']
            article['title'] = item['title']
            article['author'] = item['author']
            article['header'] = item['header']
            logger.info('article_link: %s', next_link)
            request = scrapy.Request(next_link, self.build_replay)
            request.meta['item'] = article
            detail = article

            yield request

        elif item and isinstance(item, ReplyItem):
            replay = ReplyItem()
            replay, next_link = forums_spider_utils.get_article_replay_detail(response, replay)
            replay['sequence'] = item['sequence']
            replay['title'] = item['title']
            replay['author'] = item['author']

            logger.info('replay_link: %s', next_link)
            request = scrapy.Request(next_link, self.build_article)
            request.meta['item'] = replay
            request.meta['max_sequence'] = max_sequence
            detail = replay

            yield request

        pre_link = forums_spider_utils.get_article_pre_page(response)
        # Bug fix: the original referenced `replay` here unconditionally,
        # raising NameError whenever the ArticleItem branch ran.
        if pre_link and detail is not None:
            logger.info('pre_article_page: %s', pre_link)
            yield scrapy.Request(pre_link, callback=self.parse_article,
                                 meta={'item': detail, 'max_sequence': max_sequence})

    def build_replay(self, response):
        """Attach the reply list of a topic to its ArticleItem and yield it."""
        article = response.meta['item']
        replays = forums_spider_utils.bulid_replay_to_article(response)
        article['replys'] = replays

        yield article

    def build_article(self, response):
        """Resolve the parent article of a reply and yield the ReplyItem."""
        replay = response.meta['item']
        max_sequence = response.meta['max_sequence']

        replay['parent_sequence'] = forums_spider_utils.build_article_to_replay(response, max_sequence, replay['sequence'])

        yield replay

    @staticmethod
    def get_content(response):
        """Fill in the body text of a lecture item and yield it."""
        item = response.meta['item']
        item['details'] = ''.join(response.selector.xpath("//div[@id='filecontent']/text()").extract())
        yield item


def urls_join(*parts):
    """Concatenate URL fragments verbatim.

    No separator is inserted — callers are responsible for slash placement
    between the base URL and the relative path.
    """
    pieces = []
    for fragment in parts:
        pieces.append(fragment)
    return "".join(pieces)