# -*- coding: utf-8 -*-
__author__ = 'Yh'

from BBSSpider.dto.db_utils import ForumUtils

from BBSSpider.settings import START_URL_BASE
from BBSSpider.items import ForumsItem, ArticleItem, ReplyItem

from scrapy.selector import Selector

import time


# Yield (absolute URL, category text) pairs for every category link in the
# board-index sidebar (the anchor list inside <div id="div1">).
def get_left_item_and_href(response):
    anchors = response.selector.xpath("//*[@id='div1']/a")
    hrefs = anchors.xpath("./@href").extract()
    texts = anchors.xpath("./text()").extract()
    for idx in range(min(len(hrefs), len(texts))):
        yield (START_URL_BASE + hrefs[idx], texts[idx])


# Second level of the forum crawl: parse one category page into forum rows.
def get_formus_header(response):
    '''
    Parse a category page listing forums.

    :param response: scrapy response for a category listing page.
    :return: generator of tuples
        (ForumsItem parsed at this level,
         absolute URL of the next level to crawl,
         True if the row is a second-level board ("二级版面"), else False)
    '''
    # The table header and the first data row use different markup from the
    # remaining rows, so the first row is handled separately.
    # --- first data row (tr[2]; tr[1] is the header) ---
    first_line = response.selector.xpath("//table/tbody/tr[2]/td")
    if first_line:
        lt = first_line.xpath('./text()').extract()
        forums = ForumsItem()
        if lt[2] == '[二级版面]':
            # Second-level board: only its name and link appear here;
            # the rest is collected one level deeper.
            forums['sub_category'] = '二级版面'
            forums['forum_name'] = first_line.xpath('./a/text()').extract()[1]
            link = first_line.xpath('./a/@href').extract()[1]
            yield (forums, START_URL_BASE + link, True)
        else:
            # lt[2] looks like '[<category>]' — strip the surrounding brackets.
            forums['sub_category'] = lt[2][1:-1]
            forums['article_num'] = int(lt[-3])
            forums['popularity'] = int(lt[-2])

            # Anchor texts: [chinese name, board name, board masters...].
            ot = first_line.xpath('./a/text()').extract()
            forums['chinese_name'] = ot[0]
            forums['forum_name'] = ot[1]
            forums['master'] = ot[2:]

            # Anchor hrefs line up with the texts above; hrefs[1] is the
            # board link, hrefs[2:] are the board-master profile links.
            hrefs = first_line.xpath('./a/@href').extract()
            forums['master_link'] = [START_URL_BASE + x for x in hrefs[2:] if not x.isspace()]

            yield (forums, START_URL_BASE + hrefs[1], False)

    # --- remaining rows ---
    # NOTE(review): this selects '//table/tbody/td' (not 'tr/td') — presumably
    # matching the page's actual markup; confirm against the live HTML.
    # Cells come back flat, 9 per logical row; rejoin them row by row and
    # re-parse each rejoined fragment with a fresh Selector.
    other_lines = response.selector.xpath('//table/tbody/td').extract()
    jlines = [''.join(other_lines[x * 9: (x + 1) * 9]) for x in range(len(other_lines) // 9)]

    for jline in jlines:
        forums = ForumsItem()

        text = Selector(text=jline).xpath('//text()').extract()
        links = Selector(text=jline).xpath('//a/@href').extract()
        if text[3] == '[二级版面]':
            # Second-level board row: name and link only.
            forums['sub_category'] = '二级版面'
            forums['forum_name'] = text[4]

            yield (forums, START_URL_BASE + links[0], True)
        else:
            forums['sub_category'] = text[3][1:-1]
            forums['chinese_name'] = text[1]
            forums['forum_name'] = text[4]
            # Drop whitespace-only text nodes between the master names.
            forums['master'] = [x for x in text[5:-3] if not x.isspace()]
            forums['article_num'] = int(text[-3])
            forums['popularity'] = int(text[-2])
            # NOTE(review): unlike the first-row branch, these master links
            # are NOT prefixed with START_URL_BASE — confirm intentional.
            forums['master_link'] = [x for x in links[2:] if not x.isspace()]

            yield (forums, START_URL_BASE + links[0], False)


def get_article_replay_content(response, max_sequence):
    '''
    Walk a board's article list and yield every new post or reply.

    The rows are processed in reverse document order; the scan stops at the
    first row whose sequence number is <= *max_sequence* (already crawled).

    :param response: scrapy response of the board's article-list page.
    :param max_sequence: highest sequence number already stored.
    :return: generator of (ArticleItem or ReplyItem, absolute article URL).
    '''
    # [1:] skips the table header row.
    rows = reversed(response.selector.xpath('//table/tbody/tr')[1:])
    for row in rows:
        num = row.xpath('./td/text()').extract()[0]
        if num and num.isdigit():
            sequence = int(num)
            if sequence <= max_sequence:
                # Everything from here on was crawled in a previous run.
                break
            hreftexts = row.xpath('./td/a/text()').extract()
            title = hreftexts[-1]
            author = hreftexts[0]
            link = START_URL_BASE + row.xpath('./td/a/@href').extract()[-1]

            # A 'Re:' title marks a reply; otherwise it is an original post.
            # (Fixes the original code, which allocated a throwaway
            # ArticleItem before the branch on every digit row.)
            item = ReplyItem() if title.startswith('Re:') else ArticleItem()
            item['sequence'] = sequence
            item['author'] = author
            item['title'] = title

            yield (item, link)


def get_article_replay_detail(response, item):
    '''
    Fill *item* with the body text, post date and image info of an article.

    :param response: scrapy response of an article/reply detail page.
    :param item: ArticleItem or ReplyItem to populate in place.
    :return: (item, absolute URL of the same-topic article list)
    '''
    div = response.selector.xpath("//*[@id='filecontent']")
    content = div.xpath('./text()')
    # Post dates look like "Mon Jan  1 12:34:56 2018" inside the body text.
    # Raw string fixes the invalid \w/\d/\s escape sequences of the original;
    # renamed from 're' to avoid shadowing the regex module name.
    date_pattern = r'[\w]{3}\s+[\w]{3}\s+[\d]{1,2}\s+[0-9:]{8}\s+[\d]{4}'
    date = content.re(date_pattern)[0]
    item['date'] = time.mktime(time.strptime(date, "%a %b %d %H:%M:%S %Y"))
    # Reuse 'content' instead of re-running the same xpath query.
    item['detail'] = ''.join(content.extract())

    # If the post embeds images, record their URLs and basenames so a
    # download pipeline can fetch and store them.
    imgs = div.xpath('./a/img/@src').extract()
    item['image_urls'] = imgs
    item['image_names'] = [img.split('/')[-1] for img in imgs]

    # Link to the "same topic" article list (third anchor in the nav area).
    theme_list = response.selector.xpath('//table/tr/td/div/div/a/@href').extract()[2]

    return item, START_URL_BASE + theme_list


def bulid_replay_to_article(response):
    # Reply sequence numbers sit in the first table column, after two
    # header cells; convert them to ints (or return [] when none exist).
    raw_values = response.selector.xpath('//table/tr/td[1]/text()').extract()[2:]
    return [int(value) for value in raw_values] if raw_values else []

def build_article_to_replay(response, max_sequence, item_sequence):
    # The parent article's sequence number is the second cell of the
    # first table column.
    cells = response.selector.xpath('//table/tr/td[1]/text()').extract()
    parent_sequence = int(cells[1])

    # Link this reply to its parent only when the parent was already crawled.
    if parent_sequence <= max_sequence:
        ForumUtils.update_replay_sequence(ForumUtils, parent_sequence, item_sequence)

    return parent_sequence


def get_article_pre_page(response):
    # Follow the "previous page" link, falling back to the "first page"
    # link; the trailing space inside each title fragment matches the
    # site's markup and must be kept.
    selector = response.selector
    for title_fragment in (u'上一页 ', u'第一页 '):
        query = u"//a[contains(@title,'%s')]/@href" % title_fragment
        href = selector.xpath(query).extract_first()
        if href:
            return START_URL_BASE + href
    return None