# -*- coding: utf-8 -*-

# 太平洋，新奥拓论坛 (PCauto forum spider — Suzuki New Alto Q&A board)

import scrapy
import re
from scrapy import Request
from pcauto.items import PcautoItem
import datetime
from scrapy.loader.processors import MapCompose, Join


class ForumSpider(scrapy.Spider):
    """Crawl the PCauto (太平洋汽车网) Q&A forum for the Suzuki New Alto.

    Flow:
        parse()          -- discover sub-category links on the entry page;
        parse_sub_page() -- paginate a sub-category listing, collect topics;
        parse_topic()    -- paginate a topic thread, yield one PcautoItem
                            per post ("floor").
    """

    name = "forum"
    allowed_domains = ["pcauto.com.cn"]
    # Forum name and classification; edit these per target forum.
    forum_name = [u'太平洋汽车网']
    brand = [u"铃木"]
    car_type = [u"新奥拓"]
    main_type = [u"车问答"]

    # Forum entry URL; change this when targeting a different forum.
    start_urls = (
        'http://bbs.pcauto.com.cn/wenda-16488.html',
    )

    def parse(self, response):
        """Follow every sub-category link found in the filter block.

        The first <a> is skipped: it is the "all categories" pseudo-link
        in the page markup.  The category label travels in request meta.
        """
        for selector in response.xpath('//*[@id="JFilterCont"]//a')[1:]:
            url = selector.xpath('@href').extract()[0]
            sub_type = selector.xpath('text()').extract()
            yield Request(url=url, meta={'sub_type': sub_type}, callback=self.parse_sub_page)

    def parse_sub_page(self, response):
        """Handle one listing page of a sub-category.

        Follows the "next page" link (recursing into this method) and
        schedules every topic on the current page for parse_topic().
        """
        sub_type = response.meta.get('sub_type')

        # Pagination: follow the "next" link when present.
        next_selector = response.xpath('//div[@class="pager"]//a[@class="next"]//@href')
        for url in next_selector.extract():
            yield Request(url=url, meta={'sub_type': sub_type}, callback=self.parse_sub_page)

        # Topic links on this listing page.
        topic_selector = response.xpath('//div[@class="question-main"]//div[@class="txt-tit"]//a')
        for selector in topic_selector:
            title = selector.xpath('text()').extract()
            url = selector.xpath('@href').extract()[0]
            match = re.search(r'\d+', url)
            if match is None:
                # Malformed topic URL with no numeric id: skip it instead of
                # crashing with AttributeError on .group().
                continue
            yield Request(
                url=url,
                meta={'sub_type': sub_type, 'title': title, 'topic_id': match.group(0)},
                callback=self.parse_topic
            )

    def parse_topic(self, response):
        """Handle one page of a topic thread.

        Follows intra-thread pagination, then emits one PcautoItem per
        post div (excluding the pinned "post_list_top" block).
        """
        sub_type = response.meta.get('sub_type')
        topic_id = response.meta.get('topic_id')
        title = response.meta.get('title')

        # Values that are constant for every post of this topic.
        question = title
        question_img = [u"无"]

        # Intra-thread pagination.
        next_selector = response.xpath('//div[@id="pager"]//a[contains(@class, "next")]/@href')
        for url in next_selector.extract():
            yield Request(
                url,
                meta={'sub_type': sub_type, 'title': title, 'topic_id': topic_id},
                callback=self.parse_topic
            )

        post_xpath = ('//div[@id="post_list"]'
                      '/div[contains(@class,"post") and not(contains(@class, "post_list_top"))]')
        for selector in response.xpath(post_xpath):
            # BUG FIX: these per-post fields were initialised once per page in
            # the original code but only conditionally overwritten inside the
            # loop, so a value set on one post (e.g. "has image" or "is best
            # answer") leaked into every later post on the same page.  Reset
            # them for each post.
            replay_to = [u'楼主的帖子']
            replay_img = [u"无"]
            best_answer = [u"否"]

            if selector.xpath('table//div[@id="post_floor_1"]'):
                # Floor 1 is the original poster's question.
                replay_id = [u'楼主']
                replay = selector.xpath('table//div[contains(@class,"post_msg")]//text()').extract()
                if selector.xpath('table//div[contains(@class,"post_msg")]//img'):
                    replay_img = [u'有']
            else:
                # Floors 2-5 carry conventional Chinese nicknames; otherwise
                # fall back to the literal floor label from the page.
                if selector.xpath('table//div[@id="post_floor_2"]'):
                    replay_id = [u'沙发']
                elif selector.xpath('table//div[@id="post_floor_3"]'):
                    replay_id = [u'板凳']
                elif selector.xpath('table//div[@id="post_floor_4"]'):
                    replay_id = [u'地板']
                elif selector.xpath('table//div[@id="post_floor_5"]'):
                    replay_id = [u'地下室']
                else:
                    replay_id = selector.xpath('table//div[@class="post_floor"]//text()').extract()
                replay = selector.xpath('table//div[contains(@class,"post_msg")]//text()').extract()
                if selector.xpath('table//div[contains(@class, "best_answer_sign")]'):
                    best_answer = [u'是']
                if selector.xpath('table//span[@class="cite"]//text()'):
                    replay_to = selector.xpath('table//span[@class="cite"]//text()').extract()
                if selector.xpath('table//div[contains(@class,"post_msg")]//img'):
                    replay_img = [u'有']

            i = PcautoItem()
            i['name'] = self.forum_name
            i['url'] = response.url
            i['brand'] = self.brand
            i['car_type'] = self.car_type
            i['type'] = self.main_type
            i['sub_type'] = sub_type
            i['title'] = title
            i['topic_id'] = topic_id
            i['question'] = question
            i['question_img'] = question_img
            # NOTE(review): unicode.title title-cases Latin text inside replies;
            # confirm that is intended rather than just unicode.strip.
            i['replay_id'] = MapCompose(unicode.strip, unicode.title)(replay_id)
            i['replay_img'] = replay_img
            i['replay'] = MapCompose(unicode.strip, unicode.title)(replay)
            i['best_answer'] = best_answer
            i['replay_to'] = MapCompose(unicode.strip, unicode.title)(replay_to)
            i['datetime'] = [datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')]
            yield i





