# coding:utf-8
import scrapy
from scrapy.selector import Selector
from scrapy.spiders import Spider
from ..items import NewsItem
from ..spiders import utils_crawler

# Human-readable news-source label stored on every scraped item
# (u'求是网' = "Qiushi" / qstheory.cn, the site this spider crawls).
source = u'求是网'


class BwtjSpider(Spider):
    """Spider for the "bwtj" section of qstheory.cn.

    Crawl flow:
      1. ``parse``         -- find the listing iframe on the index page and follow it.
      2. ``parse_next``    -- pull article URLs out of inline ``<script>`` text.
      3. ``parse_details`` -- scrape one article page into a ``NewsItem``.
    """

    name = "bwtj"
    base_domains = "http://www.qstheory.cn/"
    start_urls = [
        "http://www.qstheory.cn/bwtj/index.htm",
    ]

    def parse(self, response):
        """Extract the listing iframe URL from the index page and request it.

        The iframe ``src`` is site-relative; ``../`` segments are stripped and
        single quotes percent-encoded before prefixing ``base_domains``.
        ``&page=1`` selects the first listing page.
        """
        srcs = response.xpath(
            '/html/body/div[2]/div/div[2]/div[1]/div[2]/div[1]/iframe/@src'
        ).extract()
        if not srcs:
            # Page layout changed or the iframe is absent: log instead of
            # crashing with IndexError (the original did extract()[0]).
            self.logger.warning('No listing iframe found on %s', response.url)
            return
        listing_url = (self.base_domains
                       + srcs[0].replace('../', '').replace('\'', '%27')
                       + '&page=1')
        yield scrapy.Request(url=listing_url, callback=self.parse_next)

    def parse_next(self, response):
        """Extract article URLs embedded in listing-page ``<script>`` blocks.

        Each script's text is expected to contain an absolute article URL;
        it is sliced out between the last ``http://`` and the last ``.htm``.
        Script blocks without a well-formed URL are skipped.
        """
        scripts = response.xpath(
            '//h3//script[@type="text/javascript"][1]//text()'
        ).extract()
        for text in scripts:
            start = text.rfind('http://')
            end = text.rfind('.htm')
            if start == -1 or end == -1 or end < start:
                # No recognizable article URL in this script block.
                continue
            item_url = text[start:end + 4]
            # Use the spider logger instead of a leftover debug print().
            self.logger.debug('Found article URL: %s', item_url)
            yield scrapy.Request(url=item_url, callback=self.parse_details)

    def parse_details(self, response):
        """Scrape a single article page into a ``NewsItem``.

        Fields: title, href (page URL), time (normalized by
        ``utils_crawler.deal_date``), content (normalized by
        ``utils_crawler.deal_content``), source label, and image URLs.
        """
        item = NewsItem()

        item['title'] = response.xpath(
            '//div[@class="main"]/h1/text()').extract()[0].strip()
        item['href'] = response.url
        item['time'] = utils_crawler.deal_date(
            response.xpath('//div[@class="metadata"]/text()').extract()[0])
        item['content'] = utils_crawler.deal_content(
            response.xpath('//div[@class="highlight"]//p/text()').extract())
        item['source'] = source
        # May be relative URLs; presumably resolved by the image pipeline --
        # TODO(review): confirm against the project's pipeline settings.
        item['image_urls'] = response.xpath(
            '//div[@class="highlight"]//img/@src').extract()

        return item
