# coding:utf-8

import scrapy
from scrapy.selector import Selector
from ..items import NewsItem
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from ..spiders import utils_crawler

# Source-site name ("Observer Net") attached to every scraped NewsItem.
source = u'观察者网'


class GuanChaSpider(CrawlSpider):
    """Crawl guancha.cn's video-news listing pages and scrape each article.

    The CrawlSpider rules follow the paginated listing URLs; every listing
    page is handed to ``parse_item``, which requests the individual article
    pages and scrapes them into ``NewsItem`` objects via ``parse_details``.
    """
    name = "guancha"
    allowed_domains = ["www.guancha.cn"]
    start_urls = [
        "http://www.guancha.cn/mainnews-sp/list_1.shtml"
    ]
    # Rules for following pagination. Raw string avoids the invalid-escape
    # warning for \d, and the '.' before "shtml" is escaped so the pattern
    # matches only a literal dot (the original unescaped '.' matched any char).
    rules = [
        Rule(LinkExtractor(allow=(r"/mainnews-sp/list_(\d+)\.shtml",)),
             follow=True, callback='parse_item')
    ]

    def parse_item(self, response):
        """Extract article links from one listing page.

        Yields a ``scrapy.Request`` per article URL, with ``parse_details``
        as the callback.
        """
        hrefs = response.xpath('//div[@class="search_result"]//a/@href').extract()
        for href in hrefs:
            if '.shtml' in href:
                # response.urljoin handles both relative and absolute hrefs;
                # hand-concatenating 'http://' + domain + href broke when the
                # page already contained an absolute URL.
                yield scrapy.Request(url=response.urljoin(href),
                                     callback=self.parse_details)

    def parse_details(self, response):
        """Scrape a single article page into a NewsItem and return it."""
        item = NewsItem()

        # extract_first(default=...) avoids the IndexError that extract()[0]
        # raised on pages whose layout lacks the expected node.
        title = response.xpath('//h2[@class="content-title1"]/text()').extract_first(default=u'')
        item['title'] = title.strip()
        item['href'] = response.url
        # Strip the '发表时间：' ("publish time:") label on the unicode text
        # directly. The old .encode('utf-8').replace(...) produced bytes and
        # breaks under Python 3, where bytes.replace rejects str arguments.
        time_text = response.xpath('//div[@class="txt-info1"]/p/text()').extract_first(default=u'')
        item['time'] = time_text.replace(u'发表时间：', u'')
        item['content'] = utils_crawler.deal_content(
            response.xpath('//div[@class="all-txt"]//p/text()').extract())
        item['source'] = source
        item['image_urls'] = response.xpath('//div[@class="all-txt"]//img/@src').extract()

        return item
