# coding:utf-8

import scrapy
from scrapy.selector import Selector
from ..items import NewsItem
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from ..spiders import utils_crawler

# Publisher name stamped into every scraped NewsItem's 'source' field
# (u'环球网' = Huanqiu / Global Times).
source = u'环球网'


class HuanQiuSpider(CrawlSpider):
    """Crawl the Huanqiu (Global Times) opinion "roll" listing pages and
    scrape each linked article into a ``NewsItem``."""

    name = "huanqiu"
    allowed_domains = ["opinion.huanqiu.com"]
    start_urls = [
        'http://opinion.huanqiu.com/roll.html'
    ]
    # Rules defining which URLs to crawl.
    rules = [
        # Crawl a single page only:
        # Rule(LinkExtractor(allow=("/roll.html")), follow=True, callback='parse_item')
        # Crawl roll pages 1-5. Fixed pattern: raw string, no needless "\_"
        # escape, and "\." instead of the original unescaped "." (which
        # matched ANY character before "html").
        Rule(LinkExtractor(allow=(r"/roll_?[2-5]?\.html",)), follow=True, callback='parse_item')
    ]

    def parse_item(self, response):
        """Extract article links from a roll (listing) page.

        Yields a ``scrapy.Request`` to :meth:`parse_details` for every
        ``.html`` link found inside the ``fallsFlow`` listing container.
        """
        sel = Selector(response)
        hrefs = sel.xpath('//div[@class="fallsFlow"]//a/@href').extract()
        for href in hrefs:
            if '.html' in href:
                # urljoin resolves relative links against the page URL;
                # already-absolute links pass through unchanged.
                yield scrapy.Request(url=response.urljoin(href), callback=self.parse_details)

    def parse_details(self, response):
        """Parse one article page into a populated ``NewsItem``.

        Uses ``extract_first(default='')`` so that a page missing the title
        or timestamp node yields an empty field instead of raising
        ``IndexError`` as the original ``extract()[0]`` did.
        """
        item = NewsItem()
        sel = Selector(response)

        item['title'] = sel.xpath('//div[@class="conText"]/h1/text()').extract_first(default='')
        item['href'] = response.url
        item['time'] = sel.xpath('//strong[@id="pubtime_baidu"]/text()').extract_first(default='')
        item['content'] = utils_crawler.deal_content(sel.xpath('//div[@class="text"]//p/text()').extract())
        item['source'] = source
        # Keep only images NOT hosted under the attachment2010 path —
        # NOTE(review): presumably those are boilerplate/ad assets rather
        # than article figures; confirm against the site layout.
        item['image_urls'] = [
            img for img in sel.xpath('//div[@class="text"]//img/@src').extract()
            if img.find("http://himg2.huanqiu.com/attachment2010/") == -1
        ]
        return item
