import datetime
import scrapy
from scrapy.selector import Selector
from ..items import NewsItem
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from ..spiders import utils_crawler

# Human-readable name of the news outlet, attached to every scraped item
# ("人民网" = People's Daily Online).
source = u'人民网'


class PeopleCommentSpider(CrawlSpider):
    """Crawl opinion articles from People's Daily Online (opinion.people.com.cn).

    Follows the paginated opinion index (index1.html, index2.html, ...),
    collects article links from each index page, and parses every article
    into a NewsItem.
    """
    name = "people_comment"
    allowed_domains = ["opinion.people.com.cn"]
    start_urls = [
        'http://opinion.people.com.cn/GB/159301/index.html'
    ]
    rules = [  # URL-following rules for the CrawlSpider
        # Follow the numbered index pages. Raw string so `\d` is a real
        # regex escape, and the dot before "html" is escaped to be literal.
        Rule(LinkExtractor(allow=(r"/GB/159301/index(\d+)\.html",)), follow=True, callback='parse_item')
    ]

    def parse_item(self, response):
        """Extract article URLs from an index page and schedule a detail
        request (parse_details) for each one.
        """
        hrefs = response.xpath('//div[@class="fl p2j_list"]//a/@href').extract()
        for href in hrefs:
            href_url = 'http://' + self.allowed_domains[0] + href
            # Keep only links that resolve under the /n1 article path;
            # everything else on the index page is navigation noise.
            if 'http://opinion.people.com.cn/n1' in href_url:
                yield scrapy.Request(url=href_url, callback=self.parse_details)

    def parse_details(self, response):
        """Parse a single article page into a NewsItem.

        Fields: title, href (article URL), time (crawl timestamp, not the
        publication date), content (cleaned paragraph text), source, and
        any absolute image URLs found in the article body.
        """
        item = NewsItem()

        # extract_first(default='') avoids an IndexError when the title
        # xpath matches nothing (e.g. an unexpected page layout).
        item['title'] = response.xpath('//div[@class="clearfix w1000_320 text_title"]/h1/text()').extract_first(default='')
        item['href'] = response.url
        item['time'] = str(datetime.datetime.now())
        item['content'] = utils_crawler.deal_content(response.xpath('//div[@id="rwb_zw"]//p/text()').extract())
        item['source'] = source
        # Keep only absolute image URLs; accept both http and https
        # (the original substring test silently dropped https images).
        img_urls = response.xpath('//div[@id="rwb_zw"]//img/@src').extract()
        item['image_urls'] = [img for img in img_urls
                              if img.startswith(('http://', 'https://'))]

        return item
