# -*- coding: utf-8 -*-
import scrapy
from scrapy.exceptions import DropItem
from reviews.items import ReviewsItem

class PingceSpider(scrapy.Spider):
    """Crawl review listings from shiche.com.cn, following pagination through page 7.

    Each listing entry on ``/reviews/?p=N`` yields one ``ReviewsItem`` with
    link, title, description, author, timestamp and thumbnail URL.
    """
    name = 'pingce'
    allowed_domains = ['shiche.com.cn']
    url = "http://www.shiche.com.cn/reviews/?p="
    # NOTE: this site's pagination starts at p=1, not p=0 — an earlier
    # off-by-one in the URL join caused request errors.
    offset = 1
    start_urls = [url + str(offset)]

    def parse(self, response):
        """Extract a ReviewsItem per listing entry, then queue the next page.

        :param response: Scrapy response for one listing page.
        :yields: populated ``ReviewsItem`` instances, then (while more pages
                 remain) one ``scrapy.Request`` for the following page.
        """
        lis = response.xpath("//ul[@class='doc-list-label']/li")
        for each in lis:
            # Relative XPaths must start with '.' to search inside the
            # current <li> node rather than the whole document.
            href = each.xpath("./a/@href")
            title = each.xpath("./h4/a/text()")
            des = each.xpath("./p/text()")
            author = each.xpath("./div[@class='handle']/span[1]/text()")
            time = each.xpath("./div[@class='handle']/span[2]/text()")
            # TODO: hits count under div.fn-right could not be extracted
            # reliably with the XPaths tried so far.
            thumb = each.xpath("./a/img/@src")

            # Build a fresh item per entry. Reusing one item across the loop
            # (the previous code) makes every yielded item alias the same
            # object, so later mutations clobber items already queued.
            item = ReviewsItem()
            item['href'] = self.isEmpty(href)
            item['title'] = self.isEmpty(title).strip()
            item['des'] = self.isEmpty(des).strip()
            item['author'] = self.isEmpty(author)
            item['time'] = self.isEmpty(time)
            item['thumb'] = self.isEmpty(thumb)
            yield item

        # Pagination: stop cleanly after page 7 by simply not yielding a
        # further Request. The previous ``raise DropItem(...)`` was a bug:
        # DropItem belongs to item pipelines; raised in a spider callback it
        # is logged as a spider error and aborts this generator.
        if self.offset < 7:
            self.offset += 1
            yield scrapy.Request(self.url + str(self.offset), callback=self.parse)

    def isEmpty(self, obj):
        """Return the first extracted value of a SelectorList, or '' if empty.

        Guards against IndexError when an XPath matched nothing, so callers
        can safely chain ``.strip()`` on the result.
        """
        if obj:
            return obj.extract()[0]
        return ''