import scrapy
from scrapy import cmdline
from fb import items
from scrapy_redis.spiders import RedisSpider

class DdSpider(RedisSpider):
    """Distributed spider for dangdang.com book-search result pages.

    Start URLs are injected through Redis instead of ``start_urls``:
        lpush dd:start_urls http://search.dangdang.com/?key=python&act=input
    """
    name = 'dd'
    # allowed_domains = ['dangdang.com']
    redis_key = 'dd:start_urls'

    # XPath extraction rules: item-relative selectors for the fields, plus
    # page-level selectors for the result list and the "next page" link.
    rules = {
        'title': './/p[@class="name"]/a/@title',
        'price': './/p[@class="price"]/span[@class="search_now_price"]/text()',
        'detail': './/p[@class="search_book_author"]/span[3]/a/text()',
        'common': './/p[@class="search_star_line"]/a/text()',
        'digest': './/p[@class="detail"]/text()',
        'page': '//div[@class="paging"]/ul/li[@class="next"]/a/@href',
        'info_list': '//div[@id="search_nature_rg"]/ul/li'
    }

    def parse(self, response):
        """Yield one ``items.FbItem`` per selected result node, then follow
        pagination.

        Args:
            response: the search-results page response.

        Yields:
            items.FbItem for each scraped result node, then a Request for
            the next results page when a "next" link is present.
        """
        info_list = response.xpath(self.rules['info_list'])
        # NOTE(review): only nodes 1-4 of the list are scraped; this looks
        # like a debugging limit — confirm before widening to the full list.
        for node in info_list[1:5]:
            item = items.FbItem()
            item['title'] = self.get_title(node)
            item['price'] = self.get_price(node)
            item['detail'] = self.get_detail(node)
            item['common'] = self.get_common(node)
            item['digest'] = self.get_digest(node)
            yield item

        # Fix: the 'page' rule was defined but never used, so the spider
        # stopped after the first page. Follow the "next" link when present.
        next_page = response.xpath(self.rules['page']).extract_first()
        if next_page:
            yield response.follow(next_page, callback=self.parse)

        # Replaces the old logger.info(response.text), which dumped the
        # whole HTML body into the log on every page.
        self.logger.debug('parsed %s', response.url)

    def _extract(self, node, rule):
        # Shared helper: apply the named XPath rule to an item node and
        # return the first match, or None when nothing matches.
        return node.xpath(self.rules[rule]).extract_first()

    def get_title(self, x):
        """Return the title field for one result node."""
        return self._extract(x, 'title')

    def get_price(self, x):
        """Return the current-price text for one result node."""
        return self._extract(x, 'price')

    def get_detail(self, x):
        """Return the detail text (third author/publisher span) for one result node."""
        return self._extract(x, 'detail')

    def get_common(self, x):
        """Return the review-link text for one result node."""
        return self._extract(x, 'common')

    def get_digest(self, x):
        """Return the description text for one result node."""
        return self._extract(x, 'digest')


if __name__ == '__main__':
    # Convenience entry point: run this spider via Scrapy's CLI.
    # Use DdSpider.name instead of a hard-coded 'dd' so the command can
    # never drift out of sync with the spider's declared name.
    cmdline.execute(['scrapy', 'crawl', DdSpider.name])
