# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from ..items import DDItem


import re
import urllib.request
import json
import requests
from lxml import etree

# NOTE: duplicate-request filtering was removed here (modified framework source)
# scrapy.Request()


class DdbookSpider(CrawlSpider):
    """Crawl a dangdang.com book category and scrape product detail pages.

    Follows category pagination links, then parses each product page
    into a ``DDItem``. The item is yielded even when some fields cannot
    be extracted (missing fields keep empty defaults).
    """

    name = 'DDbwook'
    allowed_domains = ['dangdang.com']
    start_urls = ['http://category.dangdang.com/cp01.49.08.14.00.00.html']
    rules = (
        # Pagination rule: \d+ (not \d) so pages >= 10 are not silently
        # skipped; dots escaped so '.' is a literal, not "any character".
        Rule(LinkExtractor(allow=r'/(pg\d+-)?cp01\.49\.08\.14\.00\.00\.html'),
             follow=True),
        # Product detail pages: parse each one, do not follow further links.
        Rule(LinkExtractor(allow=r'product\.dangdang\.com/\d+\.html',
                           restrict_xpaths=("//p[@class='name']/a")),
             callback='parse_item', follow=False),
    )

    def parse_item(self, response):
        """Extract one book's fields from a product detail page.

        :param response: scrapy Response for a product.dangdang.com page.
        :yields: a ``DDItem`` (possibly partially filled on extraction
            failure — matches the original best-effort behaviour).
        """
        item = DDItem()
        try:
            # Detail list entry 5 looks like '国际标准书号ISBN：978...' —
            # keep only the part after the full-width colon.
            item["ISBN"] = response.xpath(
                "//*[@id='detail_describe']/ul//li[5]/text()"
            ).extract()[0].split('：')[1].strip()
            item["name"] = response.xpath(
                "//*[@id='product_info']/div[1]/h1/@title").extract_first()
            item["author"] = ''
            item["press"] = ''
            item["publishingTime"] = ''
            item["typeName"] = ''
            item["typeRank"] = ''
            # Number of customer comments for the product.
            item["commentNumber"] = response.xpath(
                "//*[@id='comm_num_down']/text()").extract()[0]
            item["price"] = response.xpath(
                "//*[@id='original-price']/text()").extract()[1].strip()
            # The selling price sometimes has a leading blank text node;
            # fall back to the first node when the second is absent.
            # (Query hoisted so the xpath runs once, not twice.)
            selling_nodes = response.xpath(
                "//*[@id='dd-price']/text()").extract()
            try:
                item["selling"] = selling_nodes[1].strip()
            except IndexError:
                item["selling"] = selling_nodes[0].strip()
            try:
                item["shipper"] = response.xpath(
                    "//*[@id='shop-geo-name']/text()"
                ).extract()[0].replace('\xa0至', '')
            except IndexError:
                # No third-party shop node: sold by dangdang itself.
                item["shipper"] = '当当'
            item["openBook"] = ''
            item["page"] = ''
            item["pcking"] = ''
            item["isSuit"] = ''
            item["classification"] = ''
            item["pic1"] = response.xpath(
                "//*[@id='largePicDiv']/a/img[@id='largePic']/@src"
            ).extract_first()
            item["pic2"] = ''
            item["remd1"] = ''
            item["remd2"] = ''
            item["content1"] = ''
            item["content2"] = ''
            item["link"] = response.url
        except IndexError:
            # A required node was missing on this page; log it and still
            # yield the partial item (original behaviour: the except only
            # printed and the item was yielded regardless).
            self.logger.warning('parse_item: missing field on %s', response.url)
        yield item
