# -*- coding: utf-8 -*-
import json

import requests
import scrapy
import sys
from jd_book.items import JdBookItem

class JdBookSpiderSpider(scrapy.Spider):
    """Crawl JD.com book-category listing pages.

    For every book on a listing page, follows the detail link and yields one
    JdBookItem populated from the detail page plus three auxiliary JSON/JSONP
    endpoints (comment summary, comments, description).
    """
    # NOTE(review): the original reload(sys)/sys.setdefaultencoding('utf-8')
    # hack was removed; it only existed to let str() swallow unicode in
    # parse_comment, which now returns the text unchanged instead.
    name = "jd_book_spider"
    start_urls = [
        'https://list.jd.com/list.html?cat=1713,3282,6587',  # aerospace
        'https://list.jd.com/list.html?cat=1713,3282,3708',  # automation technology
    ]
    # Candidate categories not crawled yet:
    # https://list.jd.com/list.html?cat=1713,3282,3700  # chemical industry
    # https://list.jd.com/list.html?cat=1713,3282,3710  # metallurgy & metalworking
    # https://list.jd.com/list.html?cat=1713,9340,9344  # astronomy/space (page layout differs)
    # https://list.jd.com/list.html?cat=1713,3286,3784  # chemistry
    # https://list.jd.com/list.html?cat=1713,3286,3783  # physics
    # https://list.jd.com/list.html?cat=1713,3282,6582  # machinery & instrument industry

    def parse(self, response):
        """Parse one listing page.

        Yields one Request per book detail page and, when present, a Request
        for the next listing page.
        """
        field0 = response.xpath(
            "//div[@id='J_selector']/div[@class='s-title']/h3/b/text()").extract_first()
        booksNodes = response.xpath("//div[@id='plist']/ul/li[@class='gl-item']")
        for bookNode in booksNodes:
            # Bug fix: create a FRESH item per book. The old code built one
            # item per listing page and handed the same mutable instance to
            # every concurrent detail request via meta, so parse_content
            # callbacks raced on shared state.
            book = JdBookItem()
            book['field'] = field0
            urldetail = response.urljoin(
                bookNode.xpath("div/div[@class='p-name']/a/@href").extract_first())
            print("=====================URL===============================")
            print(urldetail)
            yield scrapy.Request(urldetail, meta={'item': book},
                                 callback=self.parse_content)

        # Bug fix: test the raw href for None BEFORE urljoin(). urljoin()
        # never returns None (and raises when given None), so the old
        # post-join `if next_page is not None` guard could not prevent a
        # crash on the last page of a category.
        next_href = response.xpath(
            "//div[@id='J_bottomPage']/span[@class='p-num']"
            "/a[@class='pn-next']/@href").extract_first()
        if next_href is not None:
            next_page = response.urljoin(next_href)
            print("=====================next Page ===============================")
            print(next_page)
            yield scrapy.Request(next_page, callback=self.parse)

    def parse_content(self, response):
        """Parse a book detail page and yield the populated JdBookItem."""
        print("==================================detail started!===========================")
        book = response.meta['item']
        bookid = response.xpath("//a[@id='choose-btn-coll']/@data-id").extract_first()
        book['siteid'] = bookid
        # Guard extract_first() results: a missing node returns None and the
        # old unconditional .strip() calls raised AttributeError.
        book["field"] = (response.xpath(
            "//div[@id='root-nav']/div/div/span[1]/a[2]/text()").extract_first() or '').strip()
        book["url"] = response.urljoin((response.xpath(
            "//div[@id='root-nav']/div/div/span[2]/a/@href").extract_first() or '').strip())
        book["title"] = (response.xpath(
            "//div[@id='name']/h1/text()").extract_first() or '').strip()
        book["image_url"] = (response.xpath(
            "//div[@id='spec-n1']/img/@src").extract_first() or '').strip()
        book["authors"] = response.xpath("//div[@id='p-author']/a/text()").extract()
        book["authors_extraData"] = response.xpath("//div[@id='p-author']/text()").extract()
        book["others"] = response.xpath("//ul[@id='parameter2']/li/text()").extract()
        if bookid:
            jsonUrl = [
                'https://club.jd.com/comment/productCommentSummaries.action?referenceIds=' + bookid,
                'https://club.jd.com/clubservice/newcomment-Club-' + bookid + '.html?callback=fetchJSON_Discuss',
                'https://dx.3.cn/desc/' + bookid + '?cdn=2&callback=showdesc',
            ]
            book['comment_static_info'] = self.parse_comment(jsonUrl[0], 0)
            book['comments'] = self.parse_comment(jsonUrl[1], 1)
            book['description'] = self.parse_comment(jsonUrl[2], 2)
        else:
            # Bug fix: the old code concatenated None into the URLs and
            # crashed; yield the partial item instead.
            book['comment_static_info'] = None
            book['comments'] = None
            book['description'] = None
        yield book

    def parse_comment(self, url, type):
        """Fetch one auxiliary endpoint synchronously and return its payload.

        type 0 -> comment-count summary: parsed JSON, 'CommentsCount' stringified
        type 1 -> comments JSONP payload: raw response text
        type 2 -> description JSONP payload: raw response text
        Returns None for non-200 responses or an unknown type.

        NOTE(review): `type` shadows the builtin but is kept so existing
        keyword callers do not break. NOTE(review): a blocking requests.get
        inside a Scrapy callback stalls the reactor; consider issuing a
        scrapy.Request with its own callback instead.
        """
        r = requests.get(url)
        if r.status_code != 200:
            return None
        if type == 0:
            return str(r.json()['CommentsCount'])
        if type in (1, 2):
            # Return the text unchanged: the old str() cast on the unicode
            # body only worked because of the removed setdefaultencoding hack.
            return r.text
        return None
