import scrapy
import re
import json

class SpiderSpider(scrapy.Spider):
    """Crawl JD.com mobile-phone listings per brand, then follow each
    product page to collect specs, price, and comment counts.

    Pipeline: start_requests -> get_pages -> parse -> get_infos
    -> get_price -> get_comment (the last callback yields the item).
    """
    name = 'spider'
    allowed_domains = ['jd.com']
    start_urls = ['http://jd.com/']

    def start_requests(self):
        """Yield one brand-listing request per brand.

        The brand string is injected into JD's ``exbrand`` filter of the
        phone category (cat=9987,653,655).
        """
        brands = ['华为（HUAWEI）', '荣耀（honor）', 'Apple', '小米（MI）', 'OPPO', 'vivo',
                  '真我（realme）', '三星（SAMSUNG）', 'iQOO']
        for brand in brands:
            url = 'https://list.jd.com/list.html?cat=9987%2C653%2C655&ev=exbrand_' + str(brand)
            self.logger.info('brand listing: %s', url)
            # The listing URL is passed along in meta so get_pages can
            # derive the per-page URLs from it.
            yield scrapy.Request(url, callback=self.get_pages,
                                 meta={'url': url}, dont_filter=True)
            # NOTE(review): stops after the first brand — looks like a
            # debug limiter; behavior kept, confirm before removing.
            break

    def get_pages(self, response):
        """Fan out to the first 10 result pages of one brand listing.

        Bug fix: the original reassigned ``url`` inside the loop, so each
        successive request accumulated every previous page parameter
        (…&page=1&page=2&page=3…). Each page URL is now built from the
        unchanged base URL.
        """
        base_url = response.meta['url']
        for page in range(1, 11):
            yield scrapy.Request(base_url + '&page=' + str(page),
                                 callback=self.parse, dont_filter=True)

    def parse(self, response):
        """Extract product SKU ids from a listing page and request each
        product detail page.

        Bug fix: the original mutated one shared ``item`` dict and passed
        it by reference in ``meta`` for every request; because callbacks
        run asynchronously, every callback saw the last-assigned pid.
        A fresh dict is created per SKU instead.
        """
        skus = response.xpath('//div[ @ id = "J_goodsList"]/ul//li/@data-sku').getall()
        for sku in skus:
            url = 'https://item.jd.com/' + str(sku) + '.html'
            self.logger.info('product page: %s', url)
            yield scrapy.Request(url, callback=self.get_infos,
                                 meta={'item': {'pid': sku}}, dont_filter=True)

    @staticmethod
    def _first(pattern, text):
        """Return the first DOTALL regex match of *pattern* in *text*,
        or '' when there is no match (replaces the repeated
        try/findall[0]/bare-except pattern of the original)."""
        matches = re.findall(pattern, text, re.DOTALL)
        return matches[0] if matches else ''

    def get_infos(self, response):
        """Scrape title, brand, launch date, and hardware specs from a
        product detail page, then chain into the price API request.

        Missing fields default to '' rather than aborting the item.
        """
        item = response.meta['item']
        text = response.text

        # Title: group 2 of the sku-name div (group 1 is an optional <img>).
        title = re.findall(r'<div class="sku-name">\s+(<img.*?/>)?\s+(.*?)\s+</div>',
                           text, re.DOTALL)
        item['title'] = title[0][1] if title else ''

        item['brand'] = self._first(
            r"<li.*?clstag='shangpin\|keycount\|product\|pinpai_1' target='_blank'>(.*?)</a>",
            text)

        # Launch date = "上市年份" (launch year) + "上市月份" (launch month);
        # '' when either spec row is absent, matching the original behavior.
        try:
            item['time'] = (re.findall(r'<dt>上市年份</dt><dd>(.*?)</dd>', text, re.DOTALL)[0]
                            + re.findall(r'<dt>上市月份</dt><dd>(.*?)</dd>', text, re.DOTALL)[0])
        except IndexError:
            item['time'] = ''

        # Hardware spec rows (CPU model, RAM, storage, screen size,
        # product name, industrial/model code).
        item['cpu'] = self._first(r'<li.*?>CPU型号：(.*?)</li>', text)
        item['memory'] = self._first(r'<li.*?>运行内存：(.*?)</li>', text)
        item['storage'] = self._first(r'<li.*?>机身存储：(.*?)</li>', text)
        item['screen'] = self._first(r'<li.*?>主屏幕尺寸\（英寸\）：(.*?)</li>', text)
        item['name'] = self._first(r'<li.*?>商品名称：(.*?)</li>', text)
        item['name_en'] = self._first(
            r'<p>工业代号或者入网型号</p>\s+</div>\s+</div>\s+</dd>\s+<dd>(.*?)</dd>', text)

        yield scrapy.Request('https://p.3.cn/prices/mgets?skuIds=J_' + str(item['pid']),
                             callback=self.get_price, meta={'item': item}, dont_filter=True)

    def get_price(self, response):
        """Read the price from JD's JSON price API, then chain into the
        comment-summary API request."""
        item = response.meta['item']
        data = json.loads(response.text)
        # The API returns a one-element list; 'op' is the listed price.
        item['price'] = data[0]['op']
        yield scrapy.Request(
            'https://club.jd.com/comment/productCommentSummaries.action?referenceIds='
            + str(item['pid']),
            callback=self.get_comment, meta={'item': item}, dont_filter=True)

    def get_comment(self, response):
        """Read good/general/poor review counts from the comment-summary
        JSON API and yield the completed item."""
        item = response.meta['item']
        data = json.loads(response.text)
        summary = data['CommentsCount'][0]
        item['GoodCountStr'] = summary['GoodCountStr']
        item['GeneralCountStr'] = summary['GeneralCountStr']
        item['PoorCountStr'] = summary['PoorCountStr']
        self.logger.info('scraped item: %s', item)
        yield item