# -*- coding: utf-8 -*-
import scrapy
import json
from pprint import pprint
import re
import urllib.parse
import copy
import chardet

class JdSpider(scrapy.Spider):
    """Crawl JD.com: category tree -> listing pages -> per-SKU detail pages,
    then fetch each SKU's comment count and price from JSONP endpoints.

    Yielded items: a category item with a ``skus`` list of summaries, plus one
    detail item per SKU carrying ``comments_count`` and ``price``.
    """

    name = 'jd'
    allowed_domains = ['jd.com']
    # JSONP endpoint returning the whole category tree.
    start_urls = ['https://dc.3.cn/category/get?&callback=getCategoryCallback']

    def parse(self, response):
        """Parse the JSONP category tree and schedule listing-page requests.

        The body is ``getCategoryCallback({...})`` encoded as GBK: strip the
        20-char callback prefix and the trailing ``)`` to get plain JSON.
        """
        res_str = response.body.decode('gbk')[20:-1]
        res_dict = json.loads(res_str)

        # TODO: currently restricted to a single top-level category (phones).
        phone_obj = res_dict['data'][1]['s'][0]
        # TODO: currently restricted to a single mid-level category.
        mid_list = phone_obj['s'][:1]
        for mid_cate in mid_list:
            # TODO: currently restricted to a single leaf category.
            min_list = mid_cate['s'][:1]
            for min_cate in min_list:
                item = {}
                # Each 'n' field looks like "href|display name|...": capture
                # the first two pipe-separated fields (href first, then name).
                cate_re = r'(?P<href>.*?)\|(?P<name>.*?)\|'
                item['max_cate_href'], item['max_cate'] = \
                    re.findall(cate_re, phone_obj['n'], re.S)[0]
                item['mid_cate_href'], item['mid_cate'] = \
                    re.findall(cate_re, mid_cate['n'], re.S)[0]
                item['min_cate_href'], item['min_cate'] = \
                    re.findall(cate_re, min_cate['n'], re.S)[0]

                # Build an absolute listing URL. Two href formats occur:
                #   - "list.jd.com/..."-style relative hrefs -> urljoin
                #   - "9987-653-655" cat-id triples          -> cat= query
                if "-" not in item['min_cate_href']:
                    base = "https://list.jd.com/list.html?cat=9987,653,655"
                    item['min_cate_href'] = urllib.parse.urljoin(
                        base, item['min_cate_href'])
                else:
                    cat_ids = item['min_cate_href'].replace('-', ',')
                    item['min_cate_href'] = \
                        "https://list.jd.com/list.html?cat={}".format(cat_ids)

                yield scrapy.Request(
                    item['min_cate_href'],
                    meta={'item': copy.deepcopy(item)},
                    callback=self.skus_list,
                    # Many listing hosts live outside jd.com; bypass the
                    # offsite/duplicate filters.
                    dont_filter=True,
                )

    def skus_list(self, response):
        """Parse one listing page: collect per-SKU summaries, schedule the
        detail-page requests, and follow pagination."""
        item = response.meta['item']
        skus = []
        # TODO: currently restricted to the first two products per page.
        for sku in response.xpath('//li[@class="gl-item"]')[:2]:
            s_item = {}
            # hrefs/srcs are protocol-relative, e.g. "//item.jd.com/100002544828.html"
            s_item['sku_detail_address'] = "https:" + sku.xpath('.//a/@href').extract_first()
            s_item['sku_img'] = "https:" + sku.xpath('.//img/@src').extract_first()
            s_item['sku_content'] = sku.xpath('.//a[@target="_blank"]/em/text()').extract_first().split()
            s_item['sku_shop_name'] = sku.xpath('.//div[@class="p-shop"]/@data-shop_name').extract_first()

            yield scrapy.Request(
                s_item['sku_detail_address'],
                callback=self.skus_detail,
                meta={'s_item': copy.deepcopy(s_item)},
                dont_filter=True,
            )
            skus.append(s_item)

        item['skus'] = skus
        yield item

        # Pagination. Test the raw xpath result for None BEFORE urljoin:
        # urljoin never returns None, so checking afterwards (as the old code
        # did) would loop on the base URL forever once the last page is hit.
        next_href = response.xpath('//a[text()="下一页"]/@href').extract_first()
        if next_href is not None:
            next_url = urllib.parse.urljoin(
                "https://list.jd.com/list.html?cat=9987,653,655", next_href)
            yield scrapy.Request(
                next_url,
                callback=self.skus_list,
                meta={'item': copy.deepcopy(item)},
                dont_filter=True,
            )

    def skus_detail(self, response):
        """Detail page: schedule the comment-count and price requests."""
        d_item = response.meta['s_item']
        # Detail URLs look like https://item.jd.com/<skuId>.html
        sku_id = response.url.split('/')[-1].split('.')[0]

        # FIX: referenceIds was hard-coded to 7321794, so every product got
        # the same product's comment count. Use this page's own skuId.
        comment_num_url = (
            "https://club.jd.com/comment/productCommentSummaries.action"
            "?referenceIds={}&callback=jQuery7560738&_=1553931750831"
        ).format(sku_id)
        yield scrapy.Request(
            comment_num_url,
            callback=self.comment_num_url_parse,
            meta={'d_item': d_item},
            dont_filter=True,
        )

        # Decode the page; gb18030 is a superset of gbk and covers the CJK
        # characters gbk cannot. (The old debug dump to '1.txt' re-encoded as
        # gbk and crashed on exactly those characters — removed.)
        try:
            html_res = response.body.decode('gbk')
        except UnicodeDecodeError:
            html_res = response.body.decode('gb18030')

        try:
            # Pull the parameters the stock/price endpoint needs out of the
            # embedded page config; re.search returning None raises
            # AttributeError at .group(1), which we treat as "layout changed".
            cat = re.search(r'\?cat=(.*?)&', html_res).group(1)
            venderId = re.search(r'venderId:(.*?),', html_res).group(1)
            price_url = "https://c0.3.cn/stock?skuId={}&cat={}&venderId={}&area=17_1381_0_0&buyNum=1&choseSuitSkuIds=&extraParam={}&ch=1&fqsp=0&pduid=1545032138192466587808&pdpin=&callback=jQuery9747304".format(
                sku_id, cat, venderId, "{%22originid%22:%221%22}")
        except AttributeError:
            print("价格获取失败,出现错误的地址如下:\n", response.url)
            d_item['price'] = ['null']
            yield d_item
        else:
            yield scrapy.Request(
                price_url,
                callback=self.price_parse,
                meta={'d_item': d_item},
                dont_filter=True,
            )

    def comment_num_url_parse(self, response):
        """Parse the JSONP comment-count response for one SKU."""
        d_item = response.meta['d_item']
        # Body is 'jQuery7560738({...});' in GBK: strip the 14-char callback
        # prefix and the trailing ');'.
        res_str = response.body.decode('gbk')[14:-2]
        res_dict = json.loads(res_str)
        d_item['comments_count'] = res_dict['CommentsCount'][0]["GoodCountStr"]
        yield d_item

    def price_parse(self, response):
        """Parse the stock/price endpoint (JSONP-wrapped or plain JSON)."""
        d_item = response.meta['d_item']
        res_str = response.body.decode('gbk')
        try:
            # 'jQuery9747304(' prefix is 14 chars; trailing ')' stripped.
            res_dict = json.loads(res_str[14:-1])
        except ValueError:
            # Some responses come back unwrapped.
            res_dict = json.loads(res_str)

        # NOTE: yielding used to happen inside a `finally:` — if both loads
        # failed, res_dict was unbound and the real error was masked by a
        # NameError. Now a double parse failure propagates as-is.
        price = res_dict['stock']["jdPrice"]
        # 'orgin_price' key kept as-is: downstream consumers may rely on it.
        d_item['price'] = {'orgin_price': price.get('op', None),
                           'new_price': price.get('p', None)}
        if price.get('sfp', None):
            d_item['price']['sfp'] = price.get('sfp', None)
        yield d_item
