# -*- coding: utf-8 -*-
import json
import os
import re
import sys
from configparser import ConfigParser
from urllib import parse

import scrapy
from pydispatch import dispatcher
from scrapy import signals

from spider import settings
from spider.items import ProductItem


class SnSpider(scrapy.Spider):
    """Spider for suning.com keyword search results.

    Crawls every search result page for the keyword, follows each product's
    detail page (including its alternate specs/SKUs), then fetches the price
    from a separate endpoint before yielding a ``ProductItem``.

    TODO: the price endpoint differs between vendors (``price_url`` vs
    ``price_url2``); only ``price_url2`` is used at the moment.
    """
    name = 'sn'
    allowed_domains = ['s.taobao.com', 'product.suning.com', 'icps.suning.com', 'suning.com']

    # Landing page, used only to read the total page count.
    search_url = "https://search.suning.com/{}/"
    # First 30 products of a result page.
    page_url = "https://search.suning.com/emall/searchV1Product.do?keyword={}&pg=01&cp={}"
    # Remaining 30 products of the same page (paging=1).
    after_url = "https://search.suning.com/emall/searchV1Product.do?keyword={}&pg=01&cp={}&paging=1"

    detail_url = "https://product.suning.com/{}/{}.html"
    # Batch-capable price endpoint (currently unused -- see class TODO).
    price_url = "https://icps.suning.com/icps-web/getVarnishAllPriceNoCache/{}_731_7310101_0000000000_1_" \
                "getClusterPrice.jsonp?callback=getClusterPrice"
    price_url2 = "https://pas.suning.com/nspcsale_0_{id2}_{id2}_{id1}_160_731_7310101_500353_1000151_9151_11192_Z001" \
                 "___R9002187_0.978_0___000315743__.html"

    # Compiled once: parse_price runs for every scraped product.
    _price_re = re.compile(r'"promotionPrice":"(.*?)"')

    def __init__(self, name, trans_key, signal=None, *args, **kwargs):
        """
        :param name: search keyword (stored on items as ``keyword``).
        :param trans_key: opaque transaction key copied onto every item.
        :param signal: optional object with ``send_int``/``send_str`` signal
            attributes used to push progress to a GUI; when absent, no
            progress is reported.
        """
        super(SnSpider, self).__init__(*args, **kwargs)
        self.q = name
        self.page_total = None
        self.trans_key = trans_key
        # Custom progress signals -- only wired up when a signal is injected.
        self.item_count = 0
        self.signal = signal
        if self.signal:
            dispatcher.connect(self.spider_closed, signals.spider_closed)
            dispatcher.connect(self.item_scraped, signals.item_scraped)

    def start_requests(self):
        yield scrapy.Request(self.search_url.format(self.q), callback=self.parse_page_total)

    def parse_page_total(self, response):
        """Read the total page count, then schedule both halves of every page.

        Previously a failed extraction crashed with ``int(None)``; now the
        spider logs a warning and schedules nothing.
        """
        # NOTE(review): absolute xpath is brittle -- breaks on any site layout change.
        self.page_total = response.xpath('/html/body/div[9]/div/div[3]/span/em[2]/text()').extract_first()
        if not self.page_total:
            self.logger.warning('could not extract page total for keyword %r', self.q)
            return
        for page in range(int(self.page_total)):
            yield scrapy.Request(url=self.page_url.format(self.q, page), callback=self.parse_page)
            yield scrapy.Request(url=self.after_url.format(self.q, page), callback=self.parse_after)

    def parse_page(self, response):
        """Schedule detail requests for the first 30 products of a page."""
        for raw_id in response.xpath('//*[@id="product-list"]/ul/li/@id').extract():
            sku_id = raw_id.split('-')  # "<vendor>-<product>" -> [vendor_id, product_id]
            yield scrapy.Request(url=self.detail_url.format(sku_id[0], sku_id[1]),
                                 callback=self.parse_detail,
                                 meta={'id': sku_id, 'flag': 1})

    def parse_after(self, response):
        """Schedule detail requests for the last 30 products of a page."""
        for raw_id in response.xpath('//li/@id').extract():
            sku_id = raw_id.split('-')  # "<vendor>-<product>" -> [vendor_id, product_id]
            yield scrapy.Request(url=self.detail_url.format(sku_id[0], sku_id[1]),
                                 callback=self.parse_detail,
                                 meta={'id': sku_id, 'flag': 1})

    def parse_detail(self, response):
        """Extract name and images from a detail page, then request its price.

        ``meta['flag'] == 1`` marks a request coming from a listing page; for
        those we also fan out to the product's other specs (colours/sizes),
        which carry ``flag=0`` so the fan-out does not recurse.
        """
        item = ProductItem()
        item['total'] = int(self.page_total) * 60  # 60 products per result page
        item['source'] = '苏宁易购'
        item['keyword'] = self.q
        item['trans_key'] = self.trans_key
        item['id'] = response.meta['id']
        # Self-operated products return ['\r\r\r', 'name']; third-party ones
        # return just ['name']. Take the first non-blank entry; an empty
        # extraction no longer raises IndexError.
        names = response.xpath('//*[@id="itemDisplayName"]/text()').extract()
        item['name'] = next((n.strip() for n in names if n.strip()), '')
        # Thumbnails come protocol-relative (//imgservice.suning.cn/...).
        item['images'] = ["http:" + src for src in
                          response.xpath('//*[@class="imgzoom-thumb-main"]/ul/li/a/img/@src').extract()]
        # TODO: the price endpoint supports batch lookups; only one id is sent.
        # TODO: different vendors need different parameters.
        # The endpoint wants the product id zero-padded to 18 characters.
        price_id = item['id'][1].zfill(18)
        yield scrapy.Request(url=self.price_url2.format(id2=price_id, id1=item['id'][0]),
                             callback=self.parse_price,
                             meta={'item': item})
        # Fan out to other specs of the same product (sku attribute holds the id).
        if response.meta['flag']:
            for type_id in response.xpath('//*[@id="colorItemList"]/dd/ul/li/@sku').extract():
                type_id = type_id.lstrip('0')
                if type_id != item['id'][1]:
                    yield scrapy.Request(url=self.detail_url.format(item['id'][0], type_id),
                                         callback=self.parse_detail,
                                         meta={'id': [item['id'][0], type_id], 'flag': 0})

    def parse_price(self, response):
        """Attach the promotion price to the item and yield it.

        Items without a price are dropped -- the price is empty both for
        delisted products and for products not available for sale. A missing
        match no longer raises IndexError (previously ``findall(...)[0]``).
        """
        item = response.meta['item']
        # Prefix avoids id collisions with other sources.
        # TODO: only the second id half is used -- verify it cannot collide.
        item['id'] = 'sn_' + item['id'][1]
        match = self._price_re.search(response.text)
        item['price'] = match.group(1) if match else ''
        if item['price']:
            yield item

    def item_scraped(self, item):
        """Push crawl progress through the injected GUI signal.

        TODO: the signal object cannot reach the middlewares, so progress is
        emitted from this handler instead.
        """
        self.item_count += 1
        percent = int((self.item_count / item['total']) * 100)
        self.signal.send_int.emit(percent)
        self.signal.send_str.emit('%s/%s' % (self.item_count, item['total']))

    def spider_closed(self):
        """Tell the GUI the crawl is finished."""
        self.signal.send_str.emit('close')
        self.signal.send_int.emit(100)


