# -*- coding: utf-8 -*-
import json
import os
import sys
from configparser import ConfigParser
from urllib import parse

import scrapy
from PyQt5.QtCore import pyqtSignal
# from pydispatch import dispatcher
# from scrapy import signals
from pydispatch import dispatcher
from scrapy import signals

from spider.items import ProductItem


class JdSpider(scrapy.Spider):
    """Crawl JD (jd.com) search results for one keyword and yield ProductItem objects.

    Flow: search page -> read total page count -> for every result page,
    request both halves of the listing (JD lazily loads the second 30 items
    through the ``s_new.php`` scrolling endpoint) -> product detail page ->
    price API -> finished item.

    Progress is optionally reported to a GUI through ``signal`` (an object
    exposing ``send_int`` / ``send_str`` Qt signals) via pydispatch hooks on
    ``item_scraped`` and ``spider_closed``.
    """

    name = 'jd'
    allowed_domains = ['jd.com', 'p.3.cn', 'jd.hk']
    headers = {}
    # TODO: after packaging (e.g. into an exe) the config-file path changes;
    # the old ConfigParser-based settings lookup was removed and may need to
    # be restored with a packaging-aware path.

    search_url = "https://search.jd.com/Search?keyword={keyword}&enc=utf-8"
    # First 30 items of a result page (regular Search endpoint).
    first_url = "https://search.jd.com/Search?keyword={}&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&stock=1&page={}&s={}&click=0"
    # Last 30 items of a result page (scrolling endpoint).
    # BUG FIX: the original template lacked the '&' between 'wq={}' and
    # 'stock=1', which merged the two query parameters into 'wq=<kw>stock=1'.
    next_url = "https://search.jd.com/s_new.php?keyword={}&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&bs=1&wq={}&stock=1&" \
               "page={}&s={}&scrolling=y&log_id=1545637088.54177&tpl=3_M"
    detail_url = "https://item.jd.com/{id}.html"
    price_url = "https://p.3.cn/prices/mgets?pduid=15382152254501857071747&skuIds=J_{id}"
    start_urls = []
    id_list = []  # kept for interface compatibility; rebound per instance in __init__

    def __init__(self, name, trans_key, signal=None, *args, **kwargs):
        """
        :param name: search keyword (plain, un-encoded text)
        :param trans_key: translation key copied onto every scraped item
        :param signal: optional holder of ``send_int`` / ``send_str`` Qt
            signals used to push progress to the GUI; when falsy, no
            dispatcher hooks are registered and no signals are emitted
        """
        super(JdSpider, self).__init__(*args, **kwargs)
        self.keyword = name
        self.trans_key = trans_key
        self.code = parse.quote(name)  # URL-encoded keyword used in requests
        self.page_total = None         # total page count, set in parse_page
        self.item_count = 0            # number of items scraped so far
        # BUG FIX: bind a fresh list per instance so collected ids do not
        # leak between spider instances through the shared class attribute.
        self.id_list = []

        self.signal = signal
        if self.signal:
            dispatcher.connect(self.spider_closed, signals.spider_closed)
            dispatcher.connect(self.item_scraped, signals.item_scraped)

    def start_requests(self):
        """Kick off the crawl with a single search request for the keyword.

        Scrapy handles each yielded Request asynchronously; parse_page then
        fans out one pair of listing requests per result page.
        """
        yield scrapy.Request(self.search_url.format(keyword=self.code), callback=self.parse_page)

    def parse_page(self, response):
        """Read the total page count, then schedule both halves of every page."""
        page = response.xpath('//span[@class="fp-text"]/i/text()')[0].extract()
        self.page_total = page
        # BUG FIX: the loop previously ran over range(int(page)), i.e.
        # i = 0..n-1, so the first first_url request used page=-1 / s=-59
        # (invalid) and the last real page (request page 2n-1) was never
        # fetched.  Result pages are 1-based.
        for i in range(1, int(page) + 1):
            # First 30 product ids of result page i.  JD numbers half-pages,
            # so result page i maps to request page 2*i-1; s is the 1-based
            # offset of the first item on the page.
            # TODO: ids may repeat across pages; Scrapy's duplicate filter
            # drops repeated detail requests.
            yield scrapy.Request(url=self.first_url.format(self.code, 2 * i - 1, (i - 1) * 60 + 1),
                                 callback=self.parse_id_list)
            headers = {
                'referer': 'https://search.jd.com/Search?keyword={}&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&bs=1&wq={}'
                           '&stock=1&page={}&s=1&click=0'.format(self.code, self.code, 2 * i - 1),
            }
            # Last 30 product ids of result page i.  Arguments are shifted
            # for the 1-based loop so they produce exactly the same request
            # values as the original 0-based code did.
            yield scrapy.Request(url=self.next_url.format(self.code, self.code, i, 48 * (i - 1) - 20),
                                 headers=headers, callback=self.parse_next)

    def parse_id_list(self, response):
        """Collect SKU ids from the first half of a listing page and request details."""
        id_list = response.xpath('//li[@class="gl-item"]/@data-sku').extract()
        self.id_list = self.id_list + id_list
        for sku_id in id_list:
            # flag=1: detail pages reached from a listing also expand their
            # sibling variants (see parse_detail); dont_filter because the
            # same id can be revisited deliberately.
            yield scrapy.Request(url=self.detail_url.format(id=sku_id), callback=self.parse_detail,
                                 meta={'id': sku_id, 'flag': 1}, dont_filter=True)

    def parse_next(self, response):
        """Collect SKU ids from the second half of a listing page and request details."""
        next_list = response.xpath('//li[@class="gl-item"]/@data-sku').extract()
        self.id_list = self.id_list + next_list
        for sku_id in next_list:
            yield scrapy.Request(url=self.detail_url.format(id=sku_id), callback=self.parse_detail,
                                 meta={'id': sku_id, 'flag': 1}, dont_filter=True)

    def parse_detail(self, response):
        """Build a ProductItem from a detail page, then fetch its price.

        When ``meta['flag']`` is truthy (entry came from a listing page) the
        product's other variants are also scheduled, with flag=0 so variant
        pages do not recurse further.
        """
        sku_id = response.meta['id']
        item = ProductItem()
        item['total'] = int(self.page_total) * 60  # rough total item count (60 per page)
        item['source'] = '京东'
        item['keyword'] = self.keyword
        item['trans_key'] = self.trans_key
        # The product name may be split across several text nodes; join them.
        name = response.xpath('//div[@class="sku-name"]/text()').extract()
        item['name'] = ''.join(name).strip()
        images = response.xpath('//*[@id="spec-list"]/ul/li/img/@data-url').extract()
        if not images:
            # Book pages place the thumbnail list one level deeper; fall back
            # to the src attribute there.
            images = response.xpath('//*[@id="spec-list"]/div/ul/li/img/@src').extract()
        for i in range(len(images)):
            # Build an absolute URL.  TODO: the sXxY path segment controls
            # the thumbnail size.
            images[i] = "http://img13.360buyimg.com/n1/s150x150_" + images[i]
        item['images'] = images
        yield scrapy.Request(url=self.price_url.format(id=sku_id), callback=self.parse_price,
                             meta={'id': sku_id, 'item': item})
        flag = response.meta['flag']
        if flag:
            detail_list = response.xpath('//*[@id="choose-attr-1"]/div[2]/div/@data-sku').extract()
            # The variant list includes the current product itself; skip it
            # to avoid a duplicate request.  An empty list simply yields no
            # extra requests.
            for detail_id in detail_list:
                if int(detail_id) != int(sku_id):
                    yield scrapy.Request(url=self.detail_url.format(id=detail_id), callback=self.parse_detail,
                                         meta={'id': detail_id, 'flag': 0})

    @staticmethod
    def parse_price(response):
        """Attach the price from the p.3.cn JSON API and yield the finished item."""
        item = response.meta['item']
        item['id'] = 'jd_' + response.meta['id']
        # Response body is a JSON list with one entry; 'p' is the price.
        price = json.loads(response.text)[0]['p']
        item['price'] = price
        yield item

    def item_scraped(self, item):
        """Dispatcher hook: push percentage and count progress to the GUI.

        TODO: the signal object cannot be passed into middleware, so progress
        is emitted here.  Only connected when ``self.signal`` is truthy, so
        no guard is needed inside.
        """
        self.item_count += 1
        percent = int((self.item_count / item['total']) * 100)
        self.signal.send_int.emit(percent)
        self.signal.send_str.emit('%s/%s' % (self.item_count, item['total']))

    def spider_closed(self):
        """Dispatcher hook: tell the GUI the crawl finished (only connected when signal is set)."""
        self.signal.send_str.emit('close')
        self.signal.send_int.emit(100)

