# -*- coding: utf-8 -*-
import json
import os
import random
import re
import sys
from configparser import ConfigParser
from urllib import parse

import scrapy
from pydispatch import dispatcher
from scrapy import signals

from spider import settings
from spider.items import ProductItem
from spider.settings import USER_AGENT_LIST


class YhdSpider(scrapy.Spider):
    """Keyword-search spider for yhd.com (Yihaodian).

    Flow:
      1. ``start_requests`` opens page 1 of the keyword search.
      2. ``parse_page`` yields detail requests for the first 30 products,
         requests the AJAX endpoint for the remaining 30 products of the
         page (handled by ``parse_next``), and recursively requests the
         next search page while pages remain.
      3. ``parse_detail`` extracts name/images, enqueues the product's
         other variant SKUs, and requests the price API.
      4. ``parse_price`` attaches the price and yields the finished item.

    An optional ``signal`` object (exposing ``send_int`` / ``send_str``
    emitters) receives progress updates via scrapy's dispatcher signals.
    """
    name = 'yhd'
    # This search endpoint only returns products matching the keyword,
    # unlike the older "mbname" endpoint, which mixed in unrelated items.
    # Example: https://search.yhd.com/c0-0/k%25E5%2593%2588.../#page=2&sort=1
    page_url = 'https://search.yhd.com/c0-0/k{1}/#page={0}&sort=1'
    # AJAX endpoint that returns the second 30 products of a search page.
    next_url = 'http://search.yhd.com/searchPage/c0-0/mbname-b/a-s1-v4-p{}-price-d0-f0b-m1-rt0-pid-mid0-color-' \
               'size-k{}/?isGetMoreProducts=1' \
               '&moreProductsDefaultTemplate=0&isLargeImg=0&moreProductsFashionCateType=2&nextAdIndex=0' \
               '&nextImageAdIndex=0&adProductIdListStr=&fashionCateType=2&firstPgAdSize=0&needMispellKw' \
               '=&onlySearchKeyword=0'
    detail_url = "https://item.yhd.com/{}.html"
    price_url = "https://itemapi.yhd.com/getPrices.do?params.area=2_2817_51973_0&params.skuIds={}"

    def __init__(self, name, trans_key, signal=None, *args, **kwargs):
        """
        :param name: search keyword; also stored percent-quoted as
            ``product_code`` for URL interpolation
        :param trans_key: translation key copied verbatim onto every item
        :param signal: optional progress-signal object with ``send_int``
            and ``send_str`` emitters; when given, spider lifecycle and
            per-item events are forwarded to it
        """
        super(YhdSpider, self).__init__(*args, **kwargs)
        self.keyword = name
        self.trans_key = trans_key
        # NOTE(review): scrapy percent-encodes request URLs anyway, so this
        # explicit quoting is mostly redundant but harmless.
        self.product_code = parse.quote(name)

        self.item_count = 0
        self.signal = signal
        if self.signal:
            dispatcher.connect(self.spider_closed, signals.spider_closed)
            dispatcher.connect(self.item_scraped, signals.item_scraped)

    def start_requests(self):
        # Start from search page 1; meta carries the *next* page counter
        # and the (not yet known) total page count.
        url = self.page_url.format(1, self.product_code)
        yield scrapy.Request(url=url, callback=self.parse_page, meta={'page': 2, 'page_total': None})

    def parse_page(self, response):
        """Parse one search-result page.

        Yields detail requests for the first 30 products, one AJAX request
        for the remaining 30 (``parse_next``), and a request for the next
        search page while pages remain.
        """
        if response.meta['page_total'] is None:
            # The total page count is embedded in a hidden input on page 1.
            page_total = response.xpath('//input[@id="pageCountPage"]/@value').extract_first()
        else:
            page_total = response.meta['page_total']

        id_list = response.xpath('//div[@class="mod_search_pro"]/@id').extract()
        prefix = 'producteg_'
        for id_ in id_list:
            # Remove the literal "producteg_" prefix. str.lstrip(prefix) is
            # wrong here: it strips a character *set*, not a prefix, and can
            # over-strip ids that start with any of those characters.
            if id_.startswith(prefix):
                id_ = id_[len(prefix):]
            yield scrapy.Request(url=self.detail_url.format(id_), callback=self.parse_detail,
                                 meta={'id': id_, 'flag': 1, 'page_total': page_total})

        page = response.meta['page']
        # The AJAX "more products" endpoint needs the JSESSIONID issued by
        # the search page, so pull it out of the Set-Cookie header. Guarded:
        # a missing header previously raised IndexError.
        jsessionid = ''
        set_cookie = response.headers.getlist('Set-Cookie')
        if set_cookie:
            cookie = str(set_cookie[0], encoding='utf-8')
            jsessionid = cookie.split(';')[0].split('=')[1]

        next_url = self.next_url.format(page, self.product_code)
        headers = {
            # Fixed from '* / *', which is not a valid Accept header value.
            'Accept': '*/*',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Connection': 'keep-alive',
            'Host': 'search.yhd.com',
            'User-Agent': random.choice(USER_AGENT_LIST),
            'Referer': 'http://search.yhd.com/c0-0/mbname-b/a-s1-v4-p1-price-d0-f0b-m1-rt0-pid-mid0-color-size-k'
                       '' + str(self.product_code) + '/',
            'X-Requested-With': 'XMLHttpRequest'
        }
        cookies = {
            'cart_cookie_uuid': '3dfae85e-5cd8-412c-814a-8d5574f9c904',
            '__jdv': '259140492|baidu|-|organic|not set|1544408122396',
            'yhd_location': '2_2817_51973_0',
            'provinceId': '2',
            'cityId': '2817',
            '__jda': '218172059.1543976952482138234034.1543976952.1545109993.1545272010.19',
            '__jdc': '218172059',
            'JSESSIONID': jsessionid,
            '__jdb': '218172059.3.1543976952482138234034|19.1545272010'
        }
        # Request the remaining 30 products of this page.
        yield scrapy.Request(url=next_url, callback=self.parse_next, headers=headers, cookies=cookies,
                             meta={'page': page, 'page_total': page_total})

        # Recurse into the next search page until page_total is reached.
        # Guarded against page_total being None/empty when the hidden input
        # was missing (previously crashed inside int()).
        if page_total and page < int(page_total):
            page += 1
            yield scrapy.Request(url=self.page_url.format(page, self.product_code), callback=self.parse_page,
                                 meta={'page': page, 'page_total': page_total})

    def parse_next(self, response):
        """Parse the AJAX response carrying the last 30 product ids of a page."""
        page_total = response.meta['page_total']
        content = json.loads(response.body)
        html = content['value']
        # The payload is an HTML fragment, not a full document, so a regex is
        # used instead of xpath. TODO: parse with lxml/BeautifulSoup instead.
        more_list = re.findall(r'<div class="mod_search_pro" id="productegMore_(.*?)"', html, re.M | re.S)
        for sku_id in more_list:
            yield scrapy.Request(url=self.detail_url.format(sku_id), callback=self.parse_detail,
                                 meta={'id': sku_id, 'flag': 1, 'page_total': page_total})

    def parse_detail(self, response):
        """Parse a product detail page: name, images, variant SKUs, price."""
        item = ProductItem()
        page_total = response.meta['page_total']
        # Each search page holds up to 60 products (30 + 30 via AJAX).
        # Guarded: page_total may be None when the hidden input was missing.
        item['total'] = int(page_total) * 60 if page_total else 0
        id_ = response.meta['id']
        item['source'] = '一号店'
        item['keyword'] = self.keyword
        item['trans_key'] = self.trans_key
        item['id'] = 'yhd_' + id_  # namespace the id per platform
        item['name'] = response.xpath('//*[@id="productMainName"]/text()').extract_first()
        # original_src holds a CDN path such as
        #   jfs/t19573/210/2226409645/31851/e49a0e01/5aec15c7Nfdf35e75.jpg
        # which is completed into a fixed-size (150x150) image URL.
        images = response.xpath('//*[@class="detail_main_pic_class"]/@original_src').extract()
        item['images'] = ["http://img13.360buyimg.com/n1/s150x150_" + src for src in images]
        # The price comes from a separate API; no headers/cookies required.
        yield scrapy.Request(url=self.price_url.format(id_), callback=self.parse_price, meta={'item': item})

        # flag == 1: product reached from a search page -> also enqueue its
        # other variant SKUs. Variants are requested with flag=0 so they do
        # not re-enqueue each other endlessly.
        if response.meta['flag']:
            # The variant list lives in an inline <script>; extract the JSON
            # array with a regex and parse it. Guarded: the script text or
            # the "availAttrbutes" array may be absent (previously raised
            # TypeError/IndexError).
            js_content = response.xpath('/html/body/script[1]/text()').extract_first() or ''
            matches = re.findall(r'availAttrbutes: \[(.*?)\],', js_content, re.M | re.S)
            if matches:
                for variant in json.loads('[' + matches[0] + ']'):
                    # skuId may be int or str in the page JSON; compare as
                    # int and skip the SKU we are already on.
                    if int(variant['skuId']) != int(id_):
                        yield scrapy.Request(url=self.detail_url.format(variant['skuId']),
                                             callback=self.parse_detail,
                                             meta={'id': str(variant['skuId']), 'flag': 0,
                                                   'page_total': page_total})

    def parse_price(self, response):
        """Attach the price from the price API and yield the finished item."""
        item = response.meta['item']
        item['price'] = json.loads(response.text)['data'][0]['p']
        yield item

    def item_scraped(self, item):
        """Forward per-item scraping progress to the GUI signal.

        TODO: the ``signal`` object cannot be passed into the middlewares,
        so progress is emitted here, once per scraped item.
        """
        self.item_count += 1
        # Guarded: item['total'] may be 0 when page_total was unavailable.
        percent = int((self.item_count / item['total']) * 100) if item['total'] else 0
        self.signal.send_int.emit(percent)
        self.signal.send_str.emit('%s/%s' % (self.item_count, item['total']))

    def spider_closed(self):
        """Notify the GUI that the spider has finished."""
        self.signal.send_str.emit('close')
        self.signal.send_int.emit(100)


if "__main__" == __name__:
    # This module is meant to run under scrapy, not as a standalone script.
    print("")