# encoding:utf8
"""
@File        : suning.py
@Time        : 2019/7/8 14:59
@Author      : zhaoy
@Email       : zhaoyao@shandiangou.cc
@Description : 苏宁小店
"""
import json
import re

import pymongo
import scrapy

# Restored: parse_suning_poi / parse_cat_list / parse_goods_list call
# ``extract(...)``; with this import commented out they raise NameError.
from spider.utils.json_extract import extract

from ..settings import *


class SuNingXiaoDinStore(scrapy.Spider):
    """Collect Suning Xiaodian store POIs around grid-cell centers.

    For every grid document of 西安市 stored in the ``grids`` Mongo
    collection, a keyword search against Suning's POI endpoint is issued
    using the grid's center coordinates; each JSON response is yielded as
    an item tagged for the de-duplicating pipeline.
    """
    name = 'snxd_store'

    custom_settings = {
        'DEFAULT_REQUEST_HEADERS': {
            'User-Agent': "Mozilla/5.0(Linux; U;NEARBY-APP;SNCLIENT; Android 6.0; en; Samsung Galaxy S7_1) AppleWebKit/533.0 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
            'Host': "ns.suning.com",
            'Accept': "*/*",
        },
        'IMAGES_STORE': 'G:/spiders_data/images/suningxiaodian/',
        'ITEM_PIPELINES': {
            'baiguoyuan.pipelines.CommonPipelinesDropDuplicated': 320
        },
    }

    def __init__(self):
        # Connection parameters come from the package settings
        # (star-imported MONGO_URI / MONGO_DATABASE).
        self.client = pymongo.MongoClient(host=MONGO_URI)
        self.db = self.client[MONGO_DATABASE]
        self.collection = 'grids'
        super(SuNingXiaoDinStore, self).__init__()

    def start_requests(self):
        """Issue one POI search per grid cell of 西安市."""
        search_url = 'https://ns.suning.com/nsafs-web/poi/searchbaidupoi.do?searchKey=苏宁小店&' \
                     'locLng={center_lng}&locLat={center_lat}&source=android&version=4.0.5'
        grid_cursor = self.db.get_collection(self.collection).find({'city': '西安市'},
                                                                   {'city': 1, 'center_lat': 1, 'center_lng': 1})
        for grid in grid_cursor:
            # ``grid`` supplies center_lng / center_lat (the extra keys,
            # e.g. _id and city, are ignored by str.format).
            yield scrapy.Request(search_url.format(**grid), callback=self.parse_store_list)

    def parse_store_list(self, response):
        """Yield the raw search response, tagged for the pipelines."""
        payload = json.loads(response.text)
        payload.update({
            'data_collection': 'suning_store',
            'data_from': 'resultData.baiduPoiInfoList',
            'drop_by': 'baiduPoiId',
        })
        yield payload


class SuNingXiaoDian(scrapy.Spider):
    """Crawl per-store goods and prices of Suning Xiaodian stores.

    Pipeline of requests:
      1. Read previously collected Baidu POI ids from the ``suning_store``
         Mongo collection and resolve each to a Suning POI.
      2. For each POI, walk a fixed list of goods categories (20 goods per
         page, paginated).
      3. For every listing page, POST a batch request for the store-level
         price/stock info of the listed goods.
    """
    name = 'snxd'

    custom_settings = {
        'DEFAULT_REQUEST_HEADERS': {
            'User-Agent': "Mozilla/5.0(Linux; U;NEARBY-APP;SNCLIENT; Android 5.1.1; zh; HUAWEI MLA-AL10) AppleWebKit/533.0 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
            'Host': "ns.suning.com",
            'Accept': "*/*",
            # Fixed: the key used to be 'accept - encoding' (with spaces),
            # which is not a valid HTTP header name and was never honored.
            'Accept-Encoding': "gzip, deflate",
            'Connection': "keep-alive",
            'cache-control': "no-cache"
        },
        'IMAGES_STORE': '/data/images/suningxiaodian/',
        'ITEM_PIPELINES': {
            'baiguoyuan.pipelines.GoodsImagesPipeline': 1,
            'baiguoyuan.pipelines.CommomPipelines': 321
        },
    }

    def __init__(self):
        # Connection parameters come from the package settings
        # (star-imported MONGO_URI / MONGO_DATABASE).
        self.client = pymongo.MongoClient(host=MONGO_URI)
        self.db = self.client[MONGO_DATABASE]
        self.collection = 'suning_store'
        super(SuNingXiaoDian, self).__init__()

    def start_requests(self):
        """
        suning baidu poi =>  suning poi
        :return: one Request per stored Baidu POI id
        """
        locations = self.db.get_collection(self.collection).find({},
                                                                 {'baiduPoiId': 1, 'locLat': 1, 'locLng': 1})
        ul = 'https://ns.suning.com/nsafs-web/poi/getnspoi.do?cityName=西安市&baiduPoiId={baiduPoiId}&source=android&version=4.0.5'
        for item in locations:
            yield scrapy.Request(ul.format(**item), callback=self.parse_suning_poi)

    def parse_suning_poi(self, response):
        """Resolve the Suning POI and request page 1 of every category."""
        data = json.loads(response.text)
        poiId = extract(data, "resultData.poiInfo.poiId")
        # The full poiInfo dict (assumed to include cityCode/townCode used
        # later by parse_cat_list) travels with every request as meta.
        meta = extract(data, "resultData.poiInfo")
        # Known category ids:
        # 505739 fresh fruit, 506586 meat/poultry/eggs, 506585 seafood
        # (the remaining ids are unlabeled in the original source).
        url = 'https://ns.suning.com/nssfs-web/search/goods/v1/{poiId}__{cat_id}____0_1_20_3____android_4.0.3_.htm'
        cat_list = ['505739', '506586', '506585', '506584', '506691', '506692', '506693', '506695', '506696',
                    '506697', '505740']
        for cat_id in cat_list:
            yield scrapy.Request(url.format(poiId=poiId, cat_id=cat_id),
                                 callback=self.parse_cat_list, meta=meta)

    def parse_cat_list(self, response):
        """Handle one category listing page.

        Yields the goods item, then a POST request for the batch price
        fill, and (from page 1 only) schedules the remaining pages.
        """
        meta = response.meta
        data = json.loads(re.sub(r' \n', '', response.text))

        # Goods detail records; guard against a missing/empty list so one
        # odd response does not kill the whole crawl.
        goods = extract(data, 'resultData.goodsSearchInfo') or []
        for good in goods:
            good.update(meta)
        # (url, name) pairs for the image pipeline; skip records without a
        # pictureUrl instead of crashing on "https:" + None.
        image_urls = [("https:" + item.get('pictureUrl'), item.get('goodsName'))
                      for item in goods if item.get('pictureUrl')]
        data['resultData']['goodsSearchInfo'] = goods
        data.update(
            {'data_collection': 'suning_goods', 'data_from': 'resultData.goodsSearchInfo', 'image_urls': image_urls})
        yield data

        headers = {
            'User-Agent': "Mozilla/5.0(Linux; U;NEARBY-APP;SNCLIENT; Android 6.0; en; Samsung Galaxy S7_1) "
                          "AppleWebKit/533.0 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
            'Content-Type': "application/x-www-form-urlencoded; charset=utf-8",
            'Host': "ns.suning.com",
            'cache-control': "no-cache",
            'Accept-Encoding': "gzip",
        }
        # Only these fields are needed by the price-fill endpoint.
        wanted = ("csCatalog", "goodType", "goodsCode", "goodsMerchantCode", "restLog",
                  "goodsStoreCode", "isServiceGoods", "makeCodeIden")
        goods_metas = []
        for good in goods:
            goods_metas.append({k: v for k, v in good.items() if k in wanted})

        payload = ("goodsMetas={goodsMetas}&channelId={channelId}&cityId={cityId}"
                   "&districtCode={districtCode}&source={source}&version={version}").format(
            goodsMetas=goods_metas,
            channelId="55",
            # NOTE(review): assumes poiInfo meta carries cityCode/townCode —
            # confirm against the getnspoi.do response schema.
            cityId=meta.get('cityCode'),
            districtCode=meta.get('townCode'),
            source="android",
            version="4.0.5",
        )
        list_ul = 'https://ns.suning.com/nssfs-web/search/gateway/fill/v1.do'
        yield scrapy.Request(url=list_ul, callback=self.parse_goods_list, method='post',
                             body=payload,
                             headers=headers,
                             meta=meta)

        # Pagination: 20 goods per page; only page 1 schedules the rest so
        # each follow-up page is requested exactly once.
        goods_count = int(extract(data, 'resultData.goodsCount') or 0)
        # BUG FIX: the old code tested ``page_num % 20`` (the page count)
        # instead of the remainder of goods_count, over-counting pages for
        # any exact multiple of 20 (e.g. 40 goods -> 3 pages instead of 2).
        page_num, remainder = divmod(goods_count, 20)
        if remainder:
            page_num += 1
        # \d+ (was \d) so multi-digit page numbers are recognized too.
        page = ''.join(re.findall(r'____0_(\d+)_20_3____', response.request.url))
        if page and int(page) == 1 and page_num > 1:
            for i in range(2, page_num + 1):
                next_url = re.sub(r'____0_\d+_20_3____', '____0_' + str(i) + '_20_3____',
                                  response.request.url)
                yield response.request.replace(url=next_url)

    def parse_goods_list(self, response):
        """Yield the batch price/stock response, tagged for the pipelines."""
        meta = response.meta
        data = json.loads(re.sub(r' \n', '', response.text))
        goods_list = extract(data, 'resultData.goodsList') or []
        for good in goods_list:
            good.update(meta)
        data['resultData']['goodsList'] = goods_list
        data.update({'data_collection': 'suning_store_price', 'data_from': 'resultData.goodsList'})
        yield data
