# encoding:utf8
"""
@File        : mcw.py
@Time        : 2019/7/5 14:17
@Author      : zhaoy
@Email       : zhaoyao@shandiangou.cc
@Description :  Meicai ("美菜网", yunshanmeicai.com) grocery-platform spider
"""
import json
import random

import scrapy


class MeiCaiWangSpider(scrapy.Spider):
    """Spider for the yunshanmeicai.com (Meicai) mobile mall API.

    Crawl flow:
      1. ``start_requests``  -> top-level sale categories (``saleclass``)
      2. ``parse``           -> sub-categories for each top-level category
      3. ``parse_sub_cate``  -> page 1 of each goods listing (``getsearchlistbyc2``)
      4. ``parse_list``      -> yields each listing page as an item and
                                follows pagination until ``is_last_page``.

    All API calls are POST-style JSON bodies built from the shared ``payload``
    template; a random ``?a=<n>`` query parameter is appended to each URL as a
    cache-buster so Scrapy's duplicate filter does not drop the otherwise
    identical request URLs.
    """

    name = 'mcw'
    allowed_domains = ['yunshanmeicai.com']

    custom_settings = {
        'DEFAULT_REQUEST_HEADERS': {
            'Host': "online.yunshanmeicai.com",
            'Accept': "application/json, text/plain, */*",
            'X-Requested-With': "XMLHttpRequest",
            'Accept-Language': "zh-cn",
            'Cache-Control': "no-cache",
            'X-MC-City': "15",
            'User-Agent': "Mozilla/5.0 (iPhone; CPU iPhone OS 10_3_3 like Mac OS X) AppleWebKit/603.3.8 (KHTML, like Gecko) Mobile/14G60 MicroMessenger/7.0.3(0x17000321) NetType/WIFI Language/zh_CN",
            'Content-Type': "application/json;charset=UTF-8",
            'X-MC-Area': "6433",
        },
        'IMAGES_STORE': 'G:/spiders_data/images/mcw/',
        'ITEM_PIPELINES': {
            'baiguoyuan.pipelines.GoodsImagesPipeline': 1,
            'baiguoyuan.pipelines.CommomPipelines': 321
        },
    }

    # Base JSON payload template shared by every API call.
    # NOTE(review): the jwt ticket and salt_sign are hard-coded session
    # credentials captured from one session; they will expire and should be
    # injected/refreshed rather than baked into the source.
    payload = {
        'tickets': 'jwt:eyJhbGciOiJSUzI1NiJ9.eyJpZCI6IjMwMDE3NjU3IiwianRpIjoiNTE3NmE4MjVkZmIwYjMzMjFhN2I2ZDQ2OTgy'
                   'NThkODUtMzptYWxsIiwiaWF0IjoxNTYyMjkwNjE4MjA5fQ.hM9XaUK81prq_ssZ45FLZ0mhSXvQsmWszfkM6J9Xb8TtfI'
                   'VhqSDnNIeiErKu7IR2qAO0aH0WcEzf1a9PBkK4M5O4vvOgIaUsTx75uH-g87SKhbESlvemSSMWTRmUKDTxR_bFn2pYvQZ'
                   'HG90S-VtXGV4Gn6zmE_7fwZ3cK-583jc',
        'city_id': '15', 'area_id': '6433', 'company_id': '29581177',
        '_ENV_': {'source': 'weixin', 'distribute_channel': 'weixin', 'platform': '0',
                  'device_id': 'o08NEtzQ3nBDuSajoJ0BvIxjVZ8c', 'device_name': '', 'app_version': '2.6.8',
                  'os_version': '', 'appkey_version': '', 'net': '', 'mno': '', 'imei': '',
                  'open_id': 'o08NEtzQ3nBDuSajoJ0BvIxjVZ8c', 'idfa': '', 'idfv': '', 'sn': '', 'mac': '',
                  'ssid': '', 'bssid': '', 'lat': 34.23861786417517, 'lng': 108.97043109099755},
        'salt_sign': '9E9E45C4C7CE799069BE6E8FCD1443A1,66,1562292412413'}

    def _base_payload(self):
        """Return a fresh shallow copy of the shared payload template.

        The class-level ``payload`` dict must never be mutated in place:
        Scrapy schedules requests concurrently, so aliasing it from several
        callbacks (as the original ``pl = self.payload`` did) lets a later
        ``update`` clobber the body of an in-flight request.  A shallow copy
        suffices because callbacks only add/replace top-level keys.
        """
        return dict(self.payload)

    def start_requests(self):
        """Kick off the crawl with the top-level category listing."""
        url = 'https://online.yunshanmeicai.com/mall/api/commodity/saleclass'
        yield scrapy.Request(url, body=json.dumps(self._base_payload()))

    def parse(self, response):
        """Parse top-level categories; request sub-categories for each."""
        data = json.loads(response.text)
        self.logger.debug('saleclass response: %s', data)
        url = 'https://online.yunshanmeicai.com/mall/api/commodity/saleclass'
        # ``data`` may be null on an error response; iterate defensively.
        for cat in data.get('data') or []:
            pl = self._base_payload()
            pl['parent_id'] = cat.get('id')
            # Random cache-buster keeps the dupefilter from dropping the URL.
            u = url + '?a=' + str(random.randrange(1, 1000000))
            yield scrapy.Request(u, callback=self.parse_sub_cate, body=json.dumps(pl))

    def parse_sub_cate(self, response):
        """Parse sub-categories; request page 1 of each goods listing."""
        data = json.loads(response.text)
        self.logger.debug('sub-category response: %s', data)
        url = 'https://online.yunshanmeicai.com/mall/api/search/getsearchlistbyc2'
        for cat in data.get('data') or []:
            c1_id = int(cat.get('parent_id'))
            c2_id = int(cat.get('id'))
            pl = self._base_payload()
            pl.update({
                'sale_c1_id': c1_id,
                'sale_c2_id': c2_id,
                'size': 30,
                'page': 1,
                'score_type': 1,
            })
            # meta carries the category ids, display name, clean base URL and
            # current page so parse_list can build the next-page request.
            meta = {
                'cname': cat.get('name'),
                'sale_c1_id': c1_id,
                'sale_c2_id': c2_id,
                'url': url,
                'page': 1,
            }
            u = url + '?a=' + str(random.randrange(1, 1000000))
            yield scrapy.Request(u, callback=self.parse_list, body=json.dumps(pl), meta=meta)

    def parse_list(self, response):
        """Yield one listing page as an item, then follow pagination.

        Bug fixes relative to the original implementation:
          * the pagination payload read ``meta['parent_id']``/``meta['id']`` —
            keys never set in meta (``sale_c1_id``/``sale_c2_id`` are) — so
            every follow-up request carried ``None`` category ids;
          * ``meta['page']`` was never advanced, so ``page`` was stuck at 2;
          * the next-page URL was appended to ``response.request.url``, which
            already contains ``?a=...``, producing ``...?a=X?a=Y``; the clean
            base URL from ``meta['url']`` is used instead;
          * ``res.get('is_last_page')`` crashed when the API returned
            ``data: null``.
        """
        meta = response.meta
        data = json.loads(response.text)
        res = data.get('data')

        image_header = {
            'Accept': "image/*;q=0.8",
            'Accept-Language': "zh-cn",
            'Connection': "keep-alive",
            'Accept-Encoding': "gzip, deflate",
            'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 10_3_3 like Mac OS X) AppleWebKit/603.3.8 (KHTML, like Gecko) Mobile/14G60 MicroMessenger/7.0.3(0x17000321) NetType/WIFI Language/zh_CN'
        }
        rows = res.get('rows') if isinstance(res, dict) else None
        image_urls = []
        if rows:
            self.logger.debug('listing rows: %s', rows)
            # Pair each image URL with the category name so the image
            # pipeline can sort downloads into per-category folders.
            image_urls = [(item.get('img_url'), meta.get('cname')) for item in rows]
        data.update({
            'data_collection': 'mcw',
            'data_from': 'data.rows',
            'image_urls': image_urls,
            'image_header': image_header,
        })
        yield data

        # is_last_page == 0 means more pages remain.
        if isinstance(res, dict) and res.get('is_last_page') == 0:
            next_page = int(meta.get('page', 1)) + 1
            pl = self._base_payload()
            pl.update({
                'sale_c1_id': meta.get('sale_c1_id'),
                'sale_c2_id': meta.get('sale_c2_id'),
                'size': 30,
                'score_type': 1,
                'page': next_page,
            })
            next_meta = dict(meta, page=next_page)
            url = meta.get('url') + '?a=' + str(random.randrange(1, 1000000))
            yield response.request.replace(url=url, body=json.dumps(pl), meta=next_meta)
