from pprint import pprint

import scrapy

from scrapy_Vip_40.items import ScrapyVip40Item


class VipSpider(scrapy.Spider):
    """Crawl product data from vip.com's search API.

    Flow:
      1. ``start_requests`` pages through the search-rank endpoint, which
         returns 120 product ids per page, for the user-supplied keyword.
      2. ``parse_first`` splits each page's ids into the batch sizes the
         detail endpoint accepts (50 / 50 / 20) and requests the details.
      3. ``parse_getGoodsData`` extracts the product fields and yields
         ``ScrapyVip40Item`` items.

    The keyword and page count can be supplied as spider arguments
    (``scrapy crawl Vip -a keyword=... -a pages=...``); when absent, the
    spider prompts for them interactively at instantiation time.
    """

    name = "Vip"
    allowed_domains = ["vip.com"]

    def __init__(self, keyword=None, pages=None, *args, **kwargs):
        """Collect the search keyword and page count.

        Prompting here — instead of in the class body — keeps plain imports
        and commands such as ``scrapy list`` from blocking on stdin; only an
        actual crawl triggers the interactive prompts.

        :param keyword: search keyword; prompted for when ``None``.
        :param pages: number of result pages to crawl; prompted for when
            ``None``.
        """
        super().__init__(*args, **kwargs)
        self.crawl_data = keyword if keyword is not None else input('请输入你要在唯品会爬取的数据：')
        if pages is None:
            pages = input('请输入你要爬取的页数：')
        # Each page holds 120 items and pageOffset advances in steps of 120
        # (0 -> 120 -> 240 ...), so the page count is converted here to a
        # total item offset used as the loop bound in start_requests().
        self.crawl_page = int(pages) * 120

    def start_requests(self):
        """Issue one search-rank request per result page.

        Loops pageOffset from 0 up to self.crawl_page in steps of 120
        (one request per page of 120 products).
        """
        for page in range(0, self.crawl_page, 120):
            url = 'https://mapi.vip.com/vips-mobile/rest/shopping/pc/search/product/rank'
            data = {
                # 'callback' is deliberately omitted: with it the endpoint
                # wraps the response in JSONP instead of plain JSON.
                # 'callback': 'getMerchandiseIds',
                'app_name': 'shop_pc',
                'app_version': '4.0',
                'warehouse': 'VIP_NH',
                'fdc_area_id': '104104119',
                'client': 'pc',
                'mobile_platform': '1',
                'province_id': '104104',
                'api_key': '70f71280d5d547b2a7bb370a529aeea1',
                'user_id': '271963906',
                'mars_cid': '1679974369457_817380ab8428dc58dc862eacc1f8807f',
                'wap_consumer': 'c',
                'standby_id': 'nature',
                'keyword': self.crawl_data,
                'lv3CatIds': '',
                'lv2CatIds': '',
                'lv1CatIds': '',
                'brandStoreSns': '',
                'props': '',
                'priceMin': '',
                'priceMax': '',
                'vipService': '',
                'sort': '0',
                # Offset of the current page; the API expects a string.
                'pageOffset': str(page),
                'channelId': '1',
                'gPlatform': 'PC',
                'batchSize': '120',
                '_': '1679989908888'
            }
            headers = {
                'accept': '*/*',
                'accept-encoding': 'gzip, deflate, br',
                'accept-language': 'zh-CN,zh;q=0.9',
                'cache-control': 'no-cache',
                'cookie': 'vip_first_visitor=1; vip_address=%257B%2522pid%2522%253A%2522104104%2522%252C%2522cid%2522%253A%2522104104119%2522%252C%2522pname%2522%253A%2522%255Cu5e7f%255Cu4e1c%255Cu7701%2522%252C%2522cname%2522%253A%2522%255Cu4e1c%255Cu839e%255Cu5e02%2522%257D; vip_province=104104; vip_province_name=%E5%B9%BF%E4%B8%9C%E7%9C%81; vip_city_name=%E4%B8%9C%E8%8E%9E%E5%B8%82; vip_city_code=104104119; vip_wh=VIP_NH; vip_ipver=31; mars_cid=1679974369457_817380ab8428dc58dc862eacc1f8807f; mars_sid=47e6193c71c16ef055ffbd030317c4eb; VIP_QR_FIRST=1; mars_pid=0; vip_access_times=%7B%22list%22%3A9%7D; _jzqco=%7C%7C%7C%7C%7C1.1446215096.1679982432466.1679982432466.1679982432466.1679982432466.1679982432466.0.0.0.1.1; PASSPORT_ACCESS_TOKEN=516D7E427964D66D65933F71A756947F0C6FA617; VipRUID=271963906; VipUID=dd2e4351c704b5f65c0bc7b7520851a3; VipRNAME=135*****288; VipDegree=D1; user_class=c; VipUINFO=luc%3Ac%7Csuc%3Ac%7Cbct%3Ac_new%7Chct%3Ac_new%7Cbdts%3A0%7Cbcts%3A0%7Ckfts%3A0%7Cc10%3A0%7Crcabt%3A0%7Cp2%3A0%7Cp3%3A1%7Cp4%3A0%7Cp5%3A1%7Cul%3A3102; VipLID=0%7C1679982470%7C21a81b; visit_id=2040CF1C5EBCB5343039AF0B024D7FD0; vip_cps_cuid=CU1679987850689f2b11b04f107085f0; vip_cps_cid=1679987850692_30029c2f3a42f81973da221a47431608; cps_share=cps_share; cps=adp%3Antq8exyc%3A%40_%401679987850691%3Amig_code%3A4f6b50bf15bfa39639d85f5f1e15b10f%3Aac014miuvl0000b5sq8ca8tgwvxf1jnk; PAPVisitorId=8fb04195b11724412d810b61eb4cf84a; vip_new_old_user=1; vip_tracker_source_from=; VipDFT=3; pg_session_no=40; waitlist=%7B%22pollingId%22%3A%2279CEE99F-1102-4128-B9FF-155A7CF060FF%22%2C%22pollingStamp%22%3A1679989908468%7D',
                'pragma': 'no-cache',
                'referer': 'https://category.vip.com/',
                'sec-ch-ua': '"Google Chrome";v="111", "Not(A:Brand";v="8", "Chromium";v="111"',
                'sec-ch-ua-mobile': '?0',
                'sec-ch-ua-platform': '"Windows"',
                'sec-fetch-dest': 'script',
                'sec-fetch-mode': 'no-cors',
                'sec-fetch-site': 'same-site',
                'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
            }

            # FormRequest with method='GET' sends formdata as the query string.
            yield scrapy.FormRequest(url=url, method='GET', formdata=data, headers=headers, callback=self.parse_first)

    def parse_first(self, response):
        """Collect the product ids of one search page and request details.

        NOTE(review): if no response ever arrives here, the request headers
        are most likely incomplete (observed during development).
        """
        pid_list = [product['pid'] for product in response.json()['data']['products']]

        # The detail endpoint takes the 120 ids of a page in three batches
        # of 50, 50 and 20 (mirroring the site's own requests; sending all
        # 120 at once was tried and does not work). Empty batches — pages
        # with fewer than 120 products — are skipped.
        for start, stop in ((0, 50), (50, 100), (100, 120)):
            batch = pid_list[start:stop]
            if batch:
                # sendPid is a generator, so its requests must be re-yielded
                # from this callback for Scrapy to schedule them.
                yield from self.sendPid(','.join(batch))

    def sendPid(self, productId):
        """Yield a detail request for a comma-separated batch of product ids.

        :param productId: comma-joined pid string (at most 50 ids).
        """
        url = 'https://mapi.vip.com/vips-mobile/rest/shopping/pc/product/module/list/v2'
        data = {
            # 'callback' omitted for the same reason as in start_requests:
            # with it the endpoint returns JSONP instead of plain JSON.
            # 'callback': 'getMerchandiseDroplets2',
            'app_name': 'shop_pc',
            'app_version': '4.0',
            'warehouse': 'VIP_NH',
            'fdc_area_id': '104104119',
            'client': 'pc',
            'mobile_platform': '1',
            'province_id': '104104',
            'api_key': '70f71280d5d547b2a7bb370a529aeea1',
            'user_id': '271963906',
            'mars_cid': '1679974369457_817380ab8428dc58dc862eacc1f8807f',
            'wap_consumer': 'c',
            'productIds': productId,
            'scene': 'search',
            'standby_id': 'nature',
            'extParams': '{"stdSizeVids":"","preheatTipsVer":"3","couponVer":"v2","exclusivePrice":"1","iconSpec":"2x","ic2label":1,"superHot":1,"bigBrand":"1"}',
            'context': '',
            '_': '1679989908897',
        }
        headers = {
            'accept': '*/*',
            'accept-encoding': 'gzip, deflate, br',
            'accept-language': 'zh-CN,zh;q=0.9',
            'cache-control': 'no-cache',
            'cookie': 'vip_first_visitor=1; vip_address=%257B%2522pid%2522%253A%2522104104%2522%252C%2522cid%2522%253A%2522104104119%2522%252C%2522pname%2522%253A%2522%255Cu5e7f%255Cu4e1c%255Cu7701%2522%252C%2522cname%2522%253A%2522%255Cu4e1c%255Cu839e%255Cu5e02%2522%257D; vip_province=104104; vip_province_name=%E5%B9%BF%E4%B8%9C%E7%9C%81; vip_city_name=%E4%B8%9C%E8%8E%9E%E5%B8%82; vip_city_code=104104119; vip_wh=VIP_NH; vip_ipver=31; mars_cid=1679974369457_817380ab8428dc58dc862eacc1f8807f; mars_sid=47e6193c71c16ef055ffbd030317c4eb; VIP_QR_FIRST=1; mars_pid=0; vip_access_times=%7B%22list%22%3A9%7D; _jzqco=%7C%7C%7C%7C%7C1.1446215096.1679982432466.1679982432466.1679982432466.1679982432466.1679982432466.0.0.0.1.1; PASSPORT_ACCESS_TOKEN=516D7E427964D66D65933F71A756947F0C6FA617; VipRUID=271963906; VipUID=dd2e4351c704b5f65c0bc7b7520851a3; VipRNAME=135*****288; VipDegree=D1; user_class=c; VipUINFO=luc%3Ac%7Csuc%3Ac%7Cbct%3Ac_new%7Chct%3Ac_new%7Cbdts%3A0%7Cbcts%3A0%7Ckfts%3A0%7Cc10%3A0%7Crcabt%3A0%7Cp2%3A0%7Cp3%3A1%7Cp4%3A0%7Cp5%3A1%7Cul%3A3102; VipLID=0%7C1679982470%7C21a81b; visit_id=2040CF1C5EBCB5343039AF0B024D7FD0; vip_cps_cuid=CU1679987850689f2b11b04f107085f0; vip_cps_cid=1679987850692_30029c2f3a42f81973da221a47431608; cps_share=cps_share; cps=adp%3Antq8exyc%3A%40_%401679987850691%3Amig_code%3A4f6b50bf15bfa39639d85f5f1e15b10f%3Aac014miuvl0000b5sq8ca8tgwvxf1jnk; PAPVisitorId=8fb04195b11724412d810b61eb4cf84a; vip_new_old_user=1; vip_tracker_source_from=; VipDFT=3; waitlist=%7B%22pollingId%22%3A%225CA19D4C-C9C2-4945-A3C7-29990B1D2FED%22%2C%22pollingStamp%22%3A1679989911432%7D; pg_session_no=41',
            'pragma': 'no-cache',
            'referer': 'https://category.vip.com/',
            'sec-ch-ua': '"Google Chrome";v="111", "Not(A:Brand";v="8", "Chromium";v="111"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Windows"',
            'sec-fetch-dest': 'script',
            'sec-fetch-mode': 'no-cors',
            'sec-fetch-site': 'same-site',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
        }
        yield scrapy.FormRequest(url=url, method='GET', formdata=data, headers=headers, callback=self.parse_getGoodsData)

    def parse_getGoodsData(self, response):
        """Extract the product fields from a detail response and yield items."""
        for product in response.json()['data']['products']:
            brandId = product['brandId']
            productId = product['productId']
            goods_attr = ','.join(attr['name'] + ':' + attr['value'] for attr in product['attrs'])

            # 'labels' is not present on every product, so guard the lookup.
            if 'labels' in product:
                label = ','.join(tag['bizType'] + ':' + tag['value'] for tag in product['labels'])
            else:
                label = None

            data = {
                '链接': f'https://detail.vip.com/detail-{brandId}-{productId}.html',
                '标题': product['title'],
                '品牌商': product['brandShowName'],
                '原价': product['price']['marketPrice'],
                '售价': product['price']['salePrice'],
                '折扣': product['price']['saleDiscount'],
                '商品属性': goods_attr,
                '标签': label
            }
            pprint(data)
            # The item fields are declared with these Chinese names, so the
            # dict can be expanded straight into the item constructor.
            yield ScrapyVip40Item(**data)




