#!/usr/bin/env python3

import scrapy
import json
import os

from zuwome.items import UidItem

#  Current working directory at startup (NOT this file's path) — base dir for output files
FILEPATH = os.getcwd()

class getUrlsSpider(scrapy.Spider):
    """Download the zuwome "recommend" and "new" category listings and yield
    one UidItem per user found in each listing. Raw response bodies are
    appended to <FILEPATH>/userData/datas.json, one JSON document per line.
    """
    name = "downuids"

    def start_requests(self):
        """Issue one request per category URL.

        meta['count'] tags which listing a response belongs to so parse()
        can pick the right payload shape (1 = "recommend", 2 = "new").
        """
        urls = [
                "https://v2.zuwome.com/rents_bycate?v=2.7.2&dev=android&dev version=6.0&dev name=HUAWEIGRA-UL000&uuid=56af31f4b502da60&cate=recommend",
                "https://v2.zuwome.com/rents_bycate?v=2.7.2&dev=android&dev version=6.0&dev name=HUAWEIGRA-UL000&uuid=56af31f4b502da60&cate=new",
        ]

        # Headers mimic the official Android client this API expects.
        headers = {
                'Accept-Charset': 'UTF-8',
                'Accept-Encoding': 'gzip',
                'User-Agent': 'Dalvik/2.1.0(Linux;U;Android 6.0;HUAWEI GRA-U    L00 Build/HUAWEIGRA-UL00)',
                'X-Requested-With': 'XMLHttpRequest',
                'Content-type': 'application/x-www-form-urlencoded',
                'Connection': 'Keep-Alive',
                'Host': 'v2.zuwome.com',
        }

        # enumerate replaces the hand-rolled counter (starts at 1 to keep
        # the original meta['count'] values).
        for count, url in enumerate(urls, start=1):
            yield scrapy.Request(url=url, headers=headers,
                    meta={'count': count}, callback=self.parse)


    def parse(self, response):
        """Parse one category listing response and yield UidItems.

        Fixes vs. the original:
        - json.loads(json.loads(json.dumps(response.text))) was a no-op
          round-trip (dumps of a str then loads returns the same str);
          json.loads(response.text) is equivalent and cheaper.
        - The archive file now receives the raw JSON body instead of a
          double-encoded JSON-string-of-a-string.
        - The output directory is created if missing and the file is
          opened as UTF-8 (the payload contains non-ASCII text).
        """
        data = json.loads(response.text)
        count = response.meta['count']

        # Save the captured JSON payload locally, one response per line.
        out_dir = os.path.join(FILEPATH, "userData")
        os.makedirs(out_dir, exist_ok=True)
        with open(os.path.join(out_dir, "datas.json"), "a+",
                  encoding="utf-8") as f:
            f.write(response.text)
            f.write("\n")

        if count == 1:
            # "recommend" listing: data['data'] is a dict of named sections,
            # each a list of entries carrying a nested user record.
            print("parse--list:", data.keys())
            hot_list = data['data']
            for section, entries in hot_list.items():
                if section == "hot" or section == "recommend":
                    for entry in entries:
                        yield UidItem(uid=entry['user']['uid'])
        elif count == 2:
            # "new" listing: data['data'] is a flat list of entries.
            print("parse--dict:", data.keys())
            hot_list = data['data']
            for entry in hot_list:
                yield UidItem(uid=entry['user']['uid'])


