# -*- coding: utf-8 -*-
import scrapy
import urllib.parse
import json
import time
import datetime

import settings
from utils import errors, common
from .myspider import MySpider
from items import TbJuLiangfanItem


class TbJuLiangfanSpider(MySpider):
    """Spider for the Taobao Juhuasuan "聚量贩" floor listings.

    Flow: the seed request (served from the project's control host,
    ``settings.WEB_HOST``) yields a crawler task id; `parse` then fans out
    one JSON request per configured floor and emits one
    ``TbJuLiangfanItem`` per product entry in each floor's response.
    """

    name = 'tb_juliangfan'
    # allowed_domains = ['taobao.com']
    redis_key = 'tb_juliangfan:start_urls'
    searchUrl = "https://ju.taobao.com/json/jusp2/ajaxGetTpFloor.json?"
    # Base query-string parameters shared by every floor request.
    # Per-floor parameters are merged into a COPY of this dict (see
    # parse()) so the class-level attribute is never mutated.
    searchParams = {
        "urlKey": "other/juliangfan"
    }
    # One entry per floor: display name + the request parameters that
    # select it. Only the 食品 floors carry a ``subFloorIndex``.
    param_dicts = [
        dict(floor="洗护清洁", params=dict(callback="_mtp_J_79x4dfb31m4", floorIndex=3)),
        dict(floor="护肤彩妆", params=dict(callback="_mtp_J_7bemqo9pkk8", floorIndex=6)),
        dict(floor="母婴用品", params=dict(callback="_mtp_J_7ben5v3judw", floorIndex=7)),
        dict(floor="居家日用", params=dict(callback="_mtp_J_7benqodzxm8", floorIndex=9)),
        dict(floor="食品-粮油生鲜", params=dict(callback="_mtp_J_7beirgdqsjg", floorIndex=4, subFloorIndex=1)),
        dict(floor="食品-零食坚果", params=dict(callback="_mtp_J_7beirgdqsjg", floorIndex=4, subFloorIndex=2)),
        dict(floor="食品-名酒美茶", params=dict(callback="_mtp_J_7beirgdqsjg", floorIndex=4, subFloorIndex=3)),
        dict(floor="食品-滋补保健", params=dict(callback="_mtp_J_7beirgdqsjg", floorIndex=4, subFloorIndex=4))
    ]

    def __init__(self, **kwargs):
        super(TbJuLiangfanSpider, self).__init__(**kwargs)

    def parse(self, response):
        """Dispatch on response origin: seed page -> fan out floor
        requests; floor JSON -> yield one item per product entry.

        Raises:
            errors.JsonDecodeError: when the floor response lacks the
                expected ``data._juspItem.result.itemList`` structure.
        """
        self.log.logger.debug('Parse URL: {0}'.format(response.url))
        domain = self.get_domain(response.url)
        if domain == settings.WEB_HOST:
            # Seed response from the control host: get the task id and
            # schedule one request per configured floor.
            task_id = self.get_taskid(response.text)
            if not task_id:
                return
            for param_dict in self.param_dicts:
                # BUG FIX: the original called
                # ``self.searchParams.update(...)``, mutating the shared
                # class-level dict. A ``subFloorIndex`` set for a 食品
                # sub-floor therefore leaked into subsequent requests for
                # floors that define no sub-floor (and persisted across
                # runs). Merge into a fresh dict per request instead.
                params = dict(self.searchParams)
                params.update(param_dict["params"])
                next_url = self.searchUrl + urllib.parse.urlencode(params)
                meta_dict = {'task_id': task_id, 'floor': param_dict["floor"]}
                yield scrapy.Request(url=next_url, dont_filter=True, callback=self.parse, meta=meta_dict)
        else:
            # Floor JSON response (JSONP-wrapped; common.extract_json
            # strips the callback wrapper).
            task_id = response.meta.get("task_id", 0)
            floor = response.meta.get("floor", "")
            json_data = json.loads(common.extract_json(response.text.strip()))

            try:
                item_list = json_data["data"]["_juspItem"]["result"]["itemList"]
            except (KeyError, TypeError) as e:
                # Narrowed from bare ``Exception``: only a missing key or
                # an unexpectedly non-dict level means a malformed payload.
                self.log.logger.exception(str(e))
                raise errors.JsonDecodeError

            for entry in item_list:
                item = TbJuLiangfanItem()
                item["crawler_task_id"] = task_id
                item["item_id"] = entry["baseinfo"]["itemId"]
                item["title"] = entry["name"]["shortName"]
                item["pic_url"] = entry["baseinfo"]["picUrlFromIc"]
                item["price"] = entry["price"]["actPrice"]
                item["sales"] = entry["remind"]["soldCount"]
                item["floor"] = floor
                # ``ostime`` is a millisecond epoch timestamp.
                os_time = entry["baseinfo"]["ostime"]
                localtime = time.localtime(os_time / 1000)
                item["os_time"] = time.strftime('%Y-%m-%d %H:%M:%S', localtime)
                item["crawled_time"] = datetime.datetime.now()
                yield item

            # NOTE(review): responses are not guaranteed to arrive in the
            # order requests were scheduled, so keying completion on the
            # last-configured floor is best-effort — confirm acceptable.
            if floor == "食品-滋补保健":
                self.set_task_done(task_id)