# -*- coding: utf-8 -*-
import scrapy
import urllib.parse
import json
import datetime

import settings
from utils import errors, common
from .myspider import MySpider
from items import TBKeyListItem


class TbKeylistSpider(MySpider):
    """Crawl Taobao's keyword-suggestion API, expanding hot words recursively.

    Flow:
      1. A start URL on our own web host (``settings.WEB_HOST``) identifies a
         crawl task; the task's seed keyword is looked up and a level-1
         suggestion request is issued.
      2. Each suggestion response yields one :class:`TBKeyListItem` per
         suggested word, and every word is itself expanded with a new
         request until ``MAX_LEVEL`` is reached.
      3. At the deepest level the task is marked done (once).
    """
    name = 'tb_keylist'
    # allowed_domains = ['taobao.com']
    redis_key = 'tb_keylist:start_urls'
    # Suggestion endpoint; the url-encoded keyword is appended to this base.
    searchUrl = "https://suggest.taobao.com/sug?code=utf-8&q="
    # Deepest suggestion level to expand (levels run 1..MAX_LEVEL).
    MAX_LEVEL = 3

    def __init__(self, **kwargs):
        super(TbKeylistSpider, self).__init__(**kwargs)

    def parse(self, response):
        """Dispatch on the response's origin domain.

        Responses from ``settings.WEB_HOST`` seed the crawl (task lookup ->
        first suggestion request); all other responses are treated as
        suggestion-API JSON and parsed into items plus follow-up requests.

        :param response: scrapy ``Response``; ``meta`` may carry
            ``task_id`` and ``level`` set by a previous request.
        :yields: ``TBKeyListItem`` items and follow-up ``scrapy.Request``s.
        """
        self.log.logger.debug('Parse URL: {0}'.format(response.url))
        domain = self.get_domain(response.url)
        if domain == settings.WEB_HOST:
            task_id = self.get_taskid(response.text)
            if task_id:
                keyword = self.get_keyword(task_id)
                # Guard: get_keyword may return None/empty, and
                # urllib.parse.quote(None) raises TypeError.
                if keyword:
                    next_url = self.searchUrl + urllib.parse.quote(keyword)
                    meta_dict = {'task_id': task_id, 'level': 1}
                    yield scrapy.Request(url=next_url, dont_filter=True,
                                         callback=self.parse, meta=meta_dict)
        else:
            task_id = response.meta.get("task_id", 0)
            level = response.meta.get("level", 0)
            # The endpoint returns JSONP-style text; extract_json strips the
            # wrapper so json.loads can parse it.
            json_data = json.loads(common.extract_json(response.text.strip()))
            result = json_data.get("result", [])

            for entry in result:
                # Each entry is a pair: [hotword, total] — presumably the
                # suggested phrase and its popularity count; TODO confirm
                # against the suggestion API.
                item = TBKeyListItem()
                item["crawler_task_id"] = task_id
                item["level"] = level
                item["hotword"] = entry[0]
                item["total"] = entry[1]
                item["crawled_time"] = datetime.datetime.now()
                yield item

                # Expand this suggestion one level deeper, until MAX_LEVEL.
                if level < self.MAX_LEVEL:
                    next_url = self.searchUrl + urllib.parse.quote(entry[0])
                    meta_dict = {'task_id': task_id, 'level': level + 1}
                    yield scrapy.Request(url=next_url, dont_filter=True,
                                         callback=self.parse, meta=meta_dict)

            # Deepest level reached: mark the task finished exactly once.
            # NOTE(review): if the API returns an empty result list before
            # MAX_LEVEL, no branch ever reaches this line and the task is
            # never marked done — confirm this is intended.
            if level >= self.MAX_LEVEL and not self.is_task_done(task_id):
                self.set_task_done(task_id)