# -*- coding: utf-8 -*-
import scrapy
import urllib.parse
import json
import datetime
import re
from w3lib.html import remove_tags

import settings
from utils import errors, common
from .myspider import MySpider
from items import TbItemPropItem


class TbItemPropSpider(MySpider):
    """Crawl Taobao search-result pages for a task's keyword and yield item records.

    Flow:
      1. A start URL on our own host (``settings.WEB_HOST``) carries a task id;
         from it we resolve the search keyword and issue the first Taobao
         search request (``s=0``).
      2. Each Taobao results page embeds its data as a ``g_page_config = {...};``
         JSON blob; every auction entry is emitted as a ``TbItemPropItem``.
      3. Pagination continues in steps of 44 (Taobao's page size) until the
         result list disappears or the task is externally marked done.
    """

    name = 'tb_itemprop'
    # allowed_domains = ['taobao.com']
    redis_key = 'tb_itemprop:start_urls'
    searchUrl = "https://s.taobao.com/search?"
    # Base query parameters for every search request.  This is a CLASS-level
    # dict shared by all instances, so parse() must never mutate it — it builds
    # a per-request copy instead (the old code updated it in place, letting
    # concurrent tasks clobber each other's "q"/"s" values).
    searchParams = {
        # "tab": "mall",
        "sort": "renqi-desc",  # sort by popularity
        "bcoffset": 4,
        "ntoffset": 4,
        # Raw value on purpose: urlencode() percent-encodes it to "1%2C48".
        # (The previous literal "1%2C48" was double-encoded to "1%252C48".)
        "4ppushleft": "1,48"
    }

    # NOTE: the former pass-through __init__ was removed; MySpider.__init__
    # already accepts **kwargs, so construction behavior is unchanged.

    def parse(self, response):
        """Dispatch on the response's host: task page vs. Taobao results page.

        :param response: scrapy Response; for Taobao pages ``response.meta``
                         carries ``task_id`` (defaults to 0 if absent).
        :yields: follow-up ``scrapy.Request`` objects and ``TbItemPropItem``s.
        """
        self.log.logger.debug('Parse URL: {0}'.format(response.url))

        if self.get_domain(response.url) == settings.WEB_HOST:
            # Task page on our own host: resolve keyword and fire the first
            # search request.  meta carries task_id through the whole crawl.
            task_id = self.get_taskid(response.text)
            if task_id:
                keyword = self.get_keyword(task_id)
                # Per-request copy — never mutate the shared class dict.
                params = dict(self.searchParams, q=keyword, s=0)
                next_url = self.searchUrl + urllib.parse.urlencode(params)
                yield scrapy.Request(url=next_url, dont_filter=True,
                                     callback=self.parse,
                                     meta={'task_id': task_id})
            return

        # Taobao search-results page.
        task_id = response.meta.get("task_id", 0)
        match = re.search(r'g_page_config = (?P<data>.*);', response.text)
        if match:
            jsonDict = json.loads(match.group('data'))
            try:
                resultList = jsonDict["mods"]["itemlist"]["data"]["auctions"]
            except (KeyError, TypeError):
                # No result list any more: the search is exhausted, so finish
                # the crawl task and stop paginating.
                self.set_task_done(task_id)
                return

            for entry in resultList:
                raw_pic = entry["pic_url"]
                # Protocol-relative URLs ("//img...") need an explicit scheme.
                pic_url = raw_pic if raw_pic.startswith("https:") else "https:" + raw_pic
                item = TbItemPropItem()
                item["crawler_task_id"] = task_id
                item["item_id"] = entry["nid"]
                item["title"] = remove_tags(entry["title"])
                item["pic_url"] = pic_url
                item["price"] = entry["view_price"]
                item["sales"] = common.extract_num(entry["view_sales"])
                item["location"] = entry["item_loc"]
                item["is_tmall"] = entry["shopcard"].get("isTmall", 0)
                item["nick"] = entry["nick"]
                item["credit"] = entry["shopcard"].get("sellerCredit", None)
                item["crawled_time"] = datetime.datetime.now()
                yield item

        if not self.is_task_done(task_id):
            # Next page: bump the "s" offset by Taobao's page size of 44.
            # NOTE(review): str.replace swaps every "s=<n>" occurrence in the
            # URL; with the current parameter set only the offset matches.
            s = common.extract_num(response.url, prefix="s=")
            next_url = response.url.replace("s={0}".format(s),
                                            "s={0}".format(s + 44))
            yield scrapy.Request(url=next_url, dont_filter=True,
                                 callback=self.parse,
                                 meta={'task_id': task_id})
        else:
            self.set_task_done(task_id)