# -*- coding: utf-8 -*-
import scrapy
import urllib.parse
import json
import time
import datetime
import hashlib
import requests
import re
from scrapy.http.cookies import CookieJar

import settings
from utils import errors, common
from .myspider import MySpider
from items import TbQianggouItem


class TbQianggouSpider(MySpider):
    """Spider for Taobao "qianggou" (flash-sale) listings.

    Crawl flow:
      1. ``parse`` first receives a task page on our own host
         (``settings.WEB_HOST``); it extracts the crawler task id and fires
         an (unsigned) mtop API request mainly to collect cookies.
      2. ``parse`` then handles the mtop response: it pulls the
         ``_m_h5_tk`` sign token out of the ``Set-Cookie`` headers, stores
         the cookies in a :class:`CookieJar`, and re-requests page 0 with a
         valid signature.
      3. ``parse_page`` reads the total item count and fans out one signed
         request per 50-item page.
      4. ``parse_detail`` converts each JSON entry into a
         :class:`TbQianggouItem`.
    """

    name = 'tb_qianggou'
    # allowed_domains = ['taobao.com']
    redis_key = 'tb_qianggou:start_urls'
    # mtop request payload template; ``%d`` is the paging offset (page size 50).
    data = '{"orderType":"personalized","offset":%d,"limit":50}'
    headers = {
        "Accept":"*/*",
        "Accept-Encoding":"gzip, deflate, br",
        "Accept-Language":"zh-CN,zh;q=0.8",
        "Connection": "keep-alive",
        "Host": "api.m.taobao.com",
        "Referer":"https://qiang.taobao.com/",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36"
    }

    def __init__(self, **kwargs):
        """Forward keyword arguments to :class:`MySpider` and log start-up."""
        super().__init__(**kwargs)
        self.log.logger.debug('spider {0} initialized'.format(self.name))

    def get_sign(self, token="", offset=0):
        """Compute the mtop API request signature.

        The signature is the MD5 hex digest of
        ``token&timestamp&appkey&data`` — the scheme Taobao's h5 mtop
        endpoints expect.

        :param token: ``_m_h5_tk`` token (empty on the very first request,
            which is expected to fail signature validation but yields the
            token cookie).
        :param offset: paging offset substituted into ``self.data``.
        :returns: ``(timestamp, sign)`` — the millisecond timestamp used in
            the digest (the URL must carry the same value) and the hex sign.
        """
        timestamp = str(int(time.time() * 1000))
        appkey = "12574478"
        payload = self.data % offset
        digest = hashlib.md5('&'.join([token, timestamp, appkey, payload]).encode("utf8"))
        return timestamp, digest.hexdigest()

    def get_nexturl(self, timestamp, sign, offset=0):
        """Build a signed mtop query URL for the given page ``offset``.

        ``timestamp`` must be the exact value used when computing ``sign``
        (see :meth:`get_sign`), otherwise the server rejects the request.
        """
        payload = self.data % offset
        searchUrl = "http://api.m.taobao.com/h5/mtop.msp.qianggou.queryitembyordertype/1.0/?"
        searchParams = dict(v=1.0, api="mtop.msp.qianggou.queryItemByOrderType",
                            appKey="12574478", t=timestamp, callback="mtopjsonp1",
                            type="jsonp", data=payload, sign=sign)
        return searchUrl + urllib.parse.urlencode(searchParams)

    def get_token(self, cookies):
        """Extract the ``_m_h5_tk`` sign token from ``Set-Cookie`` headers.

        :param cookies: iterable of raw ``Set-Cookie`` header values (bytes).
        :returns: the token portion before the first ``_`` separator, or
            ``""`` when no matching cookie is present.
        """
        for raw in cookies:
            cookie_str = common.bytes_to_str(raw)
            matched = re.match(".*_m_h5_tk=(.*?)_", cookie_str)
            if matched:
                return matched.group(1)
        return ""

    def parse(self, response):
        """Bootstrap the crawl: obtain the task id, then the sign token.

        Called twice per task — once for the task page on our own host and
        once for the token-priming mtop response (see class docstring).
        """
        self.log.logger.debug('Parse URL: {0}'.format(response.url))
        domain = self.get_domain(response.url)
        if domain == settings.WEB_HOST:
            # First pass: task page on our host. Extract the task id and
            # issue an unsigned request whose response carries the token.
            task_id = self.get_taskid(response.text)
            if task_id:
                timestamp, sign = self.get_sign()
                next_url = self.get_nexturl(timestamp, sign)
                meta_dict = {'task_id': task_id}
                yield scrapy.Request(url=next_url, headers=self.headers, dont_filter=True,
                                     callback=self.parse, meta=meta_dict)
        else:
            # Second pass: mtop response. Harvest token + cookies and
            # re-request page 0 with a valid signature.
            task_id = response.meta.get("task_id", 0)
            cookies = response.headers.getlist('Set-Cookie')
            token = self.get_token(cookies)
            cookieJar = response.meta.setdefault('cookie_jar', CookieJar())
            cookieJar.extract_cookies(response, response.request)
            timestamp, sign = self.get_sign(token)
            next_url = self.get_nexturl(timestamp, sign)
            meta_dict = {'task_id': task_id, 'dont_merge_cookies': True,
                         'cookie_jar': cookieJar, 'token': token}
            request = scrapy.Request(url=next_url, headers=self.headers, dont_filter=True,
                                     callback=self.parse_page, meta=meta_dict)
            cookieJar.add_cookie_header(request)  # apply Set-Cookie ourselves
            yield request

    def parse_page(self, response):
        """Read the total item count and fan out one request per 50-item page."""
        token = response.meta.get("token", "")
        cookieJar = response.meta.setdefault('cookie_jar', CookieJar())
        json_data = json.loads(common.extract_json(response.text.strip()))
        total = int(json_data["data"]["count"])
        for offset in range(0, total, 50):
            timestamp, sign = self.get_sign(token, offset)
            next_url = self.get_nexturl(timestamp, sign, offset)
            # BUGFIX: build a fresh meta dict per request. The previous code
            # reused and mutated one shared dict (response.meta itself), so
            # every queued request — and parse_detail's completion check —
            # saw only the *last* offset/total.
            meta_dict = dict(response.meta)
            meta_dict.update({'offset': offset, 'total': total})
            request = scrapy.Request(url=next_url, headers=self.headers, dont_filter=True,
                                     callback=self.parse_detail, meta=meta_dict)
            cookieJar.add_cookie_header(request)  # apply Set-Cookie ourselves
            yield request

    def parse_detail(self, response):
        """Yield one :class:`TbQianggouItem` per entry on this result page.

        Marks the task done once the final page (``offset + 50 >= total``)
        has been processed.

        :raises errors.JsonDecodeError: when the response JSON lacks the
            expected ``data.items`` structure.
        """
        self.log.logger.debug('Parse Detail: {0}'.format(response.url))
        task_id = response.meta.get("task_id", 0)
        offset = response.meta.get("offset", 0)
        total = response.meta.get("total", 0)
        json_data = json.loads(common.extract_json(response.text.strip()))

        try:
            itemList = json_data["data"]["items"]
        except Exception as e:
            self.log.logger.exception(str(e))
            raise errors.JsonDecodeError

        for entry in itemList:
            item = TbQianggouItem()
            item["crawler_task_id"] = task_id
            item["item_id"] = entry["itemId"]
            item["title"] = entry["name"]
            item["selling_point"] = entry["selfSellingPoint"]
            item["pic_url"] = entry["picUrl"]
            item["price"] = entry["salePrice"]
            item["sold_num"] = entry["itemSoldNum"]
            item["sold_rate"] = entry["soldRate"]
            # "ostime" is epoch milliseconds; store as a local-time string.
            os_time = int(entry["ostime"])
            localtime = time.localtime(os_time / 1000)
            item["os_time"] = time.strftime('%Y-%m-%d %H:%M:%S', localtime)
            item["crawled_time"] = datetime.datetime.now()
            yield item

        if offset + 50 >= total:
            self.set_task_done(task_id)