# -*- coding: utf-8 -*-
import functools
import json
import os
import re
import time

from bs4 import BeautifulSoup as bs

from browser import JS
from tools.spider_utils import write, read


def log_getattribute(cls):
    """Class decorator that logs every attribute access on instances of *cls*.

    Wraps ``cls.__getattribute__`` so each lookup prints the attribute name
    and the class name before delegating to the original implementation.

    :param cls: the class to instrument.
    :return: the same class object, now instrumented.
    """
    orig_getattribute = cls.__getattribute__

    def getattribute(self, name):
        print('Calling {name} from {cls_name}'.format(name=name, cls_name=cls.__name__))
        return orig_getattribute(self, name)

    # Bug fix: the original defined the wrapper but never installed it,
    # making the decorator a no-op.
    cls.__getattribute__ = getattribute
    return cls


def count_time(func):
    """Decorator that prints the wall-clock duration of each call to *func*.

    :param func: the callable to time.
    :return: a wrapper that forwards all arguments, prints the elapsed
        seconds, and returns *func*'s result unchanged.
    """
    # functools.wraps preserves func.__name__/__doc__ on the wrapper —
    # the original dropped them, breaking introspection of decorated
    # functions (and this module relies on __name__ elsewhere).
    @functools.wraps(func)
    def int_time(*args, **kwargs):
        start_time = time.time()  # call start
        r = func(*args, **kwargs)
        over_time = time.time()  # call end
        total_time = over_time - start_time
        print('{}共计{}秒'.format(func.__name__, total_time))
        return r

    return int_time


class Tmall(object):
    """Scraper for Tmall search listings and product-detail pages.

    Pages are fetched through an externally supplied browser driver
    (``bro``); cookies are persisted to ``cookies.txt`` between requests
    so the session survives across runs.  Fetched HTML can be cached to
    disk and re-parsed later without hitting the network again.
    """

    def __init__(self, bro="") -> None:
        """Store URL templates and, if a driver is given, snapshot its cookies.

        :param bro: browser driver exposing ``get_cookies`` / ``add_cookie``
            (presumably Selenium-like — TODO confirm against caller).
        """
        # PC search-result template: first {} is the query, second the page.
        # Mobile variant kept for reference:
        # "https://list.tmall.com/m/search_items.htm?page_size=20&page_no=308&q=%B3%C4%C9%C0&type=p&tmhkh5=&spm=a220m.8599659.a2227oh.d100&from=mallfp..m_1_searchbutton&searchType=default&closedKey="
        self.pageurl = "https://list.tmall.com/search_product.htm?type=pc&q={}&sort=s&style=g&from=.list.pc_1_searchbutton&jumpto={}"
        # Product-detail template: {} is the numeric item id.
        self.detailurl = "https://detail.tmall.com/item.htm?spm=a220m.1000858.1000725.1.30eb5f4c9J0Hz9&id={}&areaId=321300&cat_id=2&is_b=1"
        self.js = JS()
        if bro:
            self.s = bro
            self.get_cookies()

    # TODO: keep the full TShop JSON instead of cherry-picking fields.
    def get_info(self, html):
        """Extract product fields from a detail-page HTML string.

        :param html: full HTML of a Tmall product-detail page.
        :return: dict with keys ``title``, ``region``, ``attribute``
            (name -> value map), ``img`` and ``price``.
        :raises IndexError: when the page lacks the TShop.Setup payload
            (e.g. a captcha page was saved instead of a product page).
        """
        r = {}
        # Pull the embedded JSON out of the TShop.Setup(...) call.
        # Fixed to a raw string: the original's "\(", "\)" and "\;" were
        # invalid *string* escapes (SyntaxWarning on modern Python); the
        # dot in "TShop.Setup" is now escaped to match literally.
        d = re.findall(r"TShop\.Setup\((.*?)\);", html, re.S)[0]
        j = json.loads(d)
        # Title
        r["title"] = j["itemDO"]["title"]
        # Parse the HTML for fields not present in the JSON blob.
        body = bs(html, "html.parser")
        # Seller region: hidden input inside the bid form.
        J_FrmBid = body.find("form", id="J_FrmBid")
        r["region"] = J_FrmBid.findChild(
            "input", attrs={'name': 'region'}).get("value")

        # Product attributes, variant 1: a th/td table.
        r["attribute"] = {}
        J_Attrs = body.find("div", id="J_Attrs")
        if J_Attrs:
            J_Attrs_tbody = J_Attrs.find("tbody")
            th = J_Attrs_tbody.find_all("th")
            td = J_Attrs_tbody.find_all("td")
            # Section headers span the whole row (colspan) — skip them so
            # the remaining th cells pair one-to-one with td cells.
            th2 = [i for i in th if not i.get("colspan")]
            r["attribute"].update(
                {a.text: b.text.replace("\xa0", "") for a, b in zip(th2, td)})
        # Product attributes, variant 2: a "key:value" text list.
        J_AttrList = body.find("div", class_="attributes-list")
        if J_AttrList:
            J_Attrs = [i.split(":") for i in J_AttrList.text.split(
                "\n") if i.find("\xa0") > -1]
            r["attribute"].update({a: b.replace("\xa0", "")
                                   for a, b in J_Attrs})
        # Image + price are derived from the first SKU entry.
        r["skuList"] = j["valItemInfo"]["skuList"]
        r["propertyPics"] = j["propertyPics"]
        r["skuMap"] = j["valItemInfo"]["skuMap"]
        r["code"] = r["skuList"][0]["pvs"]
        r["img"] = r["propertyPics"][";" + r["code"].split(";")[-1] + ";"][0].replace(
            "//img.alicdn.com", "https://img.alicdn.com")
        r["price"] = r["skuMap"][";" + r["code"] + ";"]["price"]
        # Drop the bulky intermediates; only the derived fields are kept.
        del r['propertyPics']
        del r['skuMap']
        del r['skuList']
        del r['code']

        return r

    def get_ids(self, html):
        """Yield the data-id of every product card on a search-result page."""
        soup = bs(html, "html.parser")
        for product in soup.find_all("div", "product"):
            # Renamed local from `id` to avoid shadowing the builtin.
            yield product.get("data-id")

    def get_cookies(self):
        """Persist the driver's current cookies to cookies.txt."""
        write("cookies.txt", json.dumps(self.s.get_cookies()))

    def set_cookies(self):
        """Load cookies.txt and inject each cookie into the driver."""
        c = read("cookies.txt")
        c = json.loads(c)
        for i in c:
            self.s.add_cookie(i)  # inject cookies one at a time

    def get_one_page(self, name, page, outpath=""):
        """Fetch one search-result page and yield the product ids on it.

        :param name: search query.
        :param page: page number for the listing URL.
        :param outpath: when non-empty, the raw HTML is also written there.
        :return: generator of product id strings.
        """
        url = self.pageurl.format(name, page)
        html = self.get_html(url)
        if outpath:
            write(outpath, html)
        return self.get_ids(html)

    def get_one_detail(self, name, page, outpath=""):
        """Fetch one product-detail page and return its raw HTML.

        :param name: unused; kept for signature symmetry with get_one_page.
        :param page: the item id substituted into the detail URL.
        :param outpath: when non-empty, the HTML is also written there.
        """
        url = self.detailurl.format(page)
        html = self.get_html(url)
        if outpath:
            write(outpath, html)
        return html

    def get_html(self, url):
        """Fetch *url* through the JS driver, pausing on the anti-bot page.

        Cookies are injected before the request and snapshotted afterwards
        so the session state is kept on disk between calls.
        """
        self.set_cookies()
        html = self.js.get_by_js(self.s, url)
        # Tmall's slider-captcha interstitial: block until the operator
        # resolves it manually.
        if html.find("亲，小二正忙，滑动一下马上回来") > -1:
            self.check()
        self.get_cookies()
        return html

    def get_ids_url(self, filepath, name, page):
        """Return product ids for (*name*, *page*), fetching only on cache miss.

        The page HTML is cached at ``filepath.format(name, page.zfill(3))``.
        (The original wrapped this in a one-argument os.path.join, which
        is a no-op and has been removed.)
        """
        outpath = filepath.format(name, page.zfill(3))
        if not os.path.exists(outpath):
            return self.get_one_page(name, page, outpath=outpath)
        print("ex", outpath)
        return self.get_ids_file(filepath, name, page)

    def get_ids_file(self, filepath, name, page):
        """Return product ids parsed from the cached page file, or [] if absent."""
        path = filepath.format(name, page.zfill(3))
        if os.path.exists(path):
            return self.get_ids(read(path))
        return []

    def get_info_url(self, filepath, name, id):
        """Return product info for item *id*, fetching the detail page on cache miss."""
        outpath = filepath.format(name, id)
        if not os.path.exists(outpath):
            html = self.get_one_detail(name, id, outpath=outpath)
            return self.get_info(html)
        print("ex", outpath)
        return self.get_info_file(filepath, name, id)

    def get_info_file(self, filepath, name, id):
        """Return product info parsed from the cached detail file, or {} if absent."""
        path = filepath.format(name, id)
        if os.path.exists(path):
            return self.get_info(read(path))
        return {}

    def check(self):
        """Block until the operator confirms the captcha has been solved."""
        input("check !!")
