# -*- coding: utf-8 -*-


from typing import Dict, Any
import re
from io import BytesIO
from pathlib import Path
import requests
from lxml import etree
from fontTools.ttLib import TTFont
from maoyan.knn_font import Classify

"""
猫眼
"""




# Request headers captured from a real Chrome 80 session on maoyan.com.
# The Cookie value is a recorded session — it will expire; replace it with a
# fresh one if requests start getting blocked or redirected to a captcha page.
_headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Cache-Control': 'max-age=0',
    'Connection': 'keep-alive',
    'Cookie': '__mta=151764870.1581567953870.1581568084519.1581568101926.15; uuid_n_v=v1; uuid=EB4AA2204E1811EABEE699B27D5D2449703E9C98873144F2862D521ACAFF60ED; _csrf=8cd8cf12075c33f9cb8f0c9284c962afa05385c72f6452171b9911a58bc4733d; Hm_lvt_703e94591e87be68cc8da0da7cbd0be2=1581567953; _lx_utm=utm_source%3DBaidu%26utm_medium%3Dorganic; _lxsdk_cuid=1703ccbc8d5c8-0b926b2ef6cf95-6313f69-144000-1703ccbc8d6c8; _lxsdk=EB4AA2204E1811EABEE699B27D5D2449703E9C98873144F2862D521ACAFF60ED; mojo-uuid=ddad52faa621c8827c284ca233e6138e; mojo-session-id={"id":"e7cd101fa590d595e538ebfd067a6867","time":1581567953830}; __mta=151764870.1581567953870.1581568047900.1581568084519.14; mojo-trace-id=30; Hm_lpvt_703e94591e87be68cc8da0da7cbd0be2=1581568102; _lxsdk_s=1703ccbc8d7-a3b-722-f59%7C%7C42',
    'DNT': '1',
    'Host': 'maoyan.com',
    'Sec-Fetch-Dest': 'document',
    'Sec-Fetch-Mode': 'navigate',
    'Sec-Fetch-Site': 'none',
    'Sec-Fetch-User': '?1',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.100 Safari/537.36'
}

# Shared KNN glyph classifier used to map font glyph outlines back to digits.
_classify = Classify()


# 获取字体文件编码
def get_map(text: str) -> Dict[str, Any]:
    """Build a mapping from obfuscated font entities to digit strings.

    Maoyan serves numbers with a per-request custom woff font. This extracts
    the woff URL from the page HTML, downloads the font, classifies each
    glyph's outline coordinates with the KNN model, and returns a dict mapping
    HTML entities (e.g. '&#xe893;') to the predicted digit as a string.

    :param text: raw HTML of a Maoyan page containing a @font-face rule
    :return: {html_entity: digit_string}
    :raises IndexError: if no .woff URL is present in *text*
    """
    _woff_path = Path(__file__).absolute().parent / "fonts" / "test.woff"
    # Extract the woff address from the @font-face CSS in the HTML.
    woff_url = re.findall(r"url\('(.*?\.woff)'\)", text)[0]
    font_url = f"http:{woff_url}"
    # Download the font; without a timeout a stalled server would hang forever.
    content = requests.get(font_url, timeout=10).content
    # Keep a copy on disk for inspection/debugging; create the fonts
    # directory first, otherwise the write fails with FileNotFoundError.
    _woff_path.parent.mkdir(parents=True, exist_ok=True)
    _woff_path.write_bytes(content)
    # Parse the downloaded font from memory.
    font = TTFont(BytesIO(content))
    # Skip the first two glyphs ('.notdef' and 'x') — only digit glyphs remain.
    glyf_order = font.getGlyphOrder()[2:]
    # Flatten each glyph's (x, y) outline coordinates into one feature vector.
    info = [[v for point in font['glyf'][g].coordinates for v in point]
            for g in glyf_order]
    # Predict a digit per glyph, then pair with the glyph's HTML entity form
    # ('uniE893' -> '&#xe893;').
    map_li = map(lambda x: str(int(x)), _classify.knn_predict(info))
    uni_li = map(lambda x: x.lower().replace('uni', '&#x') + ';', glyf_order)
    return dict(zip(uni_li, map_li))


# 获取html源码并解析字体
def get_contents(url: str) -> Any:
    """Fetch *url* and return the parsed HTML with font obfuscation decoded.

    Downloads the page, builds the glyph-entity -> digit map via get_map(),
    substitutes every obfuscated entity in the raw HTML with its digit, and
    parses the result with lxml.

    :param url: page URL to fetch (sent with the module's _headers)
    :return: lxml HTML element tree (note: NOT a str — the original ``-> str``
             annotation was wrong; callers use .xpath() on the result)
    """
    # Timeout prevents an unresponsive server from hanging the script.
    text = requests.get(url, headers=_headers, timeout=10).text
    map_dict = get_map(text=text)
    # Replace each obfuscated entity with its decoded digit in the raw HTML.
    for uni, digit in map_dict.items():
        text = text.replace(uni, digit)
    return etree.HTML(text)


# 榜单-解析字体
def parse_board(html) -> None:
    """Print the details of every movie on a Maoyan board page.

    For each entry prints, one per line: title, cast, release date,
    real-time box office, and total box office, followed by a separator.

    :param html: decoded lxml HTML tree (as returned by get_contents)
    """
    rows = html.xpath('//dl[@class="board-wrapper"]/dd')
    for row in rows:
        name = row.xpath('./div[@class="board-item-main"]//div[@class="movie-item-info"]/p[@class="name"]/a/text()')[0]
        cast = row.xpath('./div[@class="board-item-main"]//div[@class="movie-item-info"]//p[@class="star"]/text()')[0]
        released = row.xpath('./div[@class="board-item-main"]//div[@class="movie-item-info"]//p[@class="releasetime"]/text()')[0]
        realtime_box = "".join(row.xpath('./div[@class="board-item-main"]//div[@class="movie-item-number boxoffice"]//p[@class="realtime"]//text()'))
        total_box = "".join(row.xpath('./div[@class="board-item-main"]//div[@class="movie-item-number boxoffice"]//p[@class="total-boxoffice"]//text()'))
        for field in (name, cast, released, realtime_box, total_box):
            print(field)
        print('-' * 50)

_board_url = "https://maoyan.com/board/1"


def main() -> None:
    """Fetch the Maoyan real-time box-office board and print its entries."""
    # Decode the font-obfuscated numbers and parse the page.
    html = get_contents(_board_url)
    # Extract and print the board data.
    parse_board(html)


# Guard the entry point so importing this module doesn't trigger network I/O.
if __name__ == "__main__":
    main()
