import os
import re
import sys
import threading

import requests as req
from bs4 import BeautifulSoup
from fontTools.ttLib import TTFont

# Absolute directory containing the running script; used to locate the
# bundled Source Han Sans glyph-table file (see get_hansans_dict).
path = os.path.abspath(os.path.dirname(sys.argv[0]))


def analyze_search_html(search_html):
    """Parse a search-result page, de-obfuscate font-encoded fields and print them.

    The site hides sensitive text behind a dynamically generated WOFF font.
    This downloads that font, builds two translation tables (digits/letters
    from the WOFF cmap, CJK text from a local Source Han Sans mapping file)
    and uses them to recover the plain text of every result entry.

    :param search_html: raw HTML of the search-result page
    :raises ValueError: if no WOFF font URL can be found in the page
    """
    soup = BeautifulSoup(search_html, 'lxml')
    # Locate the per-session WOFF font referenced by the page.
    # The dot before 'woff' is escaped so e.g. '123Xwoff' cannot match.
    woff_match = re.search(r'/css/woff/\d+\.woff', search_html)
    if woff_match is None:
        raise ValueError('woff font url not found in search html')
    local_woff_path = save_dynamic_woff(woff_match.group())
    nums_dict = get_nums_dict(local_woff_path)
    hansans_dict = get_hansans_dict()

    for each_div in soup.select('div.result-2 div.each'):
        # Query each container once instead of re-selecting per field.
        info_divs = each_div.select('div.info')
        first_secrets = info_divs[0].select('p.secretfontT')
        item_data = {
            # company name (CJK text, Source Han Sans table)
            'name': translate_secretfont(
                hansans_dict, each_div.select('div.tit h3.secretfont')[0].text),
            # registered address (CJK text)
            'address': translate_secretfont(
                hansans_dict, info_divs[1].select('p.secretfont')[0].text),
            # unified social credit code (digits/letters, WOFF cmap table)
            'code': translate_secretfontT(nums_dict, first_secrets[0].text),
            # date of establishment
            'date': translate_secretfontT(nums_dict, first_secrets[1].text),
            # registration number
            'reg_code': translate_secretfontT(nums_dict, first_secrets[2].text),
            # business term
            'business_limit': translate_secretfontT(
                nums_dict, info_divs[1].select('p.secretfontT')[0].text),
        }
        print(item_data)


def get_hansans_dict():
    """Load the Source Han Sans glyph table into a dict.

    Each line of the table file is assumed to look like
    ``uni4E2D---<something>---中`` (three ``---``-separated fields) — the
    ``uniXXXX`` glyph name (field 0) maps to the plain character (field 2).
    TODO(review): confirm the field layout against the actual table file.

    :return: dict mapping glyph names such as 'uni4E2D' to their character
    """
    # os.path.join keeps the path portable instead of hard-coding
    # Windows-style backslash separators.
    hansans_path = os.path.join(
        path, 'utils', 'woff', 'SourceHanSansCN-Normal-2500-table.txt')
    hansans_dict = {}
    with open(hansans_path, encoding='utf8') as f:
        # Iterate lazily instead of materialising the whole file via readlines().
        for line in f:
            eles = line.split('---')
            hansans_dict[eles[0]] = eles[2].strip()
    return hansans_dict


def get_nums_dict(file_name):
    """Build the digit/letter translation table from a dynamic WOFF font.

    The font's cmap is expected to hold exactly 72 entries: the first 36
    codepoints are the plain characters and the last 36 are the obfuscated
    private-use codepoints shown in the page (pairing relies on the cmap's
    iteration order). The returned dict maps an obfuscated codepoint as a
    lowercase hex string ('0xe602') to its plain character.

    The temporary font file is always deleted, even if parsing fails.

    :param file_name: path of the downloaded .woff file
    :return: dict mapping '0x...' codepoint strings to plain characters
    :raises ValueError: if the cmap does not contain exactly 72 entries
    """
    try:
        font = TTFont(file_name)
        try:
            cmap = font.getBestCmap()
        finally:
            font.close()
    finally:
        # The font is a throw-away per-request download; never leave it behind.
        os.remove(file_name)
    if len(cmap) != 72:
        raise ValueError('解析的字体文件有问题，请排查！')
    codepoints = list(cmap)  # preserves the cmap's insertion order
    plain_chars = [chr(cp) for cp in codepoints[:36]]
    secret_keys = [hex(cp) for cp in codepoints[36:]]
    return dict(zip(secret_keys, plain_chars))


def translate_secretfont(hansans_dict, origin_name):
    """Translate font-obfuscated CJK text back to plain text.

    Each character is looked up in *hansans_dict* by its glyph name
    ('uni' + 4-digit uppercase hex codepoint); characters without a mapping
    (already-plain text, punctuation, ASCII) are kept unchanged.

    The previous implementation round-tripped the whole string through
    'unicode-escape' and split on '\\u', which raised UnicodeDecodeError on
    plain-ASCII or mixed input; translating character by character is robust
    for any input string.

    :param hansans_dict: mapping of glyph names ('uni4E2D') to characters
    :param origin_name: obfuscated text as extracted from the page
    :return: de-obfuscated text
    """
    # Fall back to the character itself when the table has no entry.
    return ''.join(
        hansans_dict.get('uni%04X' % ord(ch), ch) for ch in origin_name)


def translate_secretfontT(nums_dict, origin_code):
    """Translate font-obfuscated digits/letters back to plain text.

    Obfuscated glyphs are looked up in *nums_dict* by their lowercase hex
    codepoint ('0xe602' — the exact key format produced by get_nums_dict);
    unmapped characters such as '-' pass through unchanged and whitespace is
    dropped, matching the old segment-strip()ing behaviour.

    The previous implementation round-tripped the string through
    'unicode-escape' and split on '\\u', which raised UnicodeDecodeError on
    a leading '-' or mixed ASCII segments; the character-wise loop is robust
    for any input string.

    :param nums_dict: mapping of '0x...' codepoint strings to characters
    :param origin_code: obfuscated code/date text from the page
    :return: de-obfuscated text
    """
    translated = []
    for ch in origin_code:
        if ch.isspace():
            # Padding around the code was stripped before; keep dropping it.
            continue
        translated.append(nums_dict.get(hex(ord(ch)), ch))
    return ''.join(translated)


def save_dynamic_woff(compared_url):
    """Download the page's dynamic WOFF font and save it locally.

    The file name is prefixed with the current thread name so concurrent
    scraping threads do not clobber each other's downloads.

    :param compared_url: site-relative font path, e.g. '/css/woff/123.woff'
    :return: local file name of the saved font
    :raises requests.HTTPError: if the server answers with an error status
    """
    url = 'https://ss.cods.org.cn/' + compared_url
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36'
    }
    # A timeout keeps a stalled server from hanging the scraper forever.
    resp = req.get(url=url, headers=headers, timeout=30)
    # Fail loudly instead of silently saving an HTML error page as a .woff.
    resp.raise_for_status()
    # threading.current_thread() replaces the deprecated currentThread().
    file_name = threading.current_thread().name + '_' + compared_url.split('/')[-1]
    with open(file_name, mode='wb') as ff:
        ff.write(resp.content)
    return file_name


if __name__ == '__main__':
    # Offline entry point: parse a previously saved search-result page
    # instead of fetching one from the live site.
    with open('cods_search_list.html', encoding='utf8', mode='r') as f:
        analyze_search_html(f.read())
