import base64
import re
import time
from collections import Counter

import requests
from bs4 import BeautifulSoup


def fetch(page) -> list[int]:
    """Fetch one page of the challenge and decode the number hidden in each row.

    The API returns an HTML fragment of ``<td>`` cells, each holding base64
    image digits.  Decoys are tagged with a per-response CSS class and removed;
    the remaining digits are ordered by their CSS left-offset and concatenated.

    :param page: 1-based page number to request.
    :return: one decoded integer per ``<td>`` row.
    :raises requests.HTTPError: if the API responds with an error status.
    """
    # Session cookies captured from a logged-in browser session; the `sessionid`
    # expires, so these need refreshing when the request starts failing.
    cookies = {
        'Hm_lvt_c99546cf032aaa5a679230de9a95c7db': '1714393239,1714544185',
        'qpfccr': 'true',
        'no-alert3': 'true',
        'tk': '-6217862902397066243',
        'sessionid': 'cusapbt27ln32rkuqmw3vfcsf8o5d237',
        'Hm_lpvt_c99546cf032aaa5a679230de9a95c7db': '1714544216',
    }

    # Browser-identical headers so the anti-bot layer accepts the request.
    headers = {
        'accept': 'application/json, text/javascript, */*; q=0.01',
        'accept-language': 'zh-CN,zh;q=0.9',
        'cache-control': 'no-cache',
        # 'cookie': 'Hm_lvt_c99546cf032aaa5a679230de9a95c7db=1714393239,1714544185; qpfccr=true; no-alert3=true; tk=-6217862902397066243; sessionid=cusapbt27ln32rkuqmw3vfcsf8o5d237; Hm_lpvt_c99546cf032aaa5a679230de9a95c7db=1714544216',
        'pragma': 'no-cache',
        'priority': 'u=0, i',
        'referer': 'https://match.yuanrenxue.cn/match/4',
        'sec-ch-ua': '"Chromium";v="124", "Google Chrome";v="124", "Not-A.Brand";v="99"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
        'x-requested-with': 'XMLHttpRequest',
    }

    params = {
        'page': page,
    }

    response = requests.get('https://match.yuanrenxue.cn/api/match/4', params=params, cookies=cookies, headers=headers)
    # Fail fast on an expired session / rate limit instead of a confusing JSON decode error.
    response.raise_for_status()
    tds = response.json()['info']
    soup = BeautifulSoup(tds, 'html.parser')
    ignore_key = find_ignore_key(soup)
    # print(soup.prettify())
    res = []
    for idx_td, td in enumerate(soup.find_all('td')):
        idx_in_td = 0
        imgs = []
        for idx_img, img in enumerate(td.find_all('img')):
            # Drop the display:none decoy digits before decoding.
            if ignore_key in img.get('class'):
                img.decompose()
                continue
            idx_in_td += 1
            src = img.get('src')
            # The decoded image byte length uniquely identifies the digit.
            flag = get_file_size_from_base64(src)
            number = get_number(str(flag))
            # style is e.g. "left:-23px"; slice off "left:" and "px", then
            # normalize by 11.5 (glyph width in px — TODO confirm) so the
            # fractional offset orders digits inside the cell correctly.
            pos = idx_in_td + float(img.get('style')[5:-2]) / 11.5
            img['alt'] = f'{idx_td, idx_img, idx_in_td, pos, number}'
            img['data-pos'] = pos
            img['data-value'] = number
            img['style'] = ''
            # print(idx_td, idx_img, flag, img.get('style'))
            imgs.append(img)
        # Visual order, not DOM order, determines the real digit sequence.
        sorted_imgs = sorted(imgs, key=lambda k: k['data-pos'])
        val_list = [str(x['data-value']) for x in sorted_imgs]
        print(val_list)
        res.append(int(''.join(val_list)))
    # Dump the annotated DOM for debugging; context manager closes the handle.
    with open('match2_1.html', 'w', encoding='utf-8') as debug_file:
        debug_file.write(soup.prettify())
    return res


# Heuristic shortcut: the display:none (decoy) class most likely occurs the
# fewest times in the response.
# Server-side it is: var j_key = '.' + hex_md5(btoa(data.key + data.value).replace(/=/g, ''));
# Tried 5 runs x 5 pages; one run failed, so this heuristic is not bulletproof.
def find_ignore_key(soup: BeautifulSoup) -> str:
    """Return the CSS class (second class token) marking decoy images.

    Counts how often each image's second class appears and returns the rarest
    one, on the assumption that hidden decoys are the minority.

    :param soup: parsed HTML fragment containing the digit ``<img>`` tags.
    :return: the class name whose images should be ignored.
    """
    # Counter replaces the manual if/else tally; min over counts picks the
    # least frequent class.
    key_counts = Counter(img.get('class')[1] for img in soup.find_all('img'))
    print(dict(key_counts))
    return min(key_counts, key=key_counts.get)


def get_number(key: str) -> int:
    dict = {
        '894': 0,
        '351': 1,
        '749': 2,
        '837': 3,
        '562': 4,
        '668': 5,
        '950': 6,
        '588': 7,
        '984': 8,
        '926': 9
    }
    return dict.get(key)


def get_file_size_from_base64(base64_string) -> int:
    """Return the byte size of the file encoded in a base64 data URI.

    Strips an optional ``data:image/...;base64,`` prefix, decodes the payload,
    and returns the decoded length in bytes.

    :param base64_string: base64 payload, with or without a data-URI prefix.
    :return: size of the decoded data in bytes.
    """
    # Raw string for the regex; the prefix is absent on plain base64 input,
    # in which case re.sub is a no-op.
    image_data = base64.b64decode(re.sub(r'^data:image/.+;base64,', '', base64_string))
    return len(image_data)


def get_flag_from_base64(base64_string):
    """Extract the 5-character marker located 10 characters from the end."""
    start, stop = len(base64_string) - 10, len(base64_string) - 5
    return base64_string[start:stop]


def main():
    """Fetch all five pages, sum every decoded row value, and print the total."""
    total = 0
    for page_no in range(1, 6):
        time.sleep(1)  # throttle requests to stay under the rate limit
        total += sum(fetch(page_no))
    print(total)


if __name__ == '__main__':
    main()
