import requests
import json
import asyncio
# import re
# from collections import OrderedDict
import os
from datetime import datetime
from tqdm import tqdm
# import time
import sys
import pandas as pd

from utils.TTFUtil import *

# Directory containing the running script; root for the Temp/ and save/ paths below.
Base_DIR = os.path.dirname(os.path.realpath(sys.argv[0]))
# Module-level cache: obfuscated character -> recognized real character
# (filled lazily by str_tihuan so each glyph is OCR'd only once per run).
graph_str_obj = {}

# NOTE: admauth=360100 corresponds to Poyang Lake (鄱阳湖)

def save(data_list, filename):
    """Write the decoded station records to an Excel workbook.

    Args:
        data_list: list of dicts, one per station, each holding exactly the
            seven whitelisted fields in order (bsnm, addvnm, rvnm, stnm, tm,
            z, alertValue).
        filename: full output path of the .xlsx file.
    """
    columns = ["流域", "行政区划", "河名", "站名", "时间", "水位(米)", "超警戒水位(米)"]

    df = pd.DataFrame(data_list)
    # Relabel positionally — assumes every record has exactly these 7 fields.
    df.columns = columns
    df.fillna("", inplace=True)
    print(df.head(10))
    df.to_excel(filename, index=False)
    # Fixed: previously printed a broken literal placeholder instead of the path.
    print(f"文件已保存到: {filename}")

def extract_data(obj):
    """Strip font-obfuscation wrappers from one record's string values.

    Obfuscated values have the shape ``#<fontKey>otltag<glyphText>#FontTag``;
    the glyph text is kept as the field value and the font key (the name of
    the per-request TTF file) is returned alongside the cleaned dict.

    Args:
        obj: one raw record (dict) from the API response.

    Returns:
        tuple: (cleaned dict, font key string or None if no field was
        obfuscated).

    Bug fix: the original reset the font key to None on *every* iteration,
    so it was lost whenever the record's last field happened to be plain.
    It is now initialized once and keeps the last key found.
    """
    new_obj = {}
    font_key = None
    for key, value in obj.items():
        if isinstance(value, str) and "#FontTag" in value:
            start_tag = "#"
            middle_tag = "otltag"
            end_tag = "#FontTag"

            # Font key sits between the leading "#" and "otltag"
            # (e.g. mPgcfp7TXR_1725546532529).
            start_idx = value.find(start_tag) + len(start_tag)
            middle_idx = value.find(middle_tag)
            font_key = value[start_idx:middle_idx]

            # Obfuscated glyph text sits between "otltag" and "#FontTag".
            glyph_text = value[middle_idx + len(middle_tag):value.find(end_tag)]
            new_obj[key] = glyph_text
        else:
            new_obj[key] = value
    return new_obj, font_key

# 查找对应的 glyph 的函数
def find_glyph_by_char(char, data):
    """Return the glyph name mapped to *char* in the font table, else None.

    Each row of *data* is a sequence whose index 1 is the character and
    index 2 is the glyph name.
    """
    return next((row[2] for row in data if row[1] == char), None)

def str_tihuan(str, fontKeyList, font_file):
    """Decode an obfuscated string character by character.

    Punctuation and whitespace pass through untouched; every other character
    is looked up in the module-level cache, or resolved via the font table +
    glyph OCR (ttf_to_char) and then cached. On any failure the original
    character is kept.

    NOTE: the first parameter shadows the builtin ``str``; the name is kept
    for caller compatibility.
    """
    passthrough = "()（）. -·#、　?"
    decoded = ""
    for ch in str:
        if ch in passthrough:
            decoded += ch
            continue
        if ch in graph_str_obj:
            real = graph_str_obj[ch]
        else:
            try:
                glyph_name = find_glyph_by_char(ch, fontKeyList)
                real = ttf_to_char(font_file, glyph_set_name=glyph_name, temp_png_path=f"{Base_DIR}/Temp")
                # OCR confuses the mountain glyph with a latin "u" — patch it up.
                if real == "u":
                    real = "山"
                graph_str_obj[ch] = real
            except Exception as e:
                print(e)
                print(f"str:{str}====, s:{ch}")
                real = ch
        decoded += real
    return decoded
def obj_tihuan(obj, fontKeyList, font_file):
    """De-obfuscate the whitelisted string fields of one record.

    Fields outside the whitelist are dropped; timestamps ("tm"), empty
    strings and non-string values are copied through unchanged.
    """
    wanted = ("bsnm", "addvnm", "rvnm", "stnm", "tm", "z", "alertValue")
    result = {}
    for field, val in obj.items():
        if field not in wanted:
            continue
        needs_decode = (
            isinstance(val, str)
            and val != ""
            and field != "createTime"
            and field != "tm"
        )
        result[field] = str_tihuan(val, fontKeyList, font_file) if needs_decode else val
    return result


def crawl(file_path=f"{Base_DIR}/save"):
    """Fetch the river-station table, download the per-request obfuscation
    font, and decode every record.

    Args:
        file_path: kept for caller compatibility; this function no longer
            writes a file itself (the save step is done by callers).

    Returns:
        list[dict]: one dict per station, restricted to the whitelisted
        fields in fixed column order.
    """
    cookies = {
        '__FT10000066': '2024-9-5-16-51-2',
        '__NRU10000066': '1725526262075',
        '__RT10000066': '2024-9-5-16-51-2',
    }

    api_headers = {
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
        'Connection': 'keep-alive',
        'Content-Type': 'application/json',
        'Referer': 'http://xxfb.mwr.cn/sq_djdh.html?v=1.0',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0',
        'X-Requested-With': 'XMLHttpRequest',
    }

    # NOTE(review): verify=False disables TLS verification (the endpoint is
    # plain http anyway). timeout added so a dead server cannot hang forever.
    response = requests.get('http://xxfb.mwr.cn/OTMpfiwozTexvsf/OTMnprjsvUahsv',
                            cookies=cookies, headers=api_headers, verify=False, timeout=60)
    datas = response.json()['result']
    response.close()
    print("=======数据获取成功======")

    # Strip the "#<fontKey>otltag...#FontTag" wrappers; remember the font key
    # (ttf_url) so the matching TTF can be downloaded below.
    ttf_url = ""
    newDatas = []
    for data in datas:
        data, ttf_url = extract_data(data)
        newDatas.append(data)

    font_headers = {
        'Accept': '*/*',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
        'Connection': 'keep-alive',
        'Referer': 'http://xxfb.mwr.cn/sq_djdh.html?v=1.0',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0',
    }

    response = requests.get(f'http://xxfb.mwr.cn/ttf/{ttf_url}.ttf',
                            cookies=cookies, headers=font_headers, verify=False, timeout=60)

    # Persist the font; make sure the Temp directory exists first.
    os.makedirs(f"{Base_DIR}/Temp", exist_ok=True)
    full_path = f"{Base_DIR}/Temp/ttf.ttf"
    with open(full_path, 'wb') as file:
        for chunk in response.iter_content(chunk_size=8192):  # write in 8 KB chunks
            if chunk:  # skip keep-alive chunks
                file.write(chunk)
    print(f"字体文件{ttf_url}已保存: {full_path}")
    print("=======解析字体映射关系=====")
    response.close()

    fontKeyList = get_char_list_from_ttf(full_path)
    final_data = []
    print("*****************混淆字体绘图OCR识别替换********************")
    filts = ["bsnm", "addvnm", "rvnm", "stnm", "tm", "z", "alertValue"]
    for data in tqdm(newDatas):
        newObj = obj_tihuan(data, fontKeyList, full_path)
        # Re-emit the fields in the fixed column order expected by save().
        ordered_data = {key: newObj[key] for key in filts}
        final_data.append(ordered_data)

    return final_data

async def crawl_async(file_path=f"{Base_DIR}/save"):
    """Async wrapper around crawl().

    Bug fix: the original did ``await crawl(...)``, but crawl() is a plain
    synchronous function — awaiting its return value raises TypeError.
    Run it in the default executor instead so the event loop stays responsive
    while the (blocking) network/OCR work runs.
    """
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(None, crawl, file_path)

def crawl2(file_path=f"{Base_DIR}/save"):
    """Fetch + decode all station records and save them as an Excel file.

    Args:
        file_path: directory in which the workbook is written.

    Returns:
        str: the bare workbook filename (not the full path).

    Fixes: (1) the body was a wholesale copy of crawl() — it now delegates;
    (2) the output path was the broken literal string "(unknown)", so the
    workbook landed in the wrong place — it is now file_path/<filename>;
    (3) the unused timestamp computation was removed.
    """
    final_data = crawl(file_path)
    filename = "大江大河.xlsx"
    save(final_data, f"{file_path}/{filename}")
    return filename
