import datetime
import time
from sys import exception

import pandas as pd
import requests
import json
import jsonpath
import threading
import os
import csv

# 导入各个字段提取模块（根据实际路径调整）
from comment.config import CONF_ROOT
from compements.assemblies.suifangdownload1 import suifangdownload1

# 获取Cookie模块
from comment.cookie_find import get_cookies

# 全局配置

# Output / error-log / runtime-config paths (anchored at CONF_ROOT)
RESULT_CSV = CONF_ROOT / "./执行结果/result.csv"
ERROR_EXCEL = CONF_ROOT / "./执行结果/问题文档.xlsx"
CONFIG_FILE = CONF_ROOT / "./文档/config.txt"  # runtime config (delay settings)
DOMAIN, COOKIE, JIGOUMA = get_cookies()
# BUGFIX: str.strip("http://") removes any of the characters h/t/p/:/ from
# BOTH ends, which could also eat trailing characters of the host name.
# Drop the URL scheme as a prefix instead.
HOST = DOMAIN[len("http://"):] if DOMAIN.startswith("http://") else DOMAIN
print("domain:", DOMAIN, "cookie:", COOKIE, "jigouma:", JIGOUMA, "HOST", HOST)
# Locks serializing access to shared files/state across worker threads
cookie_lock = threading.Lock()
csv_lock = threading.Lock()
error_lock = threading.Lock()
quarterly_stats_lock = threading.Lock()

def simplified_quarterly_statistics(id_number, date_list, start_limit_time, end_limit_time):
    """
    Build per-quarter visit counts for one ID number and append them as one
    row to the quarterly-statistics Excel workbook.

    Args:
        id_number (str): ID-card number.
        date_list (list): follow-up dates, each formatted 'YYYY-MM-DD'.
        start_limit_time (str): range start, 'YYYY-MM-DD'.
        end_limit_time (str): range end, 'YYYY-MM-DD'.
    """
    # Leading apostrophe forces Excel to treat the value as text.
    id_number = "'" + id_number
    # Several worker threads append to the same workbook; serialize access.
    with quarterly_stats_lock:
        # NOTE: `datetime` here is the datetime *class* — the module-level
        # `from datetime import datetime` later in this file rebinds the
        # name at import time, before any thread calls this function.
        start_date = datetime.strptime(start_limit_time, '%Y-%m-%d')
        end_date = datetime.strptime(end_limit_time, '%Y-%m-%d')

        # Build the ordered list of quarter column headers covering the range.
        quarter_headers = []
        start_year = start_date.year
        start_quarter = (start_date.month - 1) // 3 + 1
        end_year = end_date.year
        end_quarter = (end_date.month - 1) // 3 + 1

        current_year = start_year
        current_quarter = start_quarter
        while True:
            year_short = str(current_year)[2:]  # last two digits of the year
            quarter_headers.append(f"{year_short}年Q{current_quarter}")
            # Stop once the end quarter has been emitted.
            if current_year == end_year and current_quarter == end_quarter:
                break
            current_quarter += 1
            if current_quarter > 4:
                current_quarter = 1
                current_year += 1

        # Parse the follow-up dates, skipping blanks and malformed entries.
        dates = []
        for date in date_list:
            if date:
                try:
                    dates.append(datetime.strptime(date, '%Y-%m-%d'))
                except ValueError:
                    print(f"日期格式错误: {date}")
                    continue

        # Count visits per quarter (Q1: Jan-Mar ... Q4: Oct-Dec), only for
        # dates inside [start_date, end_date].
        quarterly_data = {}
        for date in dates:
            if start_date <= date <= end_date:
                year_short = str(date.year)[2:]
                quarter = (date.month - 1) // 3 + 1
                quarter_key = f"{year_short}年Q{quarter}"
                quarterly_data[quarter_key] = quarterly_data.get(quarter_key, 0) + 1

        # Ensure the output directory exists.
        output_dir = "./执行结果/季度统计"
        os.makedirs(output_dir, exist_ok=True)

        # One row: ID number, a count per quarter, then all raw dates joined.
        row_data = {'身份证号': id_number}
        for header in quarter_headers:
            row_data[header] = quarterly_data.get(header, 0)
        row_data['随访日期'] = ','.join(date_list) if date_list else ""

        df_new = pd.DataFrame([row_data])
        filename = f"{output_dir}/季度统计结果_{start_limit_time}_至_{end_limit_time}.xlsx"

        # Append to the existing workbook if present, otherwise create it.
        if os.path.exists(filename):
            df_existing = pd.read_excel(filename)
            df_combined = pd.concat([df_existing, df_new], ignore_index=True)
            df_combined.to_excel(filename, index=False)
        else:
            df_new.to_excel(filename, index=False)

        # BUGFIX: the original f-string did not interpolate the path.
        print(f"季度统计数据已保存至: {filename}")


# 定时更新 Cookie 的函数

def update_cookies_periodically():
    """Background loop: refresh DOMAIN/COOKIE/JIGOUMA/HOST every 20 minutes.

    Runs forever; intended to be started as a daemon thread (see main()).
    Mutates the module-level globals under cookie_lock.
    """
    global COOKIE, DOMAIN, JIGOUMA, HOST
    while True:
        time.sleep(1200)  # refresh every 20 minutes
        with cookie_lock:
            DOMAIN, COOKIE, JIGOUMA = get_cookies()
            # BUGFIX: str.strip("http://") strips a character set from both
            # ends and could eat trailing host characters; remove the scheme
            # as a prefix instead.
            HOST = DOMAIN[len("http://"):] if DOMAIN.startswith("http://") else DOMAIN
            print("Cookie 和相关配置已更新")



# 加载配置文件
def load_config():
    """Load key=value settings from CONFIG_FILE.

    Creates the file with default values when it does not exist; otherwise
    parses it and backfills any missing defaults.  Returns a dict.
    """
    default_config = {
        "delay_ms": 500  # default per-item delay: 500 ms
    }

    if os.path.exists(CONFIG_FILE):
        config = {}
        with open(CONFIG_FILE, "r", encoding="utf-8") as fh:
            for raw in fh:
                raw = raw.strip()
                if not raw or "=" not in raw:
                    continue
                name, _, val = raw.partition("=")
                config[name.strip()] = val.strip()
        # Guarantee every default key is present.
        for name, val in default_config.items():
            config.setdefault(name, val)
        return config

    # No config file yet: seed it with the defaults and return them.
    with open(CONFIG_FILE, "w", encoding="utf-8") as fh:
        for name, val in default_config.items():
            fh.write(f"{name}={val}\n")
    return default_config


# 更新配置文件
def update_config(delay_ms):
    """Persist the per-item delay (milliseconds) back to CONFIG_FILE."""
    content = f"delay_ms={delay_ms}\n"
    with open(CONFIG_FILE, mode="w", encoding="utf-8") as fh:
        fh.write(content)


# Load the runtime configuration once at import time.
CONFIG = load_config()
DELAY_SECONDS = float(CONFIG["delay_ms"]) / 1000  # convert ms -> seconds


# 记录错误信息
def record_error_id(id_person, error_msg):
    """Append one (ID number, failure reason) row to the error workbook."""
    new_row = pd.DataFrame({
        # Leading apostrophe keeps Excel from mangling the long digit string.
        "身份证号": ["'" + str(id_person)],
        "异常原因": [error_msg],
    })

    # Multiple threads report errors; serialize the read-modify-write.
    with error_lock:
        if os.path.exists(ERROR_EXCEL):
            existing = pd.read_excel(ERROR_EXCEL)
            out = pd.concat([existing, new_row], ignore_index=True)
        else:
            out = new_row
        out.to_excel(ERROR_EXCEL, index=False)


# 保存数据
def save_to_csv(data, filename=RESULT_CSV):
    """Append a list of row-dicts to the results CSV (thread-safe).

    Args:
        data: list of dict rows; anything else (or an empty list) is rejected.
        filename: target CSV path; defaults to the module-level RESULT_CSV.

    Returns:
        bool: True when the rows were written, False otherwise.
    """
    # Only non-empty list payloads are accepted.
    if not isinstance(data, list) or not data:
        return False

    try:
        with csv_lock:
            df_new = pd.DataFrame(data)

            if os.path.exists(filename):
                # BUGFIX: the file is written as utf-8-sig (with a BOM);
                # reading it back as plain utf-8 left the BOM glued to the
                # first column name, so concat produced a duplicate
                # '身份证号' column.  Read with utf-8-sig to strip the BOM.
                df_existing = pd.read_csv(filename, encoding='utf-8-sig')
                df_combined = pd.concat([df_existing, df_new], ignore_index=True)
                df_combined.to_csv(filename, index=False, encoding='utf-8-sig')
            else:
                df_new.to_csv(filename, index=False, encoding='utf-8-sig')

            return True
    except Exception as e:
        # Best-effort: report and signal failure instead of crashing a worker.
        print(f"CSV写入失败: {str(e)}")
        return False



# 初始化线程进度文件
# 修改初始化线程环境的函数
def init_thread_env(thread_id, total, completed_initial=0):
    """Create the per-thread progress file if it does not exist yet.

    Args:
        thread_id: numeric thread identifier (used in the file name).
        total: total task count (already processed + pending).
        completed_initial: completed count to seed the file with.
    """
    filename = f"./执行结果/env_thread_{thread_id}.txt"
    if os.path.exists(filename):
        return  # never clobber an existing progress file
    content = (
        f"总操作数:{total}\n"
        "当前处理身份证号:\n"
        f"已完成数量:{completed_initial}\n"
        "当前索引:0\n"
    )
    with open(filename, "w", encoding="utf-8") as fh:
        fh.write(content)


# 更新线程进度
def update_thread_env(thread_id, current_id, completed, current_index):
    """Rewrite the per-thread progress file with the latest counters."""
    filename = f"./执行结果/env_thread_{thread_id}.txt"
    # Preserve the first line (total task count) exactly as stored.
    with open(filename, "r", encoding="utf-8") as fh:
        total_line = fh.readlines()[0].strip()

    body = [
        total_line + "\n",
        f"当前处理身份证号:{current_id}\n",
        f"成功完成数量:{completed}\n",
        f"已经处理数量:{current_index}\n",
    ]
    with open(filename, "w", encoding="utf-8") as fh:
        fh.writelines(body)


# 线程进度
def get_thread_progress(thread_id):
    """Parse the per-thread progress file.

    Returns a dict with keys total/current_id/completed/index, or None when
    the progress file does not exist.
    """
    filename = f"./执行结果/env_thread_{thread_id}.txt"
    if not os.path.exists(filename):
        return None
    with open(filename, "r", encoding="utf-8") as fh:
        lines = fh.readlines()
    # Each line is "<label>:<value>"; only the value is used.
    return {
        "total": int(lines[0].split(":")[1]),
        "current_id": lines[1].split(":")[1].strip(),
        "completed": int(lines[2].split(":")[1]),
        "index": int(lines[3].split(":")[1]),
    }


# 核心爬虫逻辑
def crawler_program(id_person,start_limit_time,end_limit_time):
    """Look up one person's EHR record and extract their follow-up rows.

    Args:
        id_person: ID-card number (may carry a leading apostrophe).
        start_limit_time: date-range start, 'YYYY-MM-DD'.
        end_limit_time: date-range end, 'YYYY-MM-DD'.

    Returns:
        list[dict]: one dict per follow-up record (selected fields only),
        or None on any failure; the reason is recorded via record_error_id.
    """
    try:
        url = f"{DOMAIN}/phis/app/ehr?limit=26&start=0&_dch=1752211623890"
        headers = {
            "accept": "application/json",
            "content-Type": "application/json",
            "cookie": f"JSESSIONID={COOKIE}",
            "Host": HOST,
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36"
        }

        # Server-side filter template: substitute the ID number and the
        # organisation code into the placeholders before posting.
        json_str = '{"ehrBaseFilterMap":{"EQ_gender":"","GTE_birthDate":"","LTE_birthDate":"","LIKE_innerCode":"%","GTE_dateCreated":"","LTE_dateCreated":"","EQ_signTeamCode":"","LIKE_addrCode":null,"EQ_curContract":"","EQ_individualConStatus":"","EQ_creator":"","EQ_idNumber":"身份证号","LIKE_ehrCode":"%","LIKE_mngOrgCode":"机构码%","LIKE_nameIndex":"%"},"ehrHfIndictorMap":{},"ehrClassifyGrFilterMap":{},"ehrClassifyCdFilterMap":{},"ehrClassifySpFilterMap":{},"grRelation":"OR","cdRelation":"OR","spRelation":"OR","fetchIdType":true,"fetchFamily":true,"fetchHasVSvcFlwChronicWf":true,"fetchHasFirstSoap":true,"fetchHasFlwMental":true,"fetchHasSvcExam1":true,"fetchHasAsmOldS":true,"fetchHasAsmYear":true,"fetchChronic":true,"fetchHasAsmOldA":true}'.replace("身份证号", id_person.strip("'")).replace("机构码", JIGOUMA)
        res = requests.post(url, headers=headers, data=json_str, timeout=30)
        if res.status_code != 200:
            record_error_id(id_person, "搜索人员时没成功")
            raise Exception(f"HTTP状态码: {res.status_code}")
        try:
            json_data = json.loads(res.text)
        except json.JSONDecodeError:
            # BUGFIX: the retry announcement used to print only *after* the
            # retry had already succeeded; announce it first.
            print("JSON解析错误，正在重试...")
            try:
                res = requests.post(url, headers=headers, data=json_str, timeout=30)
                if res.status_code != 200:
                    record_error_id(id_person, "搜索人员时没成功")
                    raise Exception(f"HTTP状态码: {res.status_code}")
                json_data = json.loads(res.text)
            except Exception:
                record_error_id(id_person, "搜索人员时没成功")
                return None

        # jsonpath returns a list of matches; [[]] means the query matched an
        # empty content list -> no EHR on file.  (If it returns False, the
        # subscript below raises TypeError, handled by the same except.)
        content_result = jsonpath.jsonpath(json_data, '$.content')
        if content_result == [[]]:
            raise TypeError("暂无建档")
        first_content = content_result[0][0]

        # Keys needed for the follow-up download.
        json_info = first_content.get('id')
        ehrid = first_content.get('ehrId')

        print("重要数据获取：", "1:", json_info, "2:", ehrid)

        # Download the follow-up records for the date range.
        infomation_all = suifangdownload1(id_person, ehrid, {
            "cookie": f"JSESSIONID={COOKIE}",
        }, DOMAIN, start_limit_time, end_limit_time)

        if infomation_all is None:
            # One retry before giving up on this person.
            print(f"处理失败: {id_person}, 错误: 这个人数据获取时出错,重试中···")
            infomation_all = suifangdownload1(id_person, ehrid, {
                "cookie": f"JSESSIONID={COOKIE}",
            }, DOMAIN, start_limit_time, end_limit_time)
            if infomation_all is None:
                record_error_id(id_person, "这个人数据获取时出错")
                return None
        elif infomation_all == []:
            record_error_id(id_person, f"{start_limit_time}-{end_limit_time}无随访记录")
            print(f"处理失败: {id_person}, 错误: {start_limit_time}-{end_limit_time}无随访记录")
            return None
        elif infomation_all == "没有慢病随访按钮":
            record_error_id(id_person, "没有慢病随访按钮")
            print(f"处理失败: {id_person}, 错误: 没有慢病随访按钮")
            return None

        results = []
        for result in infomation_all:
            print(f"获取到数据: {id_person}")
            # Columns expected by the results CSV, in output order.
            selected_fields = [
                '身份证号',	'随访类型',	'随访日期'	,'此次随访分类'	,'低血糖反应'	,'血压随访分类',	'血糖随访分类',	'症状',	'症状编辑框',	'随访机构',	'随访人',	'随访方式',	'是否加入国家标准版'	,'姓名',	'性别'	,'出生日期',	'所属人群',	'慢病类型'	,'其他慢病合并症',	'服药依从性','药物不良反应',	'药物不良反应描述',	'下次随访日期',	'身高'	,'收缩压'	,'舒张压'	,'体重',	'目标体重'	,'体质指数(BMI)',	'目标体质指数(BMI)'	,'心率',	'目标心率',	'足背动脉搏动触及正常',	'足背动脉搏动减弱'	,'足背动脉搏动消失',	'其他体征',	'日吸烟量',	'目标日吸烟量'	,'日饮酒量',	'目标日饮酒量',	'运动次数',	'目标运动次数'	,'运动时间'	,'目标运动时间',	'主食量'	,'目标主食量',	'空腹血糖',	'餐后血糖'	,'糖化血红蛋白'	,'糖化血红蛋白日期',	'TC'	,'TG'	,'HDL-C',	'LDL-C',	'BUN',	'Cr'	,'肌酐清除率'	,'尿检',	'尿微量白蛋白'	,'心电图'	,'眼底',	'其他辅助检查'	,'是否糖尿病',	'是否老年人',	'是否咳嗽、咳痰≥2周'	,'是否痰中带血或咯血',	'转诊原因',	'机构及科别',	'评估'	,'生活方式',	'医生建议',	'用药情况',
            ]
            # Copy only the selected fields; missing keys become "".
            extracted_result = {}
            for field in selected_fields:
                extracted_result[field] = result.get(field, "")
                if extracted_result[field] == "":
                    print("识别到为空字符的字串有：",field)
            results.append(extracted_result)
        print(results)
        return results
    except TypeError as err:
        # Raised when the person has no EHR record (see content check above).
        print(f"处理失败: {id_person}, 错误: {err}")
        record_error_id(id_person, "暂无建档")
        return None
    except (requests.exceptions.ConnectionError, ConnectionError):
        # BUGFIX: requests' ConnectionError is NOT a subclass of the builtin
        # ConnectionError, so the original handler never fired for network
        # failures (they fell through to the generic handler); catch both.
        record_error_id(id_person, "网络加载问题，要重新运行此人")
        print(f"处理失败: {id_person}, 错误: 网络加载问题，要重新运行此人")
        return None
    except Exception as e:
        record_error_id(id_person, str(e))
        print(f"处理失败: {id_person}, 错误: {str(e)}")
        return None


# 修改worker函数
def worker(thread_id, data_slice,start_limit_time, end_limit_time):

    """Thread worker: crawl every ID number in data_slice, saving results
    and updating a per-thread progress file after each item.

    Args:
        thread_id: 1-based thread index; names the env/progress file.
        data_slice: list of ID-card numbers assigned to this thread.
        start_limit_time: date-range start forwarded to crawler_program.
        end_limit_time: date-range end forwarded to crawler_program.
    """
    progress = get_thread_progress(thread_id)  # resume info from a previous run, if any

    # Total task count for this run = size of the assigned slice.
    total_tasks = len(data_slice)

    if progress:
        start_index = progress["index"]
        completed = progress["completed"]

        # Resuming: keep the recorded counters; the stored total is
        # reconciled against the new slice size below.
    else:
        start_index = 0
        completed = 0
        # Fresh start: seed the env file with the current slice size.
        init_thread_env(thread_id, total_tasks, completed)

    # Re-read the env file so the stored total stays consistent.
    env_info = get_thread_progress(thread_id)
    if env_info and env_info["total"] != total_tasks:
        # The total changed (e.g. resumed with a different slice): rewrite
        # line 1 as already-processed + newly-assigned, keep the rest.
        filename = f"./执行结果/env_thread_{thread_id}.txt"
        with open(filename, "r", encoding="utf-8") as f:
            lines = f.readlines()

        with open(filename, "w", encoding="utf-8") as f:
            f.write(f'总操作数:{int(start_index)+total_tasks}\n')  # updated total
            f.writelines(lines[1:])  # preserve the remaining lines

    for idx in range(0, len(data_slice)):
        id_person = str(data_slice[idx])
        # NOTE(review): idx is shifted by start_index AFTER indexing the
        # slice, so the later `idx == len(data_slice) - 1` comparison and the
        # progress percentage are off whenever start_index > 0 — confirm
        # whether the shift is intentional.
        idx += start_index
        update_thread_env(thread_id, id_person, completed, idx)
        print(f"{thread_id}线程正在获取: {id_person}信息")

        result_end = crawler_program(id_person,start_limit_time,end_limit_time)

        if result_end:


            if save_to_csv(result_end):
                completed += 1
                update_thread_env(thread_id, id_person, completed, idx + 1)
                print(f"{thread_id}保存成功: {id_person}")
                # Collect the non-empty follow-up dates for quarterly stats.
                sf_dates = [item['随访日期'] for item in result_end if '随访日期' in item and item['随访日期']]

                # Feed the dates into the per-person quarterly statistics row.
                simplified_quarterly_statistics(id_person, sf_dates,start_limit_time, end_limit_time)
            else:
                print(f"保存失败: {id_person}")
        else:
            # NOTE(review): both branches below do the same thing; the split
            # on the last element appears to be vestigial.
            if idx == len(data_slice) - 1:
                update_thread_env(thread_id, id_person, completed, idx + 1)
                print(f"数据处理失败: {id_person}")
                continue
            else:
                update_thread_env(thread_id, id_person, completed, idx + 1)
                print(f"数据处理失败: {id_person}")


        progress_percent = ((idx + 1) / total_tasks) * 100 if total_tasks > 0 else 0
        print(f"[线程 {thread_id}] 进度: {(idx + 1)}/{total_tasks} ({progress_percent:.1f}%)")


        # Throttle: pause after each record (configured via delay_ms).
        print(f"{thread_id}正在等待")
        time.sleep(DELAY_SECONDS)
        print(f"{thread_id}等待完成")


def split_ids(id_list, thread_count):
    """Partition id_list into thread_count contiguous chunks.

    The last `remainder` chunks each receive one extra element, so every
    ID is assigned to exactly one chunk.
    """
    base_size, remainder = divmod(len(id_list), thread_count)

    chunks = []
    cursor = 0
    for i in range(thread_count):
        # The final `remainder` threads each take one extra element.
        step = base_size + (1 if i >= thread_count - remainder else 0)
        chunks.append(id_list[cursor:cursor + step])
        cursor += step
    return chunks


# 生成季度统计表函数
import pandas as pd
from datetime import datetime
import os
import math


def create_quarterly_template(start_limit_time, end_limit_time):
    """
    Create the quarterly-statistics template workbook for a date range.

    The sheet is header-only: an ID-number column, one column per calendar
    quarter between the two dates (inclusive), and a raw-dates column.

    Args:
        start_limit_time (str): range start, 'YYYY-MM-DD'.
        end_limit_time (str): range end, 'YYYY-MM-DD'.

    Returns:
        str: path of the Excel file that was written.
    """
    start_date = datetime.strptime(start_limit_time, '%Y-%m-%d')
    end_date = datetime.strptime(end_limit_time, '%Y-%m-%d')

    # Enumerate quarters from the start quarter through the end quarter.
    quarter_headers = []
    start_year = start_date.year
    start_quarter = (start_date.month - 1) // 3 + 1
    end_year = end_date.year
    end_quarter = (end_date.month - 1) // 3 + 1

    current_year = start_year
    current_quarter = start_quarter
    while True:
        year_short = str(current_year)[2:]  # last two digits of the year
        quarter_headers.append(f"{year_short}年Q{current_quarter}")
        # Stop once the end quarter has been emitted.
        if current_year == end_year and current_quarter == end_quarter:
            break
        current_quarter += 1
        if current_quarter > 4:
            current_quarter = 1
            current_year += 1

    # Ensure the output directory exists.
    output_dir = "./执行结果/季度统计"
    os.makedirs(output_dir, exist_ok=True)

    # Header-only sheet: ID column, one column per quarter, raw-dates column.
    columns = ['身份证号'] + quarter_headers + ["随访日期"]
    df = pd.DataFrame(columns=columns)

    filename = f"{output_dir}/季度统计结果_{start_limit_time}_至_{end_limit_time}.xlsx"
    df.to_excel(filename, index=False)

    # BUGFIX: the original f-string did not interpolate the path.
    print(f"季度统计结果已保存至: {filename}")
    return filename


def main():
    """Main program: read the ID list from Excel, skip already-processed or
    previously-failed IDs, and fan the rest out across THREAD_COUNT worker
    threads."""
    os.makedirs("执行结果", exist_ok=True)
    # Create the output directory


    # Lines 6 and 7 (index 5/6) of admin.txt hold the date range.
    with open(CONF_ROOT / "./文档/admin.txt", "r", encoding="utf-8") as F:
        lines = F.readlines()
        start_limit_time = lines[5].strip()
        end_limit_time = lines[6].strip()
    output_dir = "./执行结果/季度统计"
    os.makedirs(output_dir, exist_ok=True)
    # Pre-create the quarterly-statistics template workbook.
    create_quarterly_template(start_limit_time, end_limit_time)

    # Daemon thread that refreshes the session cookie every 20 minutes.
    cookie_thread = threading.Thread(target=update_cookies_periodically, daemon=True)
    cookie_thread.start()
    try:
        # Reload the config so edits made between runs take effect.
        global CONFIG, DELAY_SECONDS
        CONFIG = load_config()
        DELAY_SECONDS = float(CONFIG["delay_ms"]) / 1000

        print(f"当前配置: 每条数据处理后延迟 {CONFIG['delay_ms']} 毫秒")
        # Input workbook: the first .xlsx found in the documents directory.
        current_dir = CONF_ROOT / "./文档"
        excel_files = [f for f in os.listdir(current_dir) if f.endswith('.xlsx')]
        if excel_files:
            first_excel = os.path.join(current_dir, excel_files[0])
            print(f"正在处理文件: {first_excel}")

            df = pd.read_excel(first_excel, dtype={"身份证号": str})  # read ID numbers as strings
        else:
            # NOTE(review): df stays unbound on this path, so the df.columns
            # access below raises NameError, reported by the outer except.
            print("当前目录下没有找到Excel文件")

        if "身份证号" not in df.columns:
            raise ValueError("缺少身份证号列")

        id_list = df["身份证号"].astype(str).unique().tolist()
        print(f"总身份证号数量: {len(id_list)}")

        # Collect IDs that already appear in the results CSV.
        processed_ids = set()
        if os.path.exists(RESULT_CSV):
            with open(RESULT_CSV, 'r', encoding='utf-8-sig') as f:
                reader = csv.DictReader(f)
                seen = set()
                unique_rows = []
                for row in reader:
                    # De-duplicate by ID number (stripping the Excel apostrophe).
                    id_key = row.get("身份证号", "").strip().strip("'")
                    if id_key not in seen:
                        seen.add(id_key)
                        unique_rows.append(row)
                # The set of already-processed ID numbers.
                processed_ids = {row.get("身份证号", "").strip().strip("'") for row in unique_rows}

        # Also skip IDs that previously failed (recorded in the error file).
        if os.path.exists(ERROR_EXCEL):
            try:
                error_df = pd.read_excel(ERROR_EXCEL)
                if "身份证号" in error_df.columns:
                    # Normalize: drop NaN, whitespace, and the leading apostrophe.
                    error_ids = set()
                    for id_num in error_df["身份证号"].tolist():
                        if pd.notna(id_num):
                            error_ids.add(str(id_num).strip().strip("'"))
                    processed_ids.update(error_ids)
                    print(f"从错误文件中加载了 {len(error_ids)} 个身份证号")
            except Exception as e:
                print(f"读取错误ID文件时出错: {e}")

        id_list = [id_p.strip() for id_p in id_list if id_p.strip() not in processed_ids]

        print("总共的数据数目为:",len(id_list))

        if not id_list:
            print("没有需要处理的新数据")
            return

        # Distribute the remaining IDs across the worker threads.
        chunks = split_ids(id_list, THREAD_COUNT)
        threads = []
        for i, chunk in enumerate(chunks):
            # Persist each chunk to its own Excel file for auditing/resume.
            if chunk:  # only when the chunk is non-empty
                # Leading apostrophe keeps Excel from mangling the digits.
                df_chunk = pd.DataFrame({"身份证号": ["'"+str(x).strip() for x in chunk]})

                # One file per thread.
                chunk_file = CONF_ROOT / f"./执行结果/chunk_{i + 1}.xlsx"

                # Remove any stale file first.
                if os.path.exists(chunk_file):
                    os.remove(chunk_file)

                # Write the fresh chunk data.
                df_chunk.to_excel(chunk_file, index=False)
                print(f"Chunk {i + 1} 已保存到 {chunk_file}，包含 {len(chunk)} 条记录")

            # Start the worker thread for this chunk (even when empty).
            t = threading.Thread(target=worker, args=(i + 1, chunk,start_limit_time,end_limit_time))
            threads.append(t)
            t.start()

        for t in threads:
            t.join()

        print("所有线程处理完成！")

    except Exception as e:
        print(f"主程序错误: {str(e)}")


if __name__ == "__main__":
    start_time = time.time()
    # Line 5 (index 4) of admin.txt holds the thread count.
    with open(CONF_ROOT / "./文档/admin.txt", "r", encoding="utf-8") as f1:
        lines = f1.readlines()
    # NOTE(review): .strip("") strips nothing; this still works because int()
    # tolerates surrounding whitespace, but .strip() was likely intended.
    num_xiancheng = int(lines[4].strip(""))

    THREAD_COUNT = num_xiancheng
    main()
    print(f"总耗时: {time.time() - start_time:.2f}秒")
