import time
import pandas as pd
import requests
import json
import jsonpath
from lxml import etree
import threading
import os
import csv

# 导入各个字段提取模块（根据实际路径调整）
from comment.config import CONF_ROOT
from compements.assemblies.file_group_01 import group_01
from compements.assemblies.file_group_0 import group_0
from compements.assemblies.file_group_1 import group_1
from compements.assemblies.file_group_2 import group_2
from compements.assemblies.file_group_3 import group_3
from compements.assemblies.file_group_4 import group_4
from compements.assemblies.file_group_5 import group_5
from compements.assemblies.file_group_6 import group_6
from compements.assemblies.file_group_7 import group_7
from compements.assemblies.file_group_8 import group_8
from compements.assemblies.file_group_9 import group_9
from compements.assemblies.file_group_10 import group_10

# 获取Cookie模块
from comment.cookie_find import get_cookies

# Global configuration
# Paths are resolved relative to the project config root.
RESULT_CSV = CONF_ROOT / "./执行结果/result.csv"
ERROR_EXCEL = CONF_ROOT / "./执行结果/问题文档.xlsx"
CONFIG_FILE = CONF_ROOT / "./文档/config.txt"  # runtime-tunable settings (delay_ms)
DOMAIN, COOKIE, JIGOUMA = get_cookies()
# BUGFIX: str.strip("http://") removes any leading/trailing characters from the
# set {h, t, p, :, /} — e.g. "http://test.com" would become "est.com".
# Split off the scheme instead to get the bare host for the Host header.
HOST = DOMAIN.split("://", 1)[-1]
print("domain:", DOMAIN, "cookie:", COOKIE, "jigouma:", JIGOUMA, "HOST", HOST)
# Locks guarding the shared cookie globals, the result CSV, and the error workbook.
cookie_lock = threading.Lock()
csv_lock = threading.Lock()
error_lock = threading.Lock()


# Background task that refreshes the session cookie on a fixed schedule
def update_cookies_periodically():
    """Refresh DOMAIN/COOKIE/JIGOUMA/HOST every 20 minutes.

    Runs forever; intended to be started as a daemon thread (see main()).
    The globals are swapped under cookie_lock so readers see a consistent set.
    """
    global COOKIE, DOMAIN, JIGOUMA, HOST  # HOST must track DOMAIN
    while True:
        time.sleep(1200)  # refresh every 20 minutes
        with cookie_lock:
            DOMAIN, COOKIE, JIGOUMA = get_cookies()
            # BUGFIX: strip("http://") strips a character set, not a prefix,
            # and can eat leading letters of the host — split the scheme off.
            HOST = DOMAIN.split("://", 1)[-1]
            print("Cookie 和相关配置已更新")


# Load the configuration file
def load_config():
    """Load key=value settings from CONFIG_FILE, creating it with defaults if absent.

    Returns:
        dict: settings read from the file, with any missing keys filled in
        from the defaults. Values read from disk are strings; defaults keep
        their original types.
    """
    default_config = {
        "delay_ms": 500  # default per-record delay, in milliseconds
    }

    if not os.path.exists(CONFIG_FILE):
        # First run: materialise the defaults on disk and return them.
        with open(CONFIG_FILE, "w", encoding="utf-8") as f:
            f.writelines(f"{k}={v}\n" for k, v in default_config.items())
        return default_config

    config = {}
    with open(CONFIG_FILE, "r", encoding="utf-8") as f:
        for raw in f:
            raw = raw.strip()
            if not raw or "=" not in raw:
                continue
            key, _, value = raw.partition("=")
            config[key.strip()] = value.strip()
    # Guarantee every default key is present.
    for k, v in default_config.items():
        config.setdefault(k, v)
    return config


# Persist configuration changes
def update_config(delay_ms):
    """Write the delay setting (milliseconds) back to CONFIG_FILE, replacing its contents."""
    content = f"delay_ms={delay_ms}\n"
    with open(CONFIG_FILE, "w", encoding="utf-8") as f:
        f.write(content)


# Load configuration once at import time (main() reloads it on startup)
CONFIG = load_config()
DELAY_SECONDS = float(CONFIG["delay_ms"]) / 1000  # milliseconds -> seconds


# Record error information
def record_error_id(id_person, error_msg):
    """Append one error entry (ID number, message, timestamp) to the error workbook.

    The ID is prefixed with an apostrophe so Excel keeps it as text.
    Serialised with error_lock because multiple worker threads may fail at once.
    """
    new_row = pd.DataFrame({
        "身份证号": ["'" + str(id_person)],
        "错误信息": [error_msg],
        "时间": [time.strftime("%Y-%m-%d %H:%M:%S")],
    })

    with error_lock:
        if os.path.exists(ERROR_EXCEL):
            combined = pd.concat([pd.read_excel(ERROR_EXCEL), new_row], ignore_index=True)
        else:
            combined = new_row
        combined.to_excel(ERROR_EXCEL, index=False)


# Save one scraped record
def save_to_csv(data, filename=RESULT_CSV):
    """Append one record (a flat dict) as a row of the result CSV.

    Args:
        data: field-name -> value mapping produced by crawler_program.
        filename: target CSV path (defaults to the shared RESULT_CSV).

    Returns:
        True on success, False for invalid input or a write error.
    """
    if not data or not isinstance(data, dict):
        return False

    try:
        with csv_lock:
            df_new = pd.DataFrame([data])
            # FIX: the previous implementation re-read and rewrote the whole
            # CSV for every row, making a run O(n^2) in rows (all under the
            # lock). Appending a single row is equivalent here because every
            # record from crawler_program has the same keys in the same order.
            is_new_file = not os.path.exists(filename)
            df_new.to_csv(
                filename,
                mode="a",
                header=is_new_file,  # header only when creating the file
                index=False,
                # Write the BOM only on creation; utf-8-sig in append mode
                # would inject a BOM in the middle of the file.
                encoding="utf-8-sig" if is_new_file else "utf-8",
            )
            return True
    except Exception as e:
        print(f"CSV写入失败: {str(e)}")
        return False


# Create a thread's progress file
def init_thread_env(thread_id, total, completed_initial=0):
    """Create the per-thread progress file if it does not exist yet.

    Args:
        thread_id: numeric id of the worker thread.
        total: total number of tasks (already processed + pending).
        completed_initial: number of tasks already finished.
    """
    path = f"./执行结果/env_thread_{thread_id}.txt"
    if os.path.exists(path):
        # Never clobber an existing progress file — it may hold resume state.
        return
    lines = [
        f"总操作数:{total}\n",
        "当前处理身份证号:\n",
        f"已完成数量:{completed_initial}\n",
        "当前索引:0\n",
    ]
    with open(path, "w", encoding="utf-8") as f:
        f.writelines(lines)


# Rewrite a thread's progress file
def update_thread_env(thread_id, current_id, completed, current_index):
    """Rewrite the per-thread progress file, preserving the total-count line."""
    path = f"./执行结果/env_thread_{thread_id}.txt"
    with open(path, "r", encoding="utf-8") as f:
        first_line = f.readline().strip()  # keep the recorded total as-is

    body = (
        f"{first_line}\n"
        f"当前处理身份证号:{current_id}\n"
        f"已完成数量:{completed}\n"
        f"当前索引:{current_index}\n"
    )
    with open(path, "w", encoding="utf-8") as f:
        f.write(body)


# Read a thread's progress file
def get_thread_progress(thread_id):
    """Parse the per-thread progress file.

    Returns:
        dict with keys "total", "current_id", "completed", "index",
        or None when the file does not exist (fresh start).
    """
    path = f"./执行结果/env_thread_{thread_id}.txt"
    if not os.path.exists(path):
        return None
    with open(path, "r", encoding="utf-8") as f:
        lines = f.readlines()
    return {
        "total": int(lines[0].split(":")[1]),
        "current_id": lines[1].split(":")[1].strip(),
        "completed": int(lines[2].split(":")[1]),
        "index": int(lines[3].split(":")[1]),
    }


# Core crawler logic
def crawler_program(id_person):
    """Fetch one person's full health record and return it as a flat dict.

    Looks the person up by ID number against the EHR search endpoint, then
    pulls the detail page and several per-group endpoints, merges all group
    dicts, and extracts a fixed list of fields.

    Returns:
        dict of selected field-name -> value on success, or None on any
        failure (the failure is also recorded via record_error_id).
    """



    try:
        # Snapshot the shared globals under the lock so DOMAIN / COOKIE /
        # HOST / JIGOUMA are read consistently (a daemon thread rotates them).
        with cookie_lock:
            url = f"{DOMAIN}/phis/app/ehr?limit=26&start=0&_dch=1752211623890"
            headers = {
                "accept": "application/json",
                "content-Type": "application/json",
                "cookie": f"JSESSIONID={COOKIE}",
                "Host": HOST,
                "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36"
            }
            jigouma = JIGOUMA  # local copy used for the string substitution below

        # Query template: the placeholders 身份证号 (ID number) and 机构码
        # (organisation code) are substituted via str.replace.
        json_str = '{"ehrBaseFilterMap":{"EQ_gender":"","GTE_birthDate":"","LTE_birthDate":"","LIKE_innerCode":"%","GTE_dateCreated":"","LTE_dateCreated":"","EQ_signTeamCode":"","LIKE_addrCode":null,"EQ_curContract":"","EQ_individualConStatus":"","EQ_creator":"","EQ_idNumber":"身份证号","LIKE_ehrCode":"%","LIKE_mngOrgCode":"机构码%","LIKE_nameIndex":"%"},"ehrHfIndictorMap":{},"ehrClassifyGrFilterMap":{},"ehrClassifyCdFilterMap":{},"ehrClassifySpFilterMap":{},"grRelation":"OR","cdRelation":"OR","spRelation":"OR","fetchIdType":true,"fetchFamily":true,"fetchHasVSvcFlwChronicWf":true,"fetchHasFirstSoap":true,"fetchHasFlwMental":true,"fetchHasSvcExam1":true,"fetchHasAsmOldS":true,"fetchHasAsmYear":true,"fetchChronic":true,"fetchHasAsmOldA":true}'.replace(
            "身份证号", id_person).replace("机构码", jigouma)

        res = requests.post(url, headers=headers, data=json_str, timeout=30)

        if res.status_code != 200:
            raise Exception(f"HTTP状态码: {res.status_code}")

        json_data = json.loads(res.text)
        # jsonpath returns False (not a list) when nothing matches, so the
        # [0] raises TypeError — caught below as "record not found".
        json_info = jsonpath.jsonpath(json_data, '$..id')[0]
        ehrid = jsonpath.jsonpath(json_data, '$..ehrId')[0]
        # print("重要数据获取：", "1:", json_info, "2:", ehrid)
        # The group fetchers also read the shared cookie state, so lock here too.
        with cookie_lock:
            dict_group_01 = group_01(json_info, COOKIE, DOMAIN)
            dict_group_0 = group_0(json_info, COOKIE, DOMAIN)

        # Detail page for the record; the remaining groups parse its HTML.
        target_url = f"{DOMAIN}/phis/app/ehr/update/{json_info},,,false"
        target_res = requests.get(target_url, headers=headers, timeout=30)
        tree = etree.HTML(target_res.text)

        with cookie_lock:
            # Merge all field groups into one flat dict; later groups win on
            # duplicate keys. groups 8-10 hit extra endpoints with form headers.
            result = {
                **dict_group_01,
                **dict_group_0,
                **group_1(tree),
                **group_2(tree),
                **group_3(tree),
                **group_4(tree),
                **group_5(tree),
                **group_6(tree),
                **group_7(tree),
                **group_8(ehrid, {
                    "content-type": "application/x-www-form-urlencoded; charset=UTF-8",
                    "cookie": f"JSESSIONID={COOKIE}",
                    "host": HOST
                }, DOMAIN),
                **group_9(ehrid, {
                    "content-type": "application/x-www-form-urlencoded; charset=UTF-8",
                    "cookie": f"JSESSIONID={COOKIE}",
                    "host": HOST
                }, DOMAIN),
                # The extra argument is a 13-digit millisecond-style timestamp
                # built from time.time().
                **group_10(ehrid, {
                    "content-type": "application/x-www-form-urlencoded; charset=UTF-8",
                    "cookie": f"JSESSIONID={COOKIE}",
                    "host": HOST
                }, str(time.time()).replace(".", "")[0:13], DOMAIN)
            }
        print(f"获取到数据: {id_person}")
        # Fixed list of field names (Chinese column headers) to keep.
        selected_fields = [
            "首诊记录","建档日期", "档案编号", "建档单位", "建档人", "更新日期", "姓名", "身份证号",
            "单位联系人电话", "单位联系人姓名", "单位电话", "联系人电话", "联系人姓名", "现住址",
            "现住址详细地址", "所属派出所", "邮政编码", "EMail", "本人电话", "住宅电话", "证件类型",
            "性别", "文化程度", "民族", "居住状况", "户别", "常驻类型", "国籍", "籍贯", "户籍地址",
            "工作单位", "职业", "同住者", "医疗费用支付方式", "医疗支付方式其它", "特殊人群分类",
            "人群分类", "定点医疗单位", "农合号", "医保号", "暂住证", "来京日期", "户口所在地",
            "婚姻状况", "市", "区/县", "乡镇/街道", "居委会/村", "小区", "门牌号", "父亲", "母亲",
            "兄弟姐妹", "子女", "血型", "RH", "药物过敏史", "暴露史", "遗传病史", "残疾情况",
            "其他残疾", "厨房排风设施", "燃料类型", "饮水", "厕所", "禽畜栏", "身高", "体重",
            "BMI", "腰围", "臀围", "腰臀比", "收缩压", "舒张压", "管理机构", "内部建档号",
            "所属家庭", "与户主关系", "责任医生", "家庭保健员", "成为保健员日期", "健康分类",
            "疾病史", "手术史", "外伤史", "输血史", "个人主要健康问题", "是否吸烟", "开始吸烟时间",
            "戒断时间", "吸烟量", "是否饮酒", "饮酒类型", "饮酒量", "饮酒频率", "戒酒日期",
            "是否锻炼", "锻炼情况", "每次锻炼时间", "锻炼类型", "饮食习惯", "饮食量(主食量)",
            "睡眠时间", "睡眠情况", "其它习惯","年龄"
        ]

        # Pull just the selected fields; missing ones default to "".
        extracted_result = {}
        for field in selected_fields:
            extracted_result[field] = result.get(field, "")
        print("最终结果:", extracted_result)
        return extracted_result
    except TypeError as err:
        # Raised by the [0] on a False jsonpath result: no record at this hospital.
        record_error_id(id_person, "本医院没有记录这条信息，请登录网页查询")
        print(f"处理失败: {id_person}, 错误: 未找到这条数据")
        return None
    except Exception as e:
        record_error_id(id_person, str(e))
        print(f"处理失败: {id_person}, 错误: {str(e)}")
        return None


# Worker thread function
def worker(thread_id, data_slice):


    """Process this thread's slice of ID numbers sequentially.

    Progress is persisted to ./执行结果/env_thread_<id>.txt after every record
    so a restarted run can resume its counters. Each record is followed by a
    DELAY_SECONDS pause to throttle the request rate.
    """
    progress = get_thread_progress(thread_id)  # resume info from a previous run, if any

    # Total tasks for this run is the length of the assigned slice.
    total_tasks = len(data_slice)

    if progress:
        start_index = progress["index"]
        completed = progress["completed"]

        # Keep the recorded total for consistency; on a fresh start the
        # actual slice length is used instead.
    else:
        start_index = 0
        completed = 0
        # Initialise the env file; total is the currently assigned task count.
        init_thread_env(thread_id, total_tasks, completed)

    # Re-read the env file and reconcile its total with the new slice size.
    env_info = get_thread_progress(thread_id)
    if env_info and env_info["total"] != total_tasks:
        # Totals diverged (e.g. a resumed run with fewer remaining IDs):
        # rewrite the first line as already-done + newly-assigned.
        filename = f"./执行结果/env_thread_{thread_id}.txt"
        with open(filename, "r", encoding="utf-8") as f:
            lines = f.readlines()

        with open(filename, "w", encoding="utf-8") as f:
            f.write(f'总操作数:{int(start_index)+total_tasks}\n')  # updated total
            f.writelines(lines[1:])  # keep the remaining lines untouched

    for idx in range(0, len(data_slice)):
        id_person = str(data_slice[idx])
        # NOTE: data_slice was indexed with the local idx above; from here on
        # idx is shifted by start_index to the absolute position recorded in
        # the progress file.
        idx += start_index
        update_thread_env(thread_id, id_person, completed, idx)
        print(f"{thread_id}线程正在获取: {id_person}信息")

        result = crawler_program(id_person)
        if result:
            if save_to_csv(result):
                completed += 1
                update_thread_env(thread_id, id_person, completed, idx + 1)
                print(f"{thread_id}保存成功: {id_person}")
            else:

                print(f"保存失败: {id_person}")
        else:
            # NOTE(review): both branches below are identical; the last-item
            # special case looks vestigial — candidate for simplification.
            if idx == len(data_slice) - 1:
                update_thread_env(thread_id, id_person, completed, idx + 1)
                print(f"数据处理失败: {id_person}")
                continue
            else:
                update_thread_env(thread_id, id_person, completed, idx + 1)
                print(f"数据处理失败: {id_person}")

        # Pause after each record to throttle the request rate.
        print(f"{thread_id}正在等待")
        time.sleep(DELAY_SECONDS)
        print(f"{thread_id}等待完成")


def split_ids(id_list, thread_count):
    """Split id_list into thread_count contiguous chunks.

    The last `remainder` chunks each get one extra element, so chunk sizes
    differ by at most one and concatenating the chunks reproduces id_list.
    """
    base, extra = divmod(len(id_list), thread_count)

    chunks = []
    pos = 0
    for i in range(thread_count):
        # Later threads absorb the remainder: one extra element each.
        size = base + (1 if i >= thread_count - extra else 0)
        chunks.append(id_list[pos:pos + size])
        pos += size
    return chunks


def main():
    """Main program: load IDs from Excel, skip already-processed ones, fan out worker threads."""
    # NOTE(review): created relative to the CWD, while RESULT_CSV / ERROR_EXCEL
    # use CONF_ROOT — confirm both resolve to the same directory.
    os.makedirs("执行结果", exist_ok=True)

    # Daemon thread that refreshes the cookies every 20 minutes.
    cookie_thread = threading.Thread(target=update_cookies_periodically, daemon=True)
    cookie_thread.start()
    try:
        # Reload the config so edits made between runs take effect.
        global CONFIG, DELAY_SECONDS
        CONFIG = load_config()
        DELAY_SECONDS = float(CONFIG["delay_ms"]) / 1000

        print(f"当前配置: 每条数据处理后延迟 {CONFIG['delay_ms']} 毫秒")
        # Input documents directory: the first .xlsx found supplies the ID list.
        current_dir = CONF_ROOT / "./文档"
        excel_files = [f for f in os.listdir(current_dir) if f.endswith('.xlsx')]
        if excel_files:
            first_excel = os.path.join(current_dir, excel_files[0])

            df = pd.read_excel(first_excel, dtype={"身份证号": str})  # keep ID numbers as strings
        else:
            # NOTE(review): df stays unbound on this path, so the access below
            # raises NameError, which the outer except swallows as 主程序错误.
            print("当前目录下没有找到Excel文件")

        if "身份证号" not in df.columns:
            raise ValueError("缺少身份证号列")

        id_list = df["身份证号"].astype(str).unique().tolist()
        print(f"总身份证号数量: {len(id_list)}")

        # Collect IDs already present in the result CSV so they are skipped.
        processed_ids = set()
        if os.path.exists(RESULT_CSV):
            with open(RESULT_CSV, 'r', encoding='utf-8-sig') as f:
                reader = csv.DictReader(f)
                processed_ids = {row.get("身份证号", "").strip().strip("'") for row in reader}

        # Also skip IDs that previously failed (recorded in the error workbook).
        if os.path.exists(ERROR_EXCEL):
            try:
                error_df = pd.read_excel(ERROR_EXCEL)
                if "身份证号" in error_df.columns:
                    error_ids = {str(id_num).strip().strip("'") for id_num in error_df["身份证号"].tolist() if pd.notna(id_num)}
                    processed_ids.update(error_ids)
            except Exception as e:
                print(f"读取错误ID文件时出错: {e}")

        id_list = [id_p.strip() for id_p in id_list if id_p.strip() not in processed_ids]

        print("总共的数据数目为:",len(id_list))

        if not id_list:
            print("没有需要处理的新数据")
            return

        # Distribute the remaining IDs across THREAD_COUNT worker threads.
        chunks = split_ids(id_list, THREAD_COUNT)
        threads = []
        for i, chunk in enumerate(chunks):
            # Persist each chunk to Excel for auditing/resume purposes.
            if chunk:  # only non-empty chunks get a file
                # Apostrophe prefix forces Excel to treat IDs as text.
                df_chunk = pd.DataFrame({"身份证号": ["'"+str(x).strip() for x in chunk]})

                # One file per thread.
                chunk_file = CONF_ROOT / f"./执行结果/chunk_{i + 1}.xlsx"

                # Remove any stale file from a previous run first.
                if os.path.exists(chunk_file):
                    os.remove(chunk_file)

                # Write the fresh chunk.
                df_chunk.to_excel(chunk_file, index=False)
                print(f"Chunk {i + 1} 已保存到 {chunk_file}，包含 {len(chunk)} 条记录")

            # Start the worker for this chunk (thread ids are 1-based).
            t = threading.Thread(target=worker, args=(i + 1, chunk))
            threads.append(t)
            t.start()

        for t in threads:
            t.join()

        print("所有线程处理完成！")

    except Exception as e:
        print(f"主程序错误: {str(e)}")


if __name__ == "__main__":
    start_time = time.time()
    with open(CONF_ROOT / "./文档/admin.txt", "r", encoding="utf-8") as f1:
        lines = f1.readlines()
    num_xiancheng = int(lines[4].strip(""))

    THREAD_COUNT = num_xiancheng
    main()
    print(f"总耗时: {time.time() - start_time:.2f}秒")
