import asyncio
import json
import os
import platform
import time
from concurrent.futures import ThreadPoolExecutor, as_completed

import pandas as pd
import requests
from openai import OpenAI, AsyncOpenAI
from tqdm import tqdm

from llm_result_util import result_json_tackle
from system_prompt_v3 import generate_gxy_prompt, generate_tnb_prompt

# DeepSeek-R1 medical-evaluation judge client (Alibaba DashScope,
# OpenAI-compatible endpoint).
# SECURITY: the API key should come from the environment; the inline literal is
# kept only as a backward-compatible fallback and should be rotated/removed.
client = AsyncOpenAI(
    api_key=os.environ.get("DASHSCOPE_API_KEY", "sk-ccb85689031348b589b07b6f322db5b8"),
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
)


def call_baidu_api(model_name, system_prompt, question):
    """Call the Baidu Qianfan chat-completions endpoint and return the reply text.

    Args:
        model_name: Qianfan model identifier.
        system_prompt: instruction text sent ahead of the user question.
        question: user question; truncated so prompt + question stay <= 5120 chars.

    Returns:
        The assistant message content, or '' when the response carries none.
    """
    print(f"Calling 百度云 model_name: " + model_name)
    start_time = time.time()  # for latency logging
    # BUG FIX: url was assigned twice (identical value); keep a single assignment.
    url = "https://qianfan.bj.baidubce.com/v2/chat/completions"

    # Keep total input within the 5120-character limit by truncating the question.
    max_length = 5120
    if len(system_prompt) + len(question) > max_length:
        # max(0, ...) guards against a negative slice when the prompt alone
        # already exceeds the limit (the original produced a garbled tail slice).
        question = question[:max(0, max_length - len(system_prompt))]

    payload = json.dumps({
        "model": model_name,
        "messages": [
            {
                # BUG FIX: the instruction prompt was sent with role "assistant";
                # the OpenAI-compatible v2 API expects role "system" for it.
                "role": "system",
                "content": system_prompt
            },
            {
                "role": "user",
                "content": question
            }
        ],
        "disable_search": False,
        "enable_citation": False
    }, ensure_ascii=False)
    headers = {
        'Content-Type': 'application/json',
        # SECURITY: this credential should be loaded from config/env, not hard-coded.
        'Authorization': 'Bearer bce-v3/ALTAK-U7S4YWWVHS7oiyK8DFwMw/c9e98ab9ea526f31e82d020164ae1f9c03855b57'
    }

    response = requests.post(url, headers=headers, data=payload.encode("utf-8"))
    end_time = time.time()
    print('最终结果: ' + response.text)
    print(f"BAIDU API call took {end_time - start_time} seconds")
    # Defensive chained .get(): returns '' rather than raising on a malformed body.
    return response.json().get('choices', [{}])[0].get('message', {}).get('content', '')


async def call_deepseek_by_bailian_block(model_names, prompt_generator, question, answers_dict):
    """Score every answer column against R1_Answer with each judge model.

    For each judge model in *model_names*, one streaming chat completion is
    issued per answer column (all columns concurrently via asyncio.gather);
    the per-column JSON fragments are merged with merge_dicts.

    Args:
        model_names: judge model identifiers (e.g. "deepseek-r1", "qwq-plus").
        prompt_generator: callable([model_name], [col]) -> system prompt string.
        question: the clinical question being evaluated.
        answers_dict: column name -> answer text; must contain 'R1_Answer'
            (the gold standard the other columns are judged against).

    Returns:
        dict: judge model name -> merged result string (double-quoted dict
        repr), or None when that model's evaluation failed.
    """
    results = {}

    async def call_model(model_name):
        # Run one judge model over all answer columns and merge its outputs.
        print(f"Calling model: {model_name}")
        start_time = time.time()
        try:
            if 'R1_Answer' not in answers_dict:
                print("Error: R1_Answer not found in answers_dict.")
                # BUG FIX: was `return model_name, None` — a truthy tuple that
                # defeated the caller's `if not eval_result` no-response check.
                return None

            user_content = f"问题：{question}，标准答案R1_Answer：{answers_dict['R1_Answer']}"

            async def call_model_sub(model_name, col, ans):
                # The gold column is never scored against itself.
                if col == 'R1_Answer':
                    return '{}'
                # BUG FIX: the original ternary bound as
                # `(user_content + f"…{ans}") if ans else f"…(空)"`, silently
                # dropping the question/gold-answer prefix whenever `ans` was
                # empty. Build the suffix first, then always prepend the prefix.
                suffix = f"，参考答案{col}：{ans}" if ans else f"，参考答案{col}：(空)"
                user_content_with_answer = user_content + suffix
                prompt = prompt_generator([model_name], [col])

                completion = await client.chat.completions.create(
                    model=model_name,
                    messages=[
                        {'role': 'system', 'content': prompt},
                        {'role': 'user', 'content': user_content_with_answer}
                    ],
                    stream=True,
                )

                reasoning_content = ""
                answer_content = ""
                if model_name == "qwq-plus":
                    # qwq-plus streams plain content only (no reasoning channel).
                    response_content = ""
                    async for chunk in completion:
                        if chunk.choices[0].delta.content:
                            response_content += chunk.choices[0].delta.content
                    print(f'模型 {model_name} 最终结果: ' + response_content)
                    end_time = time.time()
                    print(f"模型 {model_name} API call took {end_time - start_time:.2f} seconds")
                    return response_content
                else:
                    async for chunk in completion:
                        # A chunk with empty choices carries the final usage stats.
                        if not chunk.choices:
                            print("\nUsage:")
                            print(chunk.usage)
                        else:
                            delta = chunk.choices[0].delta
                            # Reasoning tokens stream separately from the answer.
                            if hasattr(delta, 'reasoning_content') and delta.reasoning_content is not None:
                                reasoning_content += delta.reasoning_content
                            elif delta.content != "":
                                answer_content += delta.content
                    print(f"call model_name: {model_name}最终结果: {answer_content}")
                    end_time = time.time()
                    print(f"model:{model_name} API call took {end_time - start_time} seconds")
                    return answer_content

            # One concurrent sub-call per answer column.
            # (Renamed from `results` — it shadowed the outer per-model dict.)
            sub_results = await asyncio.gather(
                *(call_model_sub(model_name, col, ans) for col, ans in answers_dict.items())
            )
            merged_res = {}
            for sub_result in sub_results:
                merged_res = merge_dicts(merged_res, sub_result)
            # Downstream json.loads expects double quotes.
            return str(merged_res).replace("'", '"')

        except Exception as e:
            print(f"Error calling model {model_name}: {e}")
            # BUG FIX: was `return model_name, None`; callers treat this value
            # as the result string, so a failed model must yield None.
            return None

    # Judge models run sequentially; the per-column calls above are concurrent.
    for model_name in model_names:
        results[model_name] = await call_model(model_name)

    return results

def merge_dicts(dict1, dict2):
    """Recursively merge *dict2* into a copy of *dict1* and return the result.

    *dict2* may be a dict, or a JSON string (cleaned via result_json_tackle
    before parsing). Any other type is ignored, yielding a plain copy of
    *dict1*. Nested dicts are merged key-by-key; all other values from
    *dict2* overwrite the corresponding keys in *dict1*.
    """
    merged = dict1.copy()
    if isinstance(dict2, str):
        incoming = json.loads(result_json_tackle(dict2))
    elif isinstance(dict2, dict):
        incoming = dict(dict2)
    else:
        incoming = {}
    for key, value in incoming.items():
        current = merged.get(key)
        if isinstance(current, dict) and isinstance(value, dict):
            merged[key] = merge_dicts(current, value)
        else:
            merged[key] = value
    return merged

def read_excel(file_path):
    """Load an Excel sheet into a DataFrame; return None on any failure."""
    try:
        return pd.read_excel(file_path)
    except FileNotFoundError:
        print(f"Error: File {file_path} not found.")
    except Exception as e:
        print(f"Error reading Excel file: {e}")
    return None


def write_excel(file_path, data):
    """Persist *data* (list of row dicts) to an Excel file; errors are logged, not raised."""
    try:
        frame = pd.DataFrame(data)
        frame.to_excel(file_path, index=False)
        print(f"Results written to {file_path}")
    except Exception as e:
        print(f"Error writing to Excel file: {e}")


async def process_data(df, model_names, prompt_generator, evaluation_detail_columns):
    """Evaluate every row of *df* with each judge model and collect flat result rows.

    Any column whose name contains 'Answer' is treated as a candidate answer;
    'R1_Answer' is the gold standard. Rows are processed in concurrent batches
    of 10 asyncio tasks.

    Args:
        df: input DataFrame; must contain 'Question' and the answer columns.
        model_names: judge model identifiers.
        prompt_generator: prompt factory passed through to the evaluator.
        evaluation_detail_columns: per-dimension score keys to extract from
            each model's 'evaluation_detail' payload.

    Returns:
        list[dict]: one flat record per row, ready for write_excel. Rows that
        fail entirely yield a minimal error record instead of None.

    NOTE(review): row["Ascvd"] / row["Ascvd_Result"] are read unconditionally,
    so this currently assumes hypertension-style input — confirm before reusing
    it for the diabetes pipeline.
    """
    results = []
    answer_columns = [col for col in df.columns if 'Answer' in col]

    async def process_row(index, row):
        question = row["Question"]
        # Strip whitespace from string answers; leave NaN/other values untouched.
        answers_dict = {col: (row[col].strip() if isinstance(row[col], str) else row[col]) for col in answer_columns}
        try:
            evaluation_results = await call_deepseek_by_bailian_block(model_names, prompt_generator, question, answers_dict)
            result_data = {"Question": question,
                           "Ascvd": row["Ascvd"],
                           "Ascvd_Result": row["Ascvd_Result"]
                           }
            # Echo the candidate answers (but not the gold one) into the output.
            for col in answer_columns:
                if col != 'R1_Answer':
                    result_data[col] = row[col]

            for model_name in model_names:
                eval_result = evaluation_results.get(model_name)
                if not eval_result:
                    # The judge model produced nothing: mark every score column.
                    for col in answer_columns:
                        result_data[f"{model_name}_{col}_Score"] = "no_response"
                    continue

                try:
                    result_str = result_json_tackle(eval_result)
                    data = json.loads(result_str)
                    evaluation_detail = data.get('evaluation_detail', {})

                    for col in answer_columns:
                        if col != 'R1_Answer':
                            if not answers_dict[col]:
                                result_data[f"{model_name}_{col}_Score"] = "0/0"  # uniform marker for empty answers
                            else:
                                score_entry = data.get(f"{model_name}_{col}_score")
                                result_data[f"{model_name}_{col}_Score"] = str(score_entry)  # keep raw score string
                                for eval_col in evaluation_detail_columns:
                                    result_data[f"{model_name}_{col}_{eval_col}"] = str(evaluation_detail.get(f"{model_name}_{col}", {}).get(eval_col))

                    result_data[f"{model_name}_Evaluation_Detail"] = eval_result
                except Exception as e:
                    # Parse failure: record a truncated reason per score column.
                    for col in answer_columns:
                        result_data[f"{model_name}_{col}_Score"] = f"parse_error:{str(e)[:50]}"
                    result_data[f"{model_name}_Evaluation_Detail"] = f"解析失败：{str(e)[:100]}"

            return result_data
        except Exception as e:
            print(f"Error process_row: {e}")
            # BUG FIX: the original fell through returning None here, injecting
            # None entries into the output list; emit a minimal error record.
            return {"Question": question, "Error": str(e)[:200]}

    # Process the frame in batches of 10 concurrent row tasks.
    chunk_size = 10
    for start in range(0, len(df), chunk_size):
        chunk = df.iloc[start:start + chunk_size]
        tasks = [
            asyncio.create_task(process_row(index, row))
            for index, row in chunk.iterrows()
        ]
        print("任务已提交，主线程继续执行")
        results += await asyncio.gather(*tasks)
        print(f"----- 已处理 {len(chunk)} 行，当前批次结束 -----")

    return results


def process_tnb(input_file_tnb, output_file_tnb, model_names):
    """Run the diabetes (糖尿病) evaluation pipeline: read, score, write.

    Args:
        input_file_tnb: path to the input Excel workbook.
        output_file_tnb: path where the scored workbook is written.
        model_names: judge model identifiers.
    """
    evaluation_detail_columns = ["诊断准确性评分", "用药方案评分", "治疗方案评分", "检查检验方案评分", "饮食运动方案评分"]
    df_tnb = read_excel(input_file_tnb)
    if df_tnb is not None:
        # BUG FIX: process_data is a coroutine; it was previously passed to
        # write_excel unawaited, so the diabetes path never produced scores.
        results_tnb = asyncio.run(process_data(df_tnb, model_names, generate_tnb_prompt, evaluation_detail_columns))
        write_excel(output_file_tnb, results_tnb)
    else:
        print("No diabetes data to process.")


def process_gxy(input_file_gxy, output_file_gxy, model_names):
    """Run the hypertension (高血压) evaluation pipeline: read, score, write.

    Args:
        input_file_gxy: path to the input Excel workbook.
        output_file_gxy: path where the scored workbook is written.
        model_names: judge model identifiers.
    """
    # NOTE(review): forces the Proactor loop on Windows — presumably to avoid
    # selector-loop limitations there; confirm it is still needed on Python 3.8+.
    if platform.system() == "Windows":
        asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
    evaluation_detail_columns = ["ASCVD分层结果准确性评分", "诊断准确性评分", "用药方案评分", "治疗方案评分", "检查检验方案评分", "生活方式干预方案评分"]
    df_gxy = read_excel(input_file_gxy)
    if df_gxy is not None:
        results_gxy = asyncio.run(process_data(df_gxy, model_names, generate_gxy_prompt, evaluation_detail_columns))
        write_excel(output_file_gxy, results_gxy)
    else:
        print("No hypertension data to process.")


if __name__ == "__main__":
    # Judge models used for scoring (DeepSeek-R1 plus QwQ-Plus).
    model_names = ["deepseek-r1", "qwq-plus"]
    # Hypertension input/output workbook paths.
    input_file_gxy = "./input/input_results_gxy_qwen3-0515_30.xlsx"
    output_file_gxy = "./output/output_results_gxy_qwen3-0515_30_async_2.xlsx"

    start_time = time.time()
    process_gxy(input_file_gxy, output_file_gxy, model_names)
    elapsed_time = time.time() - start_time
    print(f"异步代码执行耗时: {elapsed_time:.4f} 秒")
    # The diabetes pipeline (process_tnb) is currently disabled; re-enable it
    # here with its own input/output paths when needed.
