import pandas as pd
from tqdm import tqdm
from openai import OpenAI,AsyncOpenAI
import time
import json
import requests
from llm_result_util import result_json_tackle
from concurrent.futures import ThreadPoolExecutor, as_completed
from system_prompt_v2 import generate_gxy_prompt, generate_tnb_prompt
import asyncio
import platform

# DeepSeek-R1 medical-evaluation API configuration: Aliyun DashScope's
# OpenAI-compatible endpoint, used asynchronously by the scoring functions.
# SECURITY NOTE(review): the API key is hard-coded — move it to an
# environment variable or secrets manager before sharing this file.
client = AsyncOpenAI(
    api_key="sk-ccb85689031348b589b07b6f322db5b8",
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
)

# Maximum number of re-attempts per row when a model reply cannot be parsed.
retry_time = 3

def call_baidu_api(model_name, system_prompt, question):
    """Call the Baidu Qianfan chat-completions API synchronously.

    Args:
        model_name: Qianfan model identifier.
        system_prompt: instruction text sent as the first message.
        question: user question; truncated so the combined input stays
            within the 5120-character limit.

    Returns:
        The first choice's message content, or '' when absent.
    """
    # (Fixed: `url` was assigned the same value twice.)
    url = "https://qianfan.bj.baidubce.com/v2/chat/completions"

    print(f"Calling 百度云 model_name: " + model_name)
    start_time = time.time()  # record start time for latency logging

    # Keep the combined input within 5120 characters by truncating the
    # question. max(0, ...) guards against a system prompt that is itself
    # longer than the limit — a negative slice bound would keep the wrong
    # tail of the question instead of truncating it.
    max_length = 5120
    if len(system_prompt) + len(question) > max_length:
        question = question[:max(0, max_length - len(system_prompt))]

    payload = json.dumps({
        "model": model_name,
        "messages": [
            {
                # NOTE(review): the system prompt is sent with role
                # "assistant" — confirm whether "system" was intended.
                "role": "assistant",
                "content": system_prompt
            },
            {
                "role": "user",
                "content": question
            }
        ],
        "disable_search": False,
        "enable_citation": False
    }, ensure_ascii=False)
    headers = {
        'Content-Type': 'application/json',
        # SECURITY NOTE(review): hard-coded bearer token — move to config.
        'Authorization': 'Bearer bce-v3/ALTAK-U7S4YWWVHS7oiyK8DFwMw/c9e98ab9ea526f31e82d020164ae1f9c03855b57'
    }

    # timeout keeps a network stall from hanging the whole pipeline.
    response = requests.post(url, headers=headers, data=payload.encode("utf-8"), timeout=300)
    end_time = time.time()  # record end time
    print('最终结果: ' + response.text)
    print(f"BAIDU API call took {end_time - start_time} seconds")  # log elapsed time
    return response.json().get('choices', [{}])[0].get('message', {}).get('content', '')


async def call_deepseek_by_bailian_block(model_names, prompt, question, answers_dict):
    """Score answers by streaming each judge model in `model_names` sequentially.

    Args:
        model_names: model identifiers, called one after another.
        prompt: system prompt for the judge model.
        question: the medical question being evaluated.
        answers_dict: mapping of answer-column name -> answer text; must
            contain the reference answer under key 'R1_Answer'.

    Returns:
        dict mapping model name -> response text (None when the call failed
        or 'R1_Answer' is missing).
    """
    # The reference answer is required before any model can score.
    # (Fixed: the original returned a `(model_name, None)` tuple here —
    # leftover from a removed thread-pool helper — breaking callers that
    # expect a dict.)
    if 'R1_Answer' not in answers_dict:
        print("Error: R1_Answer not found in answers_dict.")
        return {model_name: None for model_name in model_names}

    # Build the user message once; it is identical for every model.
    user_content = f"问题：{question}，标准答案R1_Answer：{answers_dict['R1_Answer']}"
    for col, ans in answers_dict.items():
        if col != 'R1_Answer':
            user_content += f"，参考答案{col}：{ans}" if ans else f"，参考答案{col}：(空)"

    results = {}
    for model_name in model_names:
        print(f"Calling model: {model_name}")
        start_time = time.time()
        answer_content = ""
        try:
            completion = await client.chat.completions.create(
                model=model_name,
                messages=[
                    {'role': 'system', 'content': prompt},
                    {'role': 'user', 'content': user_content}
                ],
                stream=True
            )
            if model_name == "qwq-plus":
                # qwq-plus streams plain content deltas only.
                response_content = ""
                async for chunk in completion:
                    if chunk.choices[0].delta.content:
                        response_content += chunk.choices[0].delta.content
                print(f'模型 {model_name} 最终结果: ' + response_content)
                end_time = time.time()
                print(f"模型 {model_name} API call took {end_time - start_time:.2f} seconds")
                answer_content = response_content
            else:
                reasoning_content = ""
                async for chunk in completion:
                    # An empty choices list marks the trailing usage chunk.
                    if not chunk.choices:
                        print("\nUsage:")
                        print(chunk.usage)
                    else:
                        delta = chunk.choices[0].delta
                        # Reasoning ("thinking") tokens arrive separately
                        # from the final answer tokens.
                        if hasattr(delta, 'reasoning_content') and delta.reasoning_content is not None:
                            reasoning_content += delta.reasoning_content
                        elif delta.content:
                            # (Fixed: `delta.content != ""` let None through
                            # and crashed the `+=` with a TypeError.)
                            answer_content += delta.content
                print(f"call model_name: {model_name}最终结果: {answer_content}")

                end_time = time.time()  # record end time
                print(f"model:{model_name} API call took {end_time - start_time} seconds")  # log elapsed time
        except Exception as e:
            print(f"Error calling model {model_name}: {e}")
            answer_content = None
        results[model_name] = answer_content

    return results


def read_excel(file_path):
    """Load an Excel workbook into a DataFrame.

    Returns the DataFrame on success, or None when the file is missing or
    cannot be parsed (the error is printed, never raised).
    """
    try:
        return pd.read_excel(file_path)
    except FileNotFoundError:
        print(f"Error: File {file_path} not found.")
    except Exception as e:
        print(f"Error reading Excel file: {e}")
    return None


def write_excel(file_path, data):
    """Persist `data` (anything DataFrame accepts) to an Excel file.

    Failures are printed rather than raised; the function always returns None.
    """
    try:
        frame = pd.DataFrame(data)
        frame.to_excel(file_path, index=False)
    except Exception as e:
        print(f"Error writing to Excel file: {e}")
    else:
        print(f"Results written to {file_path}")



async def process_data(df, model_names, prompt_generator):
    """Process the DataFrame row by row, scoring every answer column with
    each judge model.

    Args:
        df: input data; must contain "Question", "Ascvd", "Ascvd_Result"
            and one or more columns whose name contains "Answer"
            (including the reference column "R1_Answer").
        model_names: judge model identifiers, called sequentially per row.
        prompt_generator: callable (model_names, answer_columns) -> system
            prompt string for the judge.

    Returns:
        A list of per-row result dicts holding the original answers plus
        "<model>_<col>_Score" and "<model>_Evaluation_Detail" entries.
    """
    results = []
    # Every column whose name contains "Answer" is treated as scorable.
    answer_columns = [col for col in df.columns if 'Answer' in col]

    async def process_row(index, row,retry):
        # Score one row; `retry` counts re-attempts after unparseable model
        # output (capped at module-level `retry_time`).
        question = row["Question"]
        answers_dict = {col: (row[col].strip() if isinstance(row[col], str) else row[col]) for col in answer_columns}
        prompt = prompt_generator(model_names, answer_columns)

        #evaluation_results = await call_deepseek_by_bailian_block(model_names, prompt, question, answers_dict)

        # NOTE(review): the loop below is an inlined copy of
        # call_deepseek_by_bailian_block; the bare string is a leftover note.
        """调用DeepSeek API进行评分，支持多个模型并行调用"""
        evaluation_results = {}

        for model_name in model_names:
            print(f"Calling model: {model_name}")
            start_time = time.time()
            try:
                # The reference answer is required for scoring.
                # NOTE(review): this returns a (model_name, None) tuple from
                # process_row, leaking a tuple into `results` instead of a
                # result dict — confirm intended behavior.
                if 'R1_Answer' not in answers_dict:
                    print("Error: R1_Answer not found in answers_dict.")
                    return model_name, None

                user_content = f"问题：{question}，标准答案R1_Answer：{answers_dict['R1_Answer']}"
                for col, ans in answers_dict.items():
                    if col != 'R1_Answer':
                        user_content += f"，参考答案{col}：{ans}" if ans else f"，参考答案{col}：(空)"

                completion = await client.chat.completions.create(
                    model=model_name,
                    messages=[
                        {'role': 'system', 'content': prompt},
                        {'role': 'user', 'content': user_content}
                    ],
                    stream=True
                )
                reasoning_content = ""
                answer_content = ""
                if model_name == "qwq-plus":
                    # qwq-plus streams plain content deltas only.
                    response_content = ""
                    async for chunk in completion:
                        if chunk.choices[0].delta.content:
                            response_content += chunk.choices[0].delta.content
                    print(f'模型 {model_name} 最终结果: ' + response_content)
                    end_time = time.time()
                    print(f"模型 {model_name} API call took {end_time - start_time:.2f} seconds")
                    answer_content = response_content
                else:
                    async for chunk in completion:
                        # An empty choices list marks the trailing usage chunk.
                        if not chunk.choices:
                            print("\nUsage:")
                            print(chunk.usage)
                        else:
                            delta = chunk.choices[0].delta
                            # Accumulate the reasoning ("thinking") stream.
                            if hasattr(delta, 'reasoning_content') and delta.reasoning_content != None:
                                reasoning_content += delta.reasoning_content
                            else:
                                # Answer tokens begin once reasoning ends.
                                # NOTE(review): a None delta.content passes the
                                # != "" test and makes the += raise; the except
                                # below then records None for this model.
                                if delta.content != "":
                                    answer_content += delta.content
                    print(f"call model_name: {model_name}最终结果: {answer_content}")

                    end_time = time.time()  # record end time
                    print(f"model:{model_name} API call took {end_time - start_time} seconds")  # log elapsed time
            except Exception as e:
                print(f"Error calling model {model_name}: {e}")
                answer_content = None
            evaluation_results[model_name] = answer_content
        '''
        with ThreadPoolExecutor(max_workers=len(model_names)) as executor:
            futures = [executor.submit(call_model, model_name) for model_name in model_names]
            for future in as_completed(futures):
                model_name, result = future.result()
                results[model_name] = result
        '''


        result_data = {"Question": question,
                       "Ascvd": row["Ascvd"],
                       "Ascvd_Result": row["Ascvd_Result"]
                       }
        for col in answer_columns:
            result_data[col] = row[col]

        for model_name in model_names:
            eval_result = evaluation_results.get(model_name)
            if not eval_result:
                # No usable response: mark every answer column explicitly.
                for col in answer_columns:
                    result_data[f"{model_name}_{col}_Score"] = "no_response"
                continue

            try:

                result_str = result_json_tackle(eval_result)
                if result_str is None:
                    # Unparseable model output: retry the whole row.
                    # NOTE(review): when retries are exhausted, result_str
                    # stays None and json.loads below raises, landing in the
                    # parse_error branch.
                    if(retry < retry_time):
                        retry += 1
                        return await process_row(index, row,retry)

                data = json.loads(result_str)
                #data = json.dumps(eval_result)
                evaluation_detail = data.get('evaluation_detail', {})

                for col in answer_columns:
                    if not answers_dict[col]:
                        result_data[f"{model_name}_{col}_Score"] = "0/0"  # uniform marker for empty answers
                    else:
                        score_entry = data.get(f"{model_name}_{col}_score")
                        result_data[f"{model_name}_{col}_Score"] = str(score_entry)  # keep the raw score string

                result_data[f"{model_name}_Evaluation_Detail"] = eval_result
            except Exception as e:
                for col in answer_columns:
                    result_data[f"{model_name}_{col}_Score"] = f"parse_error:{str(e)[:50]}"
                result_data[f"{model_name}_Evaluation_Detail"] = f"解析失败：{str(e)[:100]}"

        return result_data

    '''
    with ThreadPoolExecutor(max_workers=5) as executor:
        futures = [executor.submit(process_row, index, row) for index, row in df.iterrows()]
        for future in tqdm(as_completed(futures), total=len(futures), desc="Processing rows"):
            try:
                results.append(future.result())
            except Exception as e:
                # 错误处理逻辑保持不变
                pass
    '''

    # Process in chunks of 20 rows so at most 20 requests run concurrently.
    chunk_size = 20
    for start in range(0, len(df), chunk_size):
        chunk = df.iloc[start:start + chunk_size]
        # Create one task per row without awaiting them yet.
        tasks = [
            asyncio.create_task(process_row(index, row,0))
            for index, row in chunk.iterrows()
        ]
        # Other work could run here while the tasks progress...
        print("任务已提交，主线程继续执行")

        # Wait for the whole batch before starting the next chunk.
        result = await asyncio.gather(*tasks)
        results += result
        print(f"----- 已处理 {len(chunk)} 行，当前批次结束 -----")

    return results


def process_tnb(input_file_tnb, output_file_tnb, model_names):
    """Score the diabetes (糖尿病) dataset and write results to Excel.

    Bug fix: `process_data` is a coroutine function; the original assigned
    the un-awaited coroutine object to `results_tnb`, so `write_excel`
    received a coroutine instead of row data. Run it to completion with
    asyncio.run, matching `process_gxy`.
    """
    df_tnb = read_excel(input_file_tnb)
    if df_tnb is not None:
        results_tnb = asyncio.run(process_data(df_tnb, model_names, generate_tnb_prompt))
        write_excel(output_file_tnb, results_tnb)
    else:
        print("No diabetes data to process.")


def process_gxy(input_file_gxy, output_file_gxy, model_names):
    """Score the hypertension (高血压) dataset and write results to Excel."""
    frame = read_excel(input_file_gxy)
    if frame is None:
        print("No hypertension data to process.")
        return
    scored_rows = asyncio.run(process_data(frame, model_names, generate_gxy_prompt))
    write_excel(output_file_gxy, scored_rows)


if __name__ == "__main__":
    # The default Proactor loop on Windows can misbehave with some async
    # clients; fall back to the selector event loop there.
    if platform.system() == "Windows":
        asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())

    # Input/output paths for the hypertension run.
    model_names = ["deepseek-r1", "qwq-plus"]
    input_file_gxy = "./input/input_results_gxy_qwen3-0515_error.xlsx"
    output_file_gxy = "./output/output_results_gxy_qwen3-0515_score_async_error_2.xlsx"

    run_started = time.time()
    process_gxy(input_file_gxy, output_file_gxy, model_names)
    run_elapsed = time.time() - run_started
    print(f"异步代码执行耗时: {run_elapsed:.4f} 秒")
    # Previous timings: 188.4266 s; 140 rows took 1277.3177 s.

    # To process the diabetes dataset instead, uncomment:
    #input_file_tnb = "../genanswer/output_results_tnb_qwen3-0513_new.xlsx"
    #output_file_tnb = "../eval/output_results_tnb_qwen3-0513_score.xlsx"
    #process_tnb(input_file_tnb, output_file_tnb, model_names)
