import json
import os
import re

import redis
from flask import Blueprint, request

from core.resp_model import respModel
from app import database, application
from perftest.model.ApiInfoCaseModel import ApiInfoCase
from datetime import datetime
from perftest.model.ApiHistoryModel import ApiHistoryModel
from config.dev_settings import PERF_LLM_BASE_URL,PERF_LLM_MODEL_NAME,PERF_LLM_API_KEY, REPORT_ROOT_DIR
# 模块信息
module_name = "AiReport"  # 模块名称
module_route = Blueprint(f"route_{module_name}", __name__)


# from core.RedisManager import RedisConnection
# Run an LLM analysis over a finished performance-test run's CSV reports and
# return the parsed JSON result. (The function name queryPlanStatus and the old
# note about reading plan status from redis are historical and do not describe
# this endpoint's current behavior.)
@module_route.route(f"/{module_name}/ai", methods=["GET"])
def queryPlanStatus():
    """Analyze a finished perf-test run's CSV reports with an LLM.

    Query params:
        projectId: id of the project the caller claims the history belongs to.
        historyId: id of the ApiHistoryModel row whose report folder to read.

    Returns:
        respModel success payload with the parsed AI analysis (a JSON list),
        or an error payload on bad params / missing permission / missing
        report files / unparseable AI output.
    """
    # Validate query parameters up front: int(None) / int("abc") would
    # otherwise bubble up as an unhandled 500.
    try:
        project_id = int(request.args.get("projectId"))
        history_id = int(request.args.get("historyId"))
    except (TypeError, ValueError):
        return respModel.error_resp("参数错误")

    # Do the ownership check inside the app context so we never touch
    # model attributes on a detached instance.
    with application.app_context():
        history = ApiHistoryModel.query.filter_by(id=history_id).first()
        if history is None or history.project_id != project_id:
            return respModel.error_resp("没有项目权限")
        # history_detail holds the report folder name (an execute uuid).
        execute_uuid = history.history_detail

    # Read the three CSV reports produced by the run; a missing/deleted
    # report folder must not become an unhandled FileNotFoundError.
    report_dir = os.path.join(REPORT_ROOT_DIR, execute_uuid)
    try:
        requests_content = read_csv_line_by_line(os.path.join(report_dir, "requests.csv"))
        failures_content = read_csv_line_by_line(os.path.join(report_dir, "failures.csv"))
        exceptions_content = read_csv_line_by_line(os.path.join(report_dir, "exceptions.csv"))
    except OSError:
        return respModel.error_resp("报告文件不存在")

    # Render the prompt template that lives next to this module.
    from jinja2 import Environment, FileSystemLoader
    env = Environment(loader=FileSystemLoader(os.path.dirname(__file__)))
    template = env.get_template("prompt.txt")
    ai_prompt = template.render({
        "requests_content": requests_content,
        "failures_content": failures_content,
        "exceptions_content": exceptions_content,
    })

    from openai import OpenAI
    ai_client = OpenAI(
        base_url=PERF_LLM_BASE_URL,
        api_key=PERF_LLM_API_KEY
    )
    completion = ai_client.chat.completions.create(
        model=PERF_LLM_MODEL_NAME,
        # Model name is configurable; see https://help.aliyun.com/zh/model-studio/getting-started/models
        messages=[{"role": "user", "content": [{"type": "text", "text": ai_prompt}]}]
    )
    # Read the message content directly instead of round-tripping the whole
    # completion object through model_dump_json() + json.loads().
    ai_response = completion.choices[0].message.content
    print("AI执行结果： ", ai_response)

    # The prompt asks the model to wrap its answer in a ```json fenced
    # block; extract and parse that payload, guarding against malformed JSON.
    json_match = re.search(r'```json\n(.*?)```', ai_response, re.DOTALL)
    if json_match is None:
        return respModel.error_resp("AI 分析异常")
    try:
        json_data = json.loads(json_match.group(1))
    except json.JSONDecodeError:
        return respModel.error_resp("AI 分析异常")
    return respModel.ok_resp_simple_list(json_data, "查询成功")
    

def read_csv_line_by_line(file_path):
    """Return the file's contents as one string: each line stripped of
    surrounding whitespace, lines joined with a single newline."""
    with open(file_path, 'r', encoding='utf-8') as fh:
        stripped = [raw_line.strip() for raw_line in fh]
    return "\n".join(stripped)
# main
if __name__ == '__main__':
    # Standalone smoke test of the AI-analysis pipeline, bypassing Flask.
    #
    # SECURITY: never hard-code real API keys in source control — the key
    # previously embedded here was a leaked credential. Read secrets from
    # the environment (or uncomment the config.dev_settings import below).
    PERF_LLM_API_KEY = os.environ.get("PERF_LLM_API_KEY", "")
    PERF_LLM_BASE_URL = os.environ.get(
        "PERF_LLM_BASE_URL", "https://dashscope.aliyuncs.com/compatible-mode/v1")
    PERF_LLM_MODEL_NAME = os.environ.get(
        "PERF_LLM_MODEL_NAME", "qwen3-235b-a22b-instruct-2507")
    REPORT_ROOT_DIR = os.environ.get("REPORT_ROOT_DIR", r"/report")  # report root folder
    # from config.dev_settings import PERF_LLM_BASE_URL,PERF_LLM_MODEL_NAME,PERF_LLM_API_KEY, REPORT_ROOT_DIR
    executeUuid = "ec99dbe72d104c5fb78845ae2528d647"  # sample run to analyze

    # Read the three CSV reports produced by the sample run.
    report_dir = os.path.join(REPORT_ROOT_DIR, executeUuid)
    requests_content = read_csv_line_by_line(os.path.join(report_dir, "requests.csv"))
    failures_content = read_csv_line_by_line(os.path.join(report_dir, "failures.csv"))
    exceptions_content = read_csv_line_by_line(os.path.join(report_dir, "exceptions.csv"))

    # Render the prompt template that lives next to this module.
    from jinja2 import Environment, FileSystemLoader
    env = Environment(loader=FileSystemLoader(os.path.dirname(__file__)))
    template = env.get_template("prompt.txt")
    ai_prompt = template.render({
        "requests_content": requests_content,
        "failures_content": failures_content,
        "exceptions_content": exceptions_content,
    })
    print(ai_prompt)

    from openai import OpenAI
    ai_client = OpenAI(
        base_url=PERF_LLM_BASE_URL,
        api_key=PERF_LLM_API_KEY
    )
    completion = ai_client.chat.completions.create(
        model=PERF_LLM_MODEL_NAME,
        # Model name is configurable; see https://help.aliyun.com/zh/model-studio/getting-started/models
        messages=[{"role": "user", "content": [{"type": "text", "text": ai_prompt}]}]
    )
    # completion.model_dump_json() carries the full exchange, incl. token usage.
    print("AI执行结果： ", completion.model_dump_json())
    # Read the message content directly rather than re-parsing the JSON dump.
    ai_response = completion.choices[0].message.content
    print("AI执行结果： ", ai_response)
    # Extract the ```json fenced block from the model's answer.
    json_match = re.search(r'```json\n(.*?)```', ai_response, re.DOTALL)
    if json_match:
        json_data = json.loads(json_match.group(1))
        print("AI执行结果： ", json_data)
    else:
        print("未找到JSON数据")
        