import asyncio
import json
import logging
import os
from pathlib import Path
import re
from typing import List, Dict, Any

from jinja2 import Environment
from jsonschema import validate, ValidationError, SchemaError

from ..llm.LLM import LLMClient
from ..client.MCPClient import MCPClient
from ..client.MakeConfig import Configuration
from ..client.Session import ChatSession
from ..utils.parse_json import parse_evaluation_json
from ..prompts.env_prompt import env_prompt, not_pass_judge_prompt
from ..prompts.val_prompt import val_prompt_tool, val_prompt_eval

class ResponseValidator_withenv:
    """
    Validate generated test cases against a live MCP server using
    LLM-generated rules.

    Two phases run per test case:

    * tool validation  -- call the tool directly and check its output
      against the case's validation rules; on failure, ask an LLM whether
      the cause is environmental and, if so, retry with an LLM-proposed
      environment-setup script (up to ``max_attempts`` times);
    * end-to-end validation -- drive a full chat session with the case's
      query and let an LLM judge the final answer.

    Results are checkpointed to disk after every case so an interrupted
    run can be resumed.
    """

    def __init__(self, api_key: str = None, config_path: str = None, testcase_path: str = None, max_attempts: int = 3):
        """
        Create a new test validator.

        Args:
            api_key: API key for the language model.
            config_path: Path to the MCP servers configuration file.
            testcase_path: Path to the JSON file holding the test cases.
            max_attempts: Max environment-fix retries per tool validation.
        """
        Config_class = Configuration()
        self.config = Config_class.load_config(config_path)
        self.llm = LLMClient(api_key)
        self.testcase_path = testcase_path
        self.max_attempts = max_attempts
        with open(self.testcase_path, 'r', encoding='utf-8') as file:
            self.testcases = json.load(file)
        self.env_script = ''
        self.jinja_env = Environment()

        # Checkpoint/resume: reload previously saved results so that
        # already-processed case ids are skipped on re-runs.
        self.history_results = self._load_history_results()
        self.processed_ids = {case["id"] for case in self.history_results}

        self.server = None
        # Per-case timeouts (seconds) for the two validation phases.
        self.TOOL_VALIDATION_TIMEOUT = 180
        self.EVAL_VALIDATION_TIMEOUT = 180

    def _load_history_results(self) -> List[Dict[str, Any]]:
        """Reload results completed in a previous run, if the file exists."""
        folder_path = os.path.dirname(self.testcase_path)
        filename = "validation_results_eval_env.json"
        result_filepath = os.path.join(folder_path, filename)
        if os.path.exists(result_filepath):
            try:
                with open(result_filepath, 'r', encoding='utf-8') as f:
                    history = json.load(f)
                # Only trust the file if it is a list whose entries all
                # carry an "id"; otherwise regenerate from scratch.
                if isinstance(history, list) and all("id" in case for case in history):
                    logging.info(f"成功加载历史结果，已处理 {len(history)} 个测试用例")
                    return history
                else:
                    logging.warning("历史结果文件格式无效，将重新生成结果")
                    return []
            except Exception as e:
                logging.error(f"读取历史结果失败: {e}，将重新生成结果")
                return []
        else:
            logging.info("未找到历史结果文件，将从头开始处理")
            return []

    async def run(self):
        """Validate every unprocessed test case and checkpoint results after each."""
        # BUG FIX: the original built a Warning instance without raising or
        # logging it, and the message was not an f-string so the path was
        # never interpolated. Log the warning properly instead.
        if not self.testcases:
            logging.warning(f'No testcase found in the file {self.testcase_path}. Nothing to validate.')

        res = self.history_results.copy()

        for case in self.testcases:
            case_id = case.get("id")
            if case_id in self.processed_ids:
                logging.info(f"测试用例 ID: {case_id} 已处理，跳过")
                continue

            try:
                # The server name is encoded in the testcase directory name,
                # e.g. ".../<server>_2025xxxx/testcases.json".
                server_name = self.testcase_path.split('/')[-2].split('_2025')[0]

                print("\n========================================")
                print(f"Validating Server: {server_name}")
                print("========================================\n")

                # BUG FIX: the original interpolated the builtin `id` here
                # instead of the current case id.
                logging.info(f"\n--- Validating Test Case ID: {case_id} for Tool: {case['toolName']} ---")
                validation_log_tool = None
                try:
                    validation_log_tool = await asyncio.wait_for(
                        self.tool_validation(case, server_name),
                        timeout=self.TOOL_VALIDATION_TIMEOUT,
                    )
                except asyncio.TimeoutError:
                    logging.error(f"测试用例 ID: {case_id} 工具验证超时（{self.TOOL_VALIDATION_TIMEOUT}秒）")
                    validation_log_tool = {
                        "output": "",
                        "passed": False,
                        "message": f"工具验证超时（{self.TOOL_VALIDATION_TIMEOUT}秒）",
                        "reason_not_passed": "工具验证超时",
                        "option_not_passed": "",
                        "history": []
                    }

                logging.info("\n--- Validating End-to-end Test Case ---")
                validation_log_eval = None

                try:
                    validation_log_eval = await asyncio.wait_for(
                        self.eval_validation(case, server_name),
                        timeout=self.EVAL_VALIDATION_TIMEOUT,
                    )
                except asyncio.TimeoutError:
                    logging.error(f"测试用例 ID: {case_id} 端到端验证超时（{self.EVAL_VALIDATION_TIMEOUT}秒）")
                    validation_log_eval = {
                        "output": "",
                        "passed": False,
                        "message": f"端到端验证超时（{self.EVAL_VALIDATION_TIMEOUT}秒）",
                    }

                result = {
                    "id": case_id,
                    "toolName": case["toolName"],
                    "input": case["input"],
                    "description": case["description"],
                    "query": case["query"],
                    "expect": case["expect"]["status"],
                    "env_script": self.env_script,
                    "validation_tool": None,
                    "validation_eval": None}

                if validation_log_tool:
                    validation_log_tool_res = {
                        "output": validation_log_tool["output"],
                        "passed": validation_log_tool["passed"],
                        "rule_results": validation_log_tool["message"],
                        "reason_not_passed": validation_log_tool.get("reason_not_passed", ""),
                        "option_not_passed": validation_log_tool.get("option_not_passed", ""),
                        "history": validation_log_tool.get("history", []),
                    }
                    result.update({"validation_tool": validation_log_tool_res})

                if validation_log_eval:
                    validation_log_eval_res = {
                        "output": validation_log_eval["output"],
                        "passed": validation_log_eval["passed"],
                        "message": validation_log_eval["message"],
                    }
                    result.update({"validation_eval": validation_log_eval_res})
                res.append(result)

            except Exception as e:
                logging.error(f"主程序出错: {e}")
                raise

            # Checkpoint after every case so a crash loses at most one result.
            self.save_to_file(server_name, res)

    async def tool_validation(self, case, server_name):
        """
        Validate that the case's direct tool call passes its rules.

        On failure, ask an LLM whether the cause is environmental ("a") or
        a defect in the validation rule itself ("b"). For environmental
        causes, request a bash setup script and retry with it applied.

        Returns:
            The last validation log dict, or None if no attempt completed.
        """
        case_id = case["id"]
        srv_config = self.config["mcpServers"][server_name]

        attempt = 0
        # LLM verdict on why the test did not pass; options are
        # "a. Unmet special environmental requirement" or
        # "b. Issue with the validation rule itself".
        option_not_passed = ""
        env_script = ""
        # Free-text reason for the failure, as given by the LLM.
        reason_not_passed = ""
        server = None
        validation_log_tool = None
        history = []

        while attempt < self.max_attempts:
            print(f"Attempt {attempt + 1} of {self.max_attempts}")
            server = None
            try:
                # Spin up a fresh server instance with the current env script.
                server = MCPClient(server_name, srv_config, env_script, use_docker=True)
                await server.initialize()
                validation_log_tool = await self.validate_toolcase(server, case)

                # Validation passed.
                if validation_log_tool.get("passed"):
                    logging.info(f"✅ 测试用例 [ID: {case_id}] 工具接口测试验证通过")
                    # Remember the working env script for eval_validation.
                    self.env_script = env_script
                    return validation_log_tool

                if not option_not_passed:
                    option_not_passed = self.get_not_pass_option(server, validation_log_tool, case)
                    if not option_not_passed:
                        # BUG FIX: the original log message claimed a script
                        # extraction failure; the missing artifact here is
                        # the LLM's pass/fail judgement.
                        logging.warning("🔴 未从LLM响应中提取到有效判定结果,本次尝试跳过")
                        attempt += 1
                        continue
                    if option_not_passed.get("option", "").startswith('b'):
                        # The rule itself is broken, so validation can never
                        # pass -- stop retrying.
                        break
                    reason_not_passed = option_not_passed.get("reason", "")

                # Ask the LLM for an environment-setup script to try next.
                new_env_script = self.get_env_script(server, validation_log_tool, reason_not_passed, case, history)
                if not new_env_script:
                    logging.warning("🔴 未从LLM响应中提取到有效脚本,本次尝试跳过")
                    attempt += 1
                    continue
                logging.info(f"成功提取环境调整脚本（长度: {len(new_env_script)} 字符）")
                env_script = new_env_script
                history.append({"attempt": attempt, "env_script": new_env_script, "output": validation_log_tool["output"]})
                attempt += 1

            except Exception as e:
                logging.error(f"❌ 尝试 {attempt + 1} 失败: {str(e)}")
                attempt += 1

            finally:
                # Keep the last server instance around for eval_validation.
                if server:
                    self.server = server
        return validation_log_tool

    def get_dependencies(self, server):
        """Collect requirements.txt contents from the server's source tree.

        NOTE(review): if multiple requirements.txt files exist, only the
        last one found is kept -- confirm that this is intended.
        """
        dependencies = ""
        if not server.host_mcp_path:
            print("Error: server.host_mcp_path is not set")
        else:
            root = Path(server.host_mcp_path)
            for req_file in root.rglob("requirements.txt"):  # recurse into all subdirectories
                if req_file.is_file():
                    try:
                        dependencies = req_file.read_text(encoding="utf-8")
                        print(f"在 {req_file} 中找到依赖项：\n{dependencies}")
                    except Exception as e:
                        print(f"读取文件失败 {req_file}: {e}")
        return dependencies

    def get_not_pass_option(self, server, validation_log_tool, case):
        """Ask the LLM why the tool case failed.

        Returns:
            The parsed judgement dict (keys include "option" and "reason"),
            or None if the LLM response could not be parsed.
        """
        dependencies = self.get_dependencies(server)

        judge_prompt = not_pass_judge_prompt.format(
            dependencies=dependencies,
            testcases=json.dumps(case),
            output=json.dumps(validation_log_tool["output"]),
            validation_results=json.dumps(validation_log_tool["message"]))

        judge_output = self.llm.get_response([{"role": "user", "content": judge_prompt}])
        judge_json = parse_evaluation_json(judge_output)
        if not judge_json:
            return None
        logging.info(f"❌ The case is not passed due to {judge_json['reason']}")
        return judge_json

    def get_env_script(self, server, validation_log_tool, reason_not_passed, case, history):
        """Ask the LLM for a bash environment-setup script.

        The script is expected inside <script>...</script> tags in the
        LLM response; returns "" when none is found.
        """
        dependencies = self.get_dependencies(server)

        env_template = self.jinja_env.from_string(env_prompt)

        # Summarize previous attempts so the LLM does not repeat itself.
        history_text = ""
        for part in history:
            history_text += f"Attempt {part['attempt']}:\nbash script:\n{part['env_script']}\nOutput:\n{json.dumps(part['output'])}\n\n"

        env_vars = {
            "reason": reason_not_passed,
            "dependencies": dependencies,
            "testcases": json.dumps(case),
            "output": json.dumps(validation_log_tool["output"]),
            "validation_results": json.dumps(validation_log_tool["message"]),
            "history": history_text,
        }

        env_prompt_formatted = env_template.render(**env_vars)
        env_output = self.llm.get_response([{"role": "user", "content": env_prompt_formatted}])

        script_pattern = re.compile(r'<script.*?>(.*?)</script>', re.DOTALL | re.IGNORECASE)
        matches = script_pattern.findall(env_output)
        env_script = matches[0].strip() if matches else ""
        return env_script

    async def validate_toolcase(self, server, case: dict) -> Dict[str, Any]:
        """
        Validate a single tool case against a running server.

        Args:
            server: An initialized MCPClient instance.
            case: The test case dict (toolName, input, expect, ...).

        Returns:
            A dictionary with validation results; "passed" is True when at
            least half of the applicable rules succeeded.
        """
        # Send request to the tool via MCP server.
        output = await server.execute_tool(case["toolName"], case["input"])

        if not output:
            print(f"No output received for Test Case ID: {case['id']}")
            return {
                "id": case["id"],
                "toolName": case["toolName"],
                "input": case["input"],
                "description": case["description"],
                "query": case["query"],
                "expect": case["expect"]['status'],
                "message": f"No output received for Test Case ID: {case['id']}",
                "passed": False,
                "output": output,
            }

        validation_rules = case["expect"]['validation_rules'] if 'validation_rules' in case['expect'] else []
        rule_results = []
        expect_status = case["expect"]['status'] if 'status' in case['expect'] else "success"

        # If the tool output is a list of JSON objects, schema rules apply;
        # otherwise only contains/equals/llm rules make sense. Like the
        # original, only the first element is inspected.
        first = next(iter(output))
        type_expect = 'schema' if isinstance(first, dict) else 'contains'

        if type_expect != 'schema':
            validation_rules = [rule for rule in validation_rules if rule["type"] in ['contains', "equals", "llm"]]

        for rule in validation_rules:
            valid_per_rule, message_per_rule = self.validate_single_rule(output, rule, case)

            rule_for_log = rule.copy()
            if rule_for_log.get('type') == "schema":
                rule_for_log['value'] = json.dumps(rule_for_log["value"], ensure_ascii=False, separators=(',', ':'))

            if not valid_per_rule:
                # An LLM parse failure or a broken schema is a defect in the
                # rule, not a validation failure -- exclude it from the tally.
                if message_per_rule != "Failed to parse LLM validation response." and \
                        not message_per_rule.startswith("Invalid schema -"):
                    rule_results.append({
                        "rule": rule_for_log,
                        "rule_passed": False,
                        "rule_error": message_per_rule
                    })
            else:
                rule_results.append({
                    "rule": rule_for_log,
                    "rule_passed": True,
                    "rule_error": "passed"
                })

        # Majority vote: pass when at least half of the counted rules passed.
        passed_count = sum(1 for r in rule_results if r['rule_passed'])
        if rule_results and passed_count >= len(rule_results) / 2:
            all_passed = True
            print(f"All validations passed for Test Case ID: {case['id']}")
        else:
            all_passed = False
            print(f"Validation result mismatch for Test Case ID: {case['id']}")
            print(f"Expected status: {expect_status}")
            print(f"Rule results: {rule_results}")

        # Truncate the output before persisting it.
        output = self.truncate_output(output)

        return {
            "id": case["id"],
            "toolName": case["toolName"],
            "input": case["input"],
            "description": case["description"],
            "query": case["query"],
            "expect": case["expect"]['status'],
            "message": rule_results,
            "passed": all_passed,
            "output": output,
        }

    def validate_single_rule(self, output: List, rule, case):
        """Apply one validation rule to the tool output.

        Returns:
            A (passed, message) tuple. BUG FIX: the original returned None
            (crashing the caller's tuple unpacking) for unknown rule types
            and for schema rules whose "type" is neither array nor object.
        """
        if rule['type'] == 'contains':
            # Rule values are often phrased as "... contains X"; keep only
            # the part after the keyword, then strip noise characters.
            if "contain" in rule['value']:
                rule['value'] = rule['value'].split("contain")[-1]
            if "contains" in rule['value']:
                rule['value'] = rule['value'].split("contains")[-1]
            rule["value"] = self.clean_value(rule['value']).lower()
            if all(isinstance(o, dict) for o in output):
                output_cat = json.dumps(output, ensure_ascii=False, separators=(',', ':'))
            else:
                output_cat = ' '.join(str(o) for o in output)
            if rule['value'] in output_cat.lower():
                return True, f""
            else:
                return False, f"Output does not contain expected substring: {rule['value']}"

        elif rule['type'] == 'equals':
            if not isinstance(rule['value'], list):
                rule['value'] = [rule['value']]
            if rule['value'] == output:
                return True, f""
            else:
                return False, f"Output does not exactly equal expected value: {rule['value']}"

        elif rule['type'] == 'schema':
            if not isinstance(rule['value'], dict):
                return False, f"Invalid schema"
            if rule['value'].get('type') == 'array':
                try:
                    validate(instance=output, schema=rule['value'])
                    return True, "Schema validation passed."
                except ValidationError as e:
                    return False, f"Schema validation failed: {str(e)}"
                except SchemaError as e:
                    return False, f"Invalid schema - {str(e)}"

            elif rule['value'].get('type') == 'object':
                try:
                    validate(instance=output[0], schema=rule['value'])
                    return True, "Schema validation passed."
                except ValidationError as e:
                    return False, f"Schema validation failed: {str(e)}"
                except SchemaError as e:
                    return False, f"Invalid schema - {str(e)}"

            else:
                # "Invalid schema - ..." messages are excluded from the
                # pass/fail tally by validate_toolcase, which is the right
                # treatment for a malformed rule.
                return False, f"Invalid schema - unsupported schema type: {rule['value'].get('type')}"

        elif rule['type'] == 'llm':
            output = '\n'.join(str(o) for o in output)
            valid, message = self.llm_rule_validation(output, rule, case)
            return valid, message

        else:
            return False, f"Unknown rule type: {rule['type']}"

    def clean_value(self, raw_value: str) -> str:
        """Strip quoting/punctuation noise from a rule value."""
        BAD_CHARS = {'"', ':', '!', '@', '$', '%', '&', ';', ',', '.', ' ', '\t', '\n', "'"}
        cleaned = raw_value.strip(''.join(BAD_CHARS))

        while cleaned and cleaned[-1] in BAD_CHARS:
            cleaned = cleaned[:-1]

        if not cleaned:
            return ""

        return cleaned

    def llm_rule_validation(self, output: str, rule: dict, case: dict):
        """Ask the LLM to judge tool output against an 'llm'-type rule.

        Returns:
            A (passed, explanation) tuple.
        """
        try:
            val_prompt_tool_formatted = val_prompt_tool.format(
                tool_name=case["toolName"],
                input=case["input"],
                validation_rule=rule['value'],
                output=output
            )
            llm_response = self.llm.get_response([{"role": "user", "content": val_prompt_tool_formatted}])
            llm_result = parse_evaluation_json(llm_response)
            # BUG FIX: parse_evaluation_json can return None (see its other
            # call sites), which crashed the .get() calls below.
            if not llm_result:
                return False, "Failed to parse LLM validation response."
            content_valid = llm_result.get('answer', '').lower() == 'yes'
            message = llm_result.get('explanation', '')
            return content_valid, message
        except json.JSONDecodeError:
            # BUG FIX: original built a Warning instance without raising it.
            logging.warning("Failed to parse LLM validation response.")
            return False, "Failed to parse LLM validation response."

    async def eval_validation(self, case, server_name):
        """
        Run the end-to-end (query-driven) validation for one test case.
        """
        case_id = case["id"]
        srv_config = self.config["mcpServers"][server_name]
        # Reuse the server left behind by tool_validation when available.
        if not self.server:
            self.server = MCPClient(server_name, srv_config)
            await self.server.initialize()
        validation_log_eval = await self.validate_evalcase(self.server, case)

        if validation_log_eval.get("passed"):
            logging.info(f"✅ 测试用例 [ID: {case_id}] 端到端测试验证通过")
        else:
            logging.error(f"❌ 测试用例 [ID: {case_id}] 端到端测试验证失败")
        await self.server.cleanup()
        return validation_log_eval

    async def validate_evalcase(self, server, case):
        """Drive a chat session with the case's query and judge the result."""
        query = case["query"]
        self.session = ChatSession(server, self.llm)
        tool_included_or_not, tool_info, session_result = await self.session.handle_query(query)
        eval_results = []
        if tool_included_or_not:
            tool_name = tool_info["tool_name"]
            expected_tool = case["toolName"]
            # First check whether the correct tool was invoked.
            if tool_name != expected_tool:
                eval_results = {
                    "passed": False,
                    "message": f"Expect using tool: {expected_tool}, but got {tool_name}",
                }

            else:
                expect_rules = case["expect"]['validation_rules'] if 'validation_rules' in case['expect'] else []

                # Gather the textual expectations to show the judge LLM.
                res_expect = []
                for rule in expect_rules:
                    if rule["type"] in ["contains", "equals"]:
                        res_expect.append(rule['message'])
                    elif rule["type"] == "llm":
                        res_expect.append(rule['value'])

                expect_results = ' '.join(res_expect)

                if len(session_result) > 1000:
                    session_result = session_result[:800] + '...' + session_result[-200:]

                template = self.jinja_env.from_string(val_prompt_eval)
                if expect_results:
                    test_case_vars = {
                        "query": case["query"],
                        "expect_type": case["expect"]['status'],
                        "expected_output": "**Expected Output:** " + expect_results,
                        "output": session_result
                    }
                else:
                    test_case_vars = {
                        "query": case["query"],
                        "expect_type": case["expect"]['status'],
                        "expected_output": '',
                        "output": session_result
                    }

                val_prompt_eval_formatted = template.render(**test_case_vars)
                val_response = self.llm.get_response([{"role": "user", "content": val_prompt_eval_formatted}])

                val_result = parse_evaluation_json(val_response)
                if val_result:
                    passed = val_result.get('answer', '').lower() == 'yes'
                    message = val_result.get('explanation', '')
                    eval_results = {
                        "passed": passed,
                        "message": message,
                    }
                else:
                    eval_results = {
                        "passed": False,
                        "message": "Failed to parse LLM evaluation response."
                    }

        else:
            eval_results = {
                "passed": False,
                "message": "No tool was called in the response."
            }
        # Truncate the session output before persisting it.
        if isinstance(session_result, str):
            output = session_result[:800] + '...' + session_result[-200:] if len(session_result) > 1000 else session_result
        elif isinstance(session_result, list):
            output = session_result[:10] if len(session_result) > 10 else session_result
        else:
            # BUG FIX: `output` was unbound for any other result type,
            # raising UnboundLocalError at the return below.
            output = session_result

        return {
            "id": case["id"],
            "toolName": case["toolName"],
            "input": case["input"],
            "description": case["description"],
            "query": case["query"],
            "expect": case["expect"],
            "message": eval_results["message"],
            "passed": eval_results['passed'],
            "output": output,}

    def truncate_output(self, output):
        """Shorten long outputs (lists to 10 items, strings to ~1000 chars)."""
        if isinstance(output, list):
            output = output[:10] if len(output) > 10 else output
            if all(isinstance(o, dict) for o in output):
                output = [json.dumps(o, ensure_ascii=False, separators=(',', ':')) for o in output]
        elif isinstance(output, str):
            output = output[:800] + '...' + output[-200:] if len(output) > 1000 else output
        return output

    def save_to_file(self, server_name: str, validationlog: List) -> bool:
        """
        Save validation results (array of JSON) next to the testcase file.

        Returns:
            True on success, False on any error (errors are printed).
        """
        try:
            if not isinstance(validationlog, list):
                raise ValueError("input data should be an array of JSON")

            folder_path = os.path.dirname(self.testcase_path)
            filename = "validation_results_eval_env.json"
            filepath = os.path.join(folder_path, filename)
            with open(filepath, 'w', encoding='utf-8') as file:
                json.dump(validationlog, file, ensure_ascii=False, indent=4)
            print(f"{server_name} validation results are successfully saved into {filepath}")
            return True
        except IOError as e:
            print(f"文件操作错误: {e}")
        except ValueError as e:
            print(f"数据格式错误: {e}")
        except Exception as e:
            print(f"发生未知错误: {e}")

        return False