import requests
import json
import time
import numpy as np
from typing import Dict, List, Tuple, Optional
import configparser
import os
import sys

# NOTE(review): hardcoded API credential committed to source — this should be
# loaded from an environment variable or a secrets store instead; rotate the
# key if this file has been shared. TODO: confirm with the owner.
VALID_API_PASSWORD = "Bearer QdoSJjUpKEotpPRZefVO:aaJeSlTMCGWZIBKQhkTc"
# Default endpoint of the Spark (xf-yun) OpenAI-compatible chat-completions API.
VALID_API_URL = "https://spark-api-open.xf-yun.com/v2/chat/completions"

class SparkModelEvaluator:
    """Quantitatively evaluate prompt quality against the Spark chat API.

    Each candidate response is scored by the model itself on five weighted
    dimensions (relevance, completeness, accuracy, clarity, conciseness) and
    aggregated into a weighted total. All evaluations are accumulated in
    ``self.evaluation_history`` and can be persisted as JSON via
    :meth:`save_evaluation_results`.
    """

    def __init__(self, config_path: str = None):
        """Initialize the evaluator, loading (or creating) ``config.ini``.

        Parameters:
            config_path: path to the INI config file; defaults to
                ``config.ini`` next to this script.
        """
        # Directory containing this script; also used for saving result files.
        script_dir = os.path.dirname(os.path.abspath(__file__))
        if config_path is None:
            config_path = os.path.join(script_dir, "config.ini")
        self.script_dir = script_dir

        # BUGFIX: set the defaults BEFORE loading the config. load_config()
        # overrides these attributes when the file contains non-default
        # values; the original code assigned the defaults *after* the call,
        # silently clobbering any config-file override.
        self.api_password = VALID_API_PASSWORD
        self.api_url = VALID_API_URL
        self.config = self.load_config(config_path)

        # Evaluation dimensions and their weights (weights sum to 1.0).
        self.evaluation_dimensions = {
            "relevance": 0.3,       # relevance to the original prompt
            "completeness": 0.25,   # fully answers the prompt
            "accuracy": 0.25,       # factual correctness
            "clarity": 0.1,         # clear, well-structured wording
            "conciseness": 0.1      # no redundant content
        }
        # Every result from evaluate_prompt() is appended here.
        self.evaluation_history = []

    def load_config(self, config_path: str) -> configparser.ConfigParser:
        """Read the INI config, creating a default file if it is missing.

        Side effect: overrides ``self.api_password`` / ``self.api_url`` when
        the file contains values different from the built-in defaults.

        Returns:
            The parsed :class:`configparser.ConfigParser` instance.
        """
        config = configparser.ConfigParser()
        # Write a default config on first run so the user has a template.
        if not os.path.exists(config_path):
            config["SparkAPI"] = {
                "api_password": VALID_API_PASSWORD,
                "api_url": VALID_API_URL
            }
            with open(config_path, "w") as f:
                config.write(f)
            print(f"已创建默认配置文件 {config_path}")

        config.read(config_path)

        # Apply overrides only when they differ from the shipped defaults.
        if config.has_option("SparkAPI", "api_password") and config.get("SparkAPI", "api_password") != VALID_API_PASSWORD:
            self.api_password = config.get("SparkAPI", "api_password")
            # SECURITY FIX: do not echo the credential itself to stdout
            # (the original printed the full API password).
            print("已从配置文件加载API密码")
        if config.has_option("SparkAPI", "api_url") and config.get("SparkAPI", "api_url") != VALID_API_URL:
            self.api_url = config.get("SparkAPI", "api_url")
            print(f"已从配置文件加载API URL: {self.api_url}")
        return config

    def call_spark_model(self, prompt: str, model: str = "x1", temperature: float = 0.7) -> Tuple[Optional[str], float]:
        """Call the Spark API with a streaming request and collect the reply.

        Parameters:
            prompt: user prompt text.
            model: model identifier sent to the API.
            temperature: sampling temperature (randomness).

        Returns:
            ``(full_response, elapsed_seconds)``; ``full_response`` is None
            on any request failure.
        """
        headers = {
            'Authorization': self.api_password,
            'content-type': "application/json"
        }
        body = {
            "model": model,
            "user": "user_id",
            "messages": [{"role": "user", "content": prompt}],
            "stream": True,  # server-sent-events streaming
            "temperature": temperature
        }
        full_response = ""  # accumulated streamed content
        start_time = time.time()
        try:
            response = requests.post(
                url=self.api_url,
                json=body,
                headers=headers,
                stream=True,
                timeout=60
            )
            if response.status_code != 200:
                print(f"API请求失败: {response.status_code}, 响应内容: {response.text}")
                return None, time.time() - start_time

            # Each streamed line looks like b"data: {...json...}"; the
            # terminal sentinel line contains b"[DONE]".
            for raw_line in response.iter_lines():
                if not raw_line or b'[DONE]' in raw_line:
                    continue
                if len(raw_line) <= 6:
                    continue
                payload = raw_line[6:]  # strip the 6-byte b"data: " prefix
                try:
                    chunk = json.loads(payload)
                except json.JSONDecodeError:
                    continue  # ignore keep-alives / malformed fragments
                choices = chunk.get('choices')
                if choices and 'delta' in choices[0]:
                    content = choices[0]['delta'].get('content')
                    if content:
                        full_response += content

            return full_response, time.time() - start_time

        except requests.exceptions.RequestException as e:
            print(f"请求异常: {str(e)}")
            return None, time.time() - start_time
        except Exception as e:
            print(f"发生未知错误: {str(e)}")
            return None, time.time() - start_time

    def evaluate_response(self, prompt: str, response: str) -> Dict[str, float]:
        """Score a model response on all dimensions using the model as judge.

        Parameters:
            prompt: the original prompt.
            response: the model response to score.

        Returns:
            Dict mapping each dimension to a 0-1 score, plus a weighted
            ``"total"`` key (always present, including for empty responses —
            the original early return omitted it).
        """
        if not response:
            scores = {dim: 0.0 for dim in self.evaluation_dimensions}
            scores["total"] = 0.0  # keep the return shape consistent
            return scores

        # Ask the model to act as judge and emit strict JSON.
        eval_prompt = f"""
        请你作为一个提示词评估专家，根据以下原始提示词和模型响应，从五个维度进行评分（0-1分，保留2位小数）：
        
        原始提示词：{prompt}
        模型响应：{response}
        
        评估维度：
        1. 相关性（relevance）：模型响应与原始提示词的相关程度
        2. 完整性（completeness）：模型响应是否完整回答了提示词中的问题或要求
        3. 准确性（accuracy）：模型响应内容是否准确，有无错误信息
        4. 清晰度（clarity）：模型响应的表达是否清晰易懂，结构是否合理
        5. 简洁性（conciseness）：模型响应是否简洁明了，没有冗余内容
        
        请严格按照以下JSON格式返回评估结果，不要添加任何额外的解释文字：
        {{"relevance": 0.0, "completeness": 0.0, "accuracy": 0.0, "clarity": 0.0, "conciseness": 0.0}}
        """

        try:
            # Low temperature for a deterministic judgment.
            evaluation_result, _ = self.call_spark_model(eval_prompt, temperature=0.1)

            if evaluation_result:
                # ROBUSTNESS: the judge may wrap the JSON in prose despite
                # instructions — extract the outermost {...} span first.
                start = evaluation_result.find("{")
                end = evaluation_result.rfind("}")
                if start != -1 and end > start:
                    evaluation_result = evaluation_result[start:end + 1]
                scores = json.loads(evaluation_result)

                # Validate: every dimension present, numeric, clamped to [0, 1].
                for dim in self.evaluation_dimensions:
                    if dim not in scores or not isinstance(scores[dim], (int, float)):
                        scores[dim] = 0.0
                    else:
                        scores[dim] = max(0.0, min(1.0, round(float(scores[dim]), 2)))
            else:
                scores = {dim: 0.0 for dim in self.evaluation_dimensions}
                print("大模型评估失败，使用默认评分")
        except Exception as e:
            print(f"评估过程中发生错误: {str(e)}")
            scores = {dim: 0.0 for dim in self.evaluation_dimensions}

        # Weighted total over all dimensions.
        total_score = sum(scores[dim] * weight for dim, weight in self.evaluation_dimensions.items())
        scores["total"] = round(total_score, 2)
        return scores

    def evaluate_prompt(self, prompt: str, model: str = "x1", runs: int = 3) -> Dict:
        """Evaluate one prompt over several runs and average the scores.

        Parameters:
            prompt: prompt to evaluate.
            model: model identifier.
            runs: number of repetitions to average over.

        Returns:
            Result dict (also appended to ``evaluation_history``), or None
            if every run failed.
        """
        print(f"正在评估提示词 (运行 {runs} 次): {prompt[:50]}...")

        all_responses = []
        all_scores = []
        total_time = 0.0

        for i in range(runs):
            print(f"第 {i+1}/{runs} 次运行...")
            response, elapsed = self.call_spark_model(prompt, model)

            if response:
                all_responses.append(response)
                total_time += elapsed
                scores = self.evaluate_response(prompt, response)
                all_scores.append(scores)
                print(f"第 {i+1} 次评分: {scores['total']}")
            else:
                print(f"第 {i+1} 次运行失败")

        if not all_scores:
            print("所有运行都失败了")
            return None

        # BUGFIX: cast np.mean results to plain float — np.float64 values are
        # not JSON-serializable and broke save_evaluation_results().
        avg_scores = {dim: round(float(np.mean([s[dim] for s in all_scores])), 2)
                      for dim in self.evaluation_dimensions}
        avg_scores["total"] = round(float(np.mean([s["total"] for s in all_scores])), 2)

        avg_time = total_time / len(all_responses) if all_responses else 0

        evaluation_result = {
            "prompt": prompt,
            "model": model,
            "runs": runs,
            "avg_scores": avg_scores,
            "avg_time": round(avg_time, 2),
            "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
            "responses": all_responses
        }

        self.evaluation_history.append(evaluation_result)
        return evaluation_result

    def compare_prompts(self, prompts: List[str], model: str = "x1", runs: int = 3) -> List[Dict]:
        """Evaluate several prompts and rank them by average total score.

        Parameters:
            prompts: list of prompts to compare.
            model: model identifier.
            runs: repetitions per prompt.

        Returns:
            Evaluation results sorted by total score, best first.
        """
        results = []

        for i, prompt in enumerate(prompts):
            print(f"\n评估提示词 {i+1}/{len(prompts)}")
            result = self.evaluate_prompt(prompt, model, runs)
            if result:
                results.append(result)

        results.sort(key=lambda x: x["avg_scores"]["total"], reverse=True)
        return results

    def save_evaluation_results(self, filename: str = None, function_type: str = ""):
        """Persist ``evaluation_history`` as JSON.

        When ``filename`` is None a unique name is derived from the most
        recent prompt, the function type and a timestamp; relative names are
        placed in the script directory.
        """
        if filename is None:
            # Timestamp guarantees uniqueness across invocations.
            timestamp = time.strftime("%Y%m%d_%H%M%S")

            if self.evaluation_history:
                latest_result = self.evaluation_history[-1]
                # First few characters of the prompt, sanitized for filenames.
                prompt_preview = latest_result["prompt"][:10] if "prompt" in latest_result else "unknown"
                prompt_preview = "".join(c for c in prompt_preview if c.isalnum() or c in ("_", "-", " ")).strip()
                # Infer the function type when the caller did not supply one.
                if not function_type:
                    if len(self.evaluation_history) > 1 or ("responses" in latest_result and len(latest_result["responses"]) > 1):
                        function_type = "compare"
                    else:
                        function_type = "single"
            else:
                prompt_preview = "unknown"
                function_type = function_type or "unknown"

            filename = f"evaluation_results_{function_type}_{prompt_preview}_{timestamp}.json"

        # Relative names go next to the script.
        if not os.path.isabs(filename):
            filename = os.path.join(self.script_dir, filename)

        with open(filename, "w", encoding="utf-8") as f:
            json.dump(self.evaluation_history, f, ensure_ascii=False, indent=2)
        # BUGFIX: the original printed the literal "(unknown)" instead of the path.
        print(f"评估结果已保存到 {filename}")

    def print_evaluation_result(self, result: Dict, show_responses: bool = True):
        """Pretty-print one evaluation result, optionally with responses."""
        print("\n" + "="*50)
        print(f"提示词: {result['prompt'][:100]}{'...' if len(result['prompt'])>100 else ''}")
        print(f"模型: {result['model']}")
        print(f"运行次数: {result['runs']}")
        print(f"平均耗时: {result['avg_time']}秒")
        print("\n评分:")
        for dim, score in result["avg_scores"].items():
            if dim != "total":
                weight = self.evaluation_dimensions[dim]
                print(f"  {dim}: {score} (权重: {weight})")
        print(f"\n总分: {result['avg_scores']['total']}")

        # Responses are truncated to 500 chars each to keep output readable.
        if show_responses and "responses" in result:
            print("\n响应内容:")
            for i, response in enumerate(result["responses"]):
                print(f"\n第 {i+1} 次响应:")
                print(f"{response[:500]}{'...' if len(response) > 500 else ''}")
                print("-"*50)

        print("="*50 + "\n")

def _show_ranked_results(evaluator, results):
    """Print the ranked comparison summary, expanding entries on request.

    Extracted because choices "2" and "3" in main() previously duplicated
    this display loop verbatim.
    """
    print("\n===== 提示词比较结果 =====")
    for i, result in enumerate(results):
        print(f"\n排名 {i+1} (总分: {result['avg_scores']['total']})")
        print(f"提示词: {result['prompt'][:100]}{'...' if len(result['prompt'])>100 else ''}")

        # Let the user drill into each ranked prompt individually.
        show_detail = input("是否查看此提示词的详细结果和响应？(y/n): ").lower()
        if show_detail == 'y':
            evaluator.print_evaluation_result(result, show_responses=True)


def main():
    """Interactive entry point: evaluate a single prompt or compare several."""
    evaluator = SparkModelEvaluator()

    # Three phrasings of the same question, for the demo comparison mode.
    example_prompts = [
        "什么是人工智能？请简要解释其主要应用领域。",
        "详细解释人工智能的定义、发展历程及其在医疗领域的具体应用案例。",
        "人工智能是什么？它有哪些用途？用简单易懂的语言回答。"
    ]

    print("欢迎使用星火大模型提示词量化评估工具")
    print("1. 评估单个提示词")
    print("2. 比较多个提示词")
    print("3. 使用示例提示词进行比较")

    choice = input("请选择操作 (1/2/3): ")

    if choice == "1":
        prompt = input("请输入要评估的提示词: ")
        runs = int(input("请输入运行次数 (默认3): ") or "3")
        result = evaluator.evaluate_prompt(prompt, runs=runs)
        if result:
            evaluator.print_evaluation_result(result, show_responses=True)
            evaluator.save_evaluation_results(function_type="single")

    elif choice == "2":
        num_prompts = int(input("请输入要比较的提示词数量: "))
        prompts = []
        for i in range(num_prompts):
            prompt = input(f"请输入提示词 {i+1}: ")
            prompts.append(prompt)

        runs = int(input("请输入每个提示词的运行次数 (默认3): ") or "3")
        results = evaluator.compare_prompts(prompts, runs=runs)
        _show_ranked_results(evaluator, results)
        evaluator.save_evaluation_results(function_type="compare_custom")

    elif choice == "3":
        print("\n使用示例提示词进行比较:")
        results = evaluator.compare_prompts(example_prompts)
        _show_ranked_results(evaluator, results)
        evaluator.save_evaluation_results(function_type="compare_example")

if __name__ == "__main__":
    main()
