import asyncio
import json
import os
import re
from datetime import datetime
from typing import Any, Dict, Optional

from ollama import Client

# Project-local confidence analyzer
from ..confidence_module.confidence_analyzer import ConfidenceAnalyzer

class OllamaLLM:
    """Thin synchronous wrapper around the Ollama chat client."""

    def __init__(self, base_url: str, model: str):
        """
        Args:
            base_url: Ollama server URL, e.g. "http://host:11434".
            model: name of the model to chat with.
        """
        self.base_url = base_url
        self.model = model
        self.client = Client(host=base_url)

    def invoke(self, prompt: str) -> str:
        """Send a single-turn user prompt and return the model's reply text.

        Args:
            prompt: the user message to send.

        Returns:
            str: the assistant message content from the response.

        Raises:
            Exception: wraps any underlying client/parse error.
        """
        try:
            response = self.client.chat(
                model=self.model,
                messages=[{"role": "user", "content": prompt}]
            )
            return response['message']['content']
        except Exception as e:
            # Chain the original exception so the root cause is preserved
            # in tracebacks instead of being flattened into a message string.
            raise Exception(f"Ollama调用失败: {e}") from e

class LLMAnalyzer:
    """LLM analyzer — generates tuning scripts for low-confidence anomalies.

    When an anomaly's confidence score is below the configured threshold, the
    analyzer classifies the anomaly, builds a prompt from its details, asks
    the Ollama model for a remediation bash script, cleans the response, and
    saves the script into the given directory.
    """

    def __init__(self, confidence_threshold: float = 0.8,
                 base_url: str = "http://10.11.5.14:11434",
                 model: str = "deepseek-r1:14b"):
        """
        Initialize the LLM analyzer.

        Args:
            confidence_threshold: scores below this value trigger LLM analysis.
            base_url: Ollama server endpoint (default keeps the previously
                hard-coded address for backward compatibility).
            model: Ollama model name used for script generation.
        """
        # Confidence analyzer shares the same threshold.
        self.confidence_analyzer = ConfidenceAnalyzer(threshold=confidence_threshold)
        self.confidence_threshold = confidence_threshold

        # Ollama LLM client; endpoint/model are now injectable instead of
        # being hard-coded inside the constructor body.
        self.llm = OllamaLLM(base_url=base_url, model=model)

        # Tool category -> lightweight tuning tools suggested to the LLM.
        self.tuning_tools = {
            "cpu": ["cpupower", "numactl", "irqbalance", "tuned", "taskset"],
            "memory": ["numad", "earlyoom", "cgroup-tools", "echo"],
            "disk": ["fio", "iostat", "iotop", "blktrace", "hdparm"],
            "network": ["ethtool", "netstat", "ss", "iperf", "tc"],
            "kernel": ["sysctl", "tuned", "numad", "echo"],
            "processes": ["cgroup-tools", "numactl", "taskset", "nice", "renice"],
            "system_metrices": ["tuned", "numad", "earlyoom", "sysctl"],
            "linux_sysctl_fs": ["sysctl", "echo", "mount", "tune2fs"]
        }

        # issue_type value -> tool category. Consulted before the keyword
        # heuristic in _analyze_anomaly_type (it was previously defined but
        # never read anywhere).
        self.anomaly_type_mapping = {
            "cpu_usage": "cpu",
            "memory_leak": "memory",
            "disk_io": "disk",
            "network_issue": "network",
            "kernel_issue": "kernel",
            "process_issue": "processes",
            "system_metric": "system_metrices",
            "filesystem_issue": "linux_sysctl_fs"
        }

    async def process_anomaly(self, anomaly: Dict[str, Any], ansible_dir: str) -> Dict[str, Any]:
        """
        Process an anomaly whose confidence is already known to be below threshold.

        Args:
            anomaly: anomaly data dictionary.
            ansible_dir: target (timestamped) directory for the generated script.

        Returns:
            Dict: result payload; on failure "success" is False and "error"
            carries the message — exceptions are not propagated to the caller.
        """
        try:
            # 1. Confidence info — recorded in the result only; the caller has
            # already decided this anomaly needs LLM analysis.
            confidence_score = self.confidence_analyzer.analyze_confidence(anomaly)
            confidence_level = self.confidence_analyzer.get_confidence_level(confidence_score)

            print(f"置信度较低 ({confidence_score:.3f})，启动LLM分析...")

            # 2. Classify the anomaly into a tuning-tool category.
            anomaly_type = self._analyze_anomaly_type(anomaly)

            # 3. Build the LLM prompt.
            prompt = self._generate_prompt(anomaly, anomaly_type)

            # 4. Ask the LLM to generate a tuning script.
            generated_script = await self._call_llm(prompt)

            # 5. Save the script into the timestamped directory.
            script_file = await self._save_generated_script(
                generated_script, anomaly, anomaly_type, ansible_dir
            )

            return {
                "success": True,
                "anomaly_id": anomaly.get("anomaly_id"),
                "confidence_score": confidence_score,
                "confidence_level": confidence_level,
                "anomaly_type": anomaly_type,
                "generated_script": script_file,
                "prompt": prompt,
                "processing_method": "llm_analysis"
            }

        except Exception as e:
            # Deliberate best-effort boundary: report the failure in the
            # result dict rather than crashing the processing pipeline.
            return {
                "success": False,
                "error": str(e),
                "anomaly_id": anomaly.get("anomaly_id"),
                "processing_method": "llm_analysis"
            }

    def _analyze_anomaly_type(self, anomaly: Dict[str, Any]) -> str:
        """Classify an anomaly into a tuning-tool category.

        The explicit issue_type mapping takes precedence; otherwise keywords
        in root_cause are matched in priority order (first match wins, so e.g.
        "filesystem" lands in the disk bucket before linux_sysctl_fs, matching
        the original if/elif ordering). Falls back to "system_metrices".
        """
        issue_type = anomaly.get("issue_type", "").lower()
        # Fix: the mapping was previously defined in __init__ but never used.
        if issue_type in self.anomaly_type_mapping:
            return self.anomaly_type_mapping[issue_type]

        root_cause = anomaly.get("root_cause", "").lower()

        # Ordered keyword table preserving the original branch priority.
        keyword_table = [
            ("cpu", ("cpu", "processor", "load", "core")),
            ("memory", ("memory", "ram", "oom", "swap")),
            ("disk", ("disk", "io", "storage", "filesystem")),
            ("network", ("network", "connection", "latency", "bandwidth")),
            ("kernel", ("kernel", "system", "sysctl")),
            ("processes", ("process", "thread", "pid")),
            ("system_metrices", ("metric", "performance", "system")),
            ("linux_sysctl_fs", ("filesystem", "fs", "mount")),
        ]
        for category, keywords in keyword_table:
            if any(keyword in root_cause for keyword in keywords):
                return category
        return "system_metrices"  # default category

    def _generate_prompt(self, anomaly: Dict[str, Any], anomaly_type: str) -> str:
        """Build the Chinese-language prompt sent to the LLM.

        Embeds the anomaly details, the tool list for *anomaly_type*, and a
        reference bash-script template the model is asked to imitate.
        """
        # Tools for this category; empty category yields an empty tool list.
        available_tools = self.tuning_tools.get(anomaly_type, [])
        tools_str = ", ".join(available_tools)

        # NOTE: the template below is a runtime string sent to the model and
        # must stay exactly as-is; {{ }} are f-string escapes for literal
        # braces in the embedded awk/JSON snippets.
        prompt = f'''我现在是一个银河麒麟服务器v10操作系统的运维管家，我现在要写关于{anomaly_type}调优脚本。

异常信息：
- 异常ID: {anomaly.get('anomaly_id', 'unknown')}
- 问题类型: {anomaly.get('issue_type', 'unknown')}
- 根本原因: {anomaly.get('root_cause', 'unknown')}
- 严重程度: {anomaly.get('severity', 0)}
- 贡献因素: {', '.join(anomaly.get('contributing_factors', []))}
- 证据: {', '.join(anomaly.get('evidence', []))}

可用的调优工具: {tools_str}

脚本的写作格式参考下面脚本：文件名log.sh
里面内容：
#!/bin/bash
echo "===== 开始性能测试 ====="

# 记录开始时间
start_time=$(date +%s.%N)

# 运行调优命令
CPU_THRESHOLD=90
TOP_PID=$(ps -eo pid,pcpu --sort=-pcpu | grep -v "PID" | head -n 1 | awk '{{print $1}}')
TOP_CPU_USAGE=$(ps -p $TOP_PID -o %cpu= | awk '{{print int($1)}}')
if [ "$TOP_CPU_USAGE" -ge "$CPU_THRESHOLD" ]; then
    kill -9 $TOP_PID
fi
# 收集指标
cpu_score=$(grep "events per second" sysbench_result.txt | awk '{{print $4}}')
memory_usage=$(free -m | awk '/Mem:/ {{print $3}}')
disk_io=$(iostat -d -k | awk '/sda/ {{print $3}}')

# 记录结束时间
end_time=$(date +%s.%N)
duration=$(echo "$end_time - $start_time" | bc)

echo "===== 测试摘要 ====="
echo "总执行时间: ${{duration}}秒"
echo "CPU性能分数: ${{cpu_score}} events/sec"
echo "内存使用量: ${{memory_usage}}MB"
echo "磁盘IO: ${{disk_io}}KB/s"

# 结构化输出
echo "===== 性能指标 ====="
echo '{{
    "total_time": "'$duration'",
    "cpu_score": "'$cpu_score'",
    "memory_usage": "'$memory_usage'",
    "disk_io": "'$disk_io'"
}}'

根据上面脚本的写作风格，生成当前异常可以用什么的脚本进行修复调优（可以使用{tools_str}等轻量级的调优工具），其中对系统的调优脚本应出于 " 调优可实现性 > 调优有效性 > 调优效果 > 调优效率"的原则。

要求：
1. 只输出完整的脚本代码，不要包含任何解释说明、思考过程或<think>标签
2. 不要包含```bash或```等markdown标记，直接输出脚本内容
3. 脚本必须包含错误处理机制、日志记录功能、权限检查
4. 使用具体的调优命令，结合异常类型和根因
5. 包含调优效果验证和结构化输出性能指标
6. 严格按照上述模板格式生成bash脚本
7. 脚本要针对{anomaly_type}类型的问题进行优化
8. 直接输出脚本内容，不要包含任何AI思考过程或markdown标记

请生成对应的脚本：'''

        return prompt

    async def _call_llm(self, prompt: str) -> str:
        """Invoke the blocking Ollama client on a thread-pool executor.

        Raises:
            Exception: re-raised (with cause chained) when the call fails.
        """
        try:
            # Fix: asyncio.get_event_loop() is deprecated inside coroutines
            # since Python 3.10; get_running_loop() is the correct call here.
            loop = asyncio.get_running_loop()
            return await loop.run_in_executor(None, self.llm.invoke, prompt)
        except Exception as e:
            error_msg = f"LLM调用失败: {e}"
            print(error_msg)
            raise Exception(error_msg) from e

    async def _save_generated_script(self, script_content: str, anomaly: Dict[str, Any],
                                   anomaly_type: str, ansible_dir: str) -> str:
        """Clean the generated script and write it into *ansible_dir*.

        Args:
            script_content: raw LLM output.
            anomaly: anomaly dict; its ID is embedded in the file name so
                scripts for different anomalies do not collide.
            anomaly_type: tool category, used as the file-name prefix.
            ansible_dir: destination directory (already a timestamped dir).

        Returns:
            str: path of the saved, executable script file.
        """
        anomaly_id = anomaly.get("anomaly_id", "unknown")
        script_name = f"{anomaly_type}_调优脚本_{anomaly_id}.sh"
        script_path = os.path.join(ansible_dir, script_name)

        # Strip markdown fences / model reasoning before persisting.
        cleaned_script = self._clean_script_content(script_content)

        with open(script_path, 'w', encoding='utf-8') as f:
            f.write(cleaned_script)

        # Mark the script executable (rwxr-xr-x).
        os.chmod(script_path, 0o755)

        print(f"脚本已保存到: {script_path}")
        return script_path

    def _clean_script_content(self, script_content: str) -> str:
        """Strip <think> blocks, markdown code fences and blank lines from
        the raw LLM output so that only the bash script remains."""
        # Drop model reasoning emitted as <think>...</think>.
        script_content = re.sub(r'<think>.*?</think>', '', script_content, flags=re.DOTALL)

        # Drop opening ```bash / ``` markers and trailing ``` fences.
        script_content = re.sub(r'^```(?:bash)?\s*', '', script_content, flags=re.MULTILINE)
        script_content = re.sub(r'\s*```$', '', script_content, flags=re.MULTILINE)

        # Remove blank lines and surrounding whitespace.
        lines = [line for line in script_content.split('\n') if line.strip()]
        return '\n'.join(lines).strip()

    def get_confidence_info(self, anomaly: Dict[str, Any]) -> Dict[str, Any]:
        """Return confidence details for *anomaly* without processing it."""
        confidence_score = self.confidence_analyzer.analyze_confidence(anomaly)
        confidence_level = self.confidence_analyzer.get_confidence_level(confidence_score)

        return {
            "confidence_score": confidence_score,
            "confidence_level": confidence_level,
            "threshold": self.confidence_threshold,
            "needs_llm": confidence_score < self.confidence_threshold
        }