#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
main1.py - 智能运维执行流程控制器
结合沙盒执行(main.py)和麒麟端执行(Kylin.py)的完整流程
"""

import os
import sys
import json
import logging
import time
import re
from datetime import datetime
from pathlib import Path
from typing import List, Dict, Any, Tuple

# 添加项目路径
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

# 导入相关模块
from main import KylinAutoOptimizer
from Kylin import get_latest_ansible_scripts, run_all_latest_scripts

# Logging setup: INFO-level records go both to a log file and to the console.
_log_handlers = [
    logging.FileHandler("main1_execution.log"),
    logging.StreamHandler(),
]
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=_log_handlers,
)
logger = logging.getLogger(__name__)


class IntelligentOptimizationController:
    """Intelligent-operations workflow controller.

    Orchestrates the end-to-end run: fetch the newest Ansible scripts,
    execute them in the sandbox via ``main.KylinAutoOptimizer``, analyze
    the resulting before/after performance files, decide whether to
    proceed, and finally trigger the Kylin-side execution via
    ``Kylin.run_all_latest_scripts``.
    """
    
    def __init__(self) -> None:
        """Initialize the controller with a timestamped run ID and path layout."""
        # Sandbox optimizer is created lazily in execute_sandbox_optimization()
        # and closed in run_complete_workflow()'s finally block.
        self.sandbox_optimizer = None
        self.current_time = datetime.now()
        # test_id and result_dir_name are derived from the same timestamp,
        # so both carry the identical "YYYYmmddHHMMSS" value for this run.
        self.test_id = self.current_time.strftime("%Y%m%d%H%M%S")
        self.result_dir_name = self.current_time.strftime("%Y%m%d%H%M%S")
        
        # Path configuration (all paths are relative to the working directory).
        self.ansible_scripts_dir = "agent-mcp/Ansible"
        self.local_result_dir = "python/ansible-getsystem"
        self.sandbox_result_dir = os.path.join(self.local_result_dir, self.result_dir_name)
        
        logger.info(f"初始化智能运维控制器")
        logger.info(f"测试ID: {self.test_id}")
        logger.info(f"结果目录: {self.result_dir_name}")
    
    def get_latest_scripts(self) -> List[Path]:
        """Return all scripts found in the latest dated directory.

        Delegates to Kylin.get_latest_ansible_scripts(); returns an empty
        list if the lookup raises, so callers can treat "no scripts" and
        "lookup failed" the same way.
        """
        logger.info("获取最新日期目录下的脚本...")
        
        try:
            script_files = get_latest_ansible_scripts()
            if script_files:
                logger.info(f"找到 {len(script_files)} 个脚本文件")
                for script in script_files:
                    logger.info(f"  - {script.name}")
            else:
                logger.warning("未找到可执行的脚本文件")
            
            return script_files
            
        except Exception as e:
            logger.error(f"获取脚本文件失败: {e}")
            return []
    
    def execute_sandbox_optimization(self, script_files: List[Path]) -> Dict[str, Any]:
        """Run the sandbox optimization pass over the given scripts.

        Uploads a generated benchmark plus the optimization scripts to the
        sandbox host and runs them. On failure returns a dict with
        ``{"status": "error", "message": ...}``.
        """
        logger.info("开始执行沙盒优化流程...")
        
        try:
            # Initialize the sandbox optimizer.
            # NOTE(review): host/username/private-key path are hard-coded and
            # Windows-specific — consider moving them to config/env vars.
            self.sandbox_optimizer = KylinAutoOptimizer(
                host="192.168.100.135",
                username="root",
                private_key_path="C:/Users/32793/.ssh/id_rsa",
                local_result_dir=self.local_result_dir,
                ansible_scripts_dir=self.ansible_scripts_dir
            )
            
            # Prepare the benchmark script used for before/after measurement.
            test_script = self._generate_test_script()
            
            # Build the list of optimization scripts (name + file contents).
            # Scripts that fail to read are silently skipped (content is None).
            optimization_scripts = []
            for script_file in script_files:
                script_content = self._read_script_content(script_file)
                if script_content:
                    optimization_scripts.append({
                        'name': script_file.name,
                        'content': script_content
                    })
            
            logger.info(f"准备执行 {len(optimization_scripts)} 个优化脚本")
            
            # Upload and execute in the sandbox; results land in
            # self.sandbox_result_dir as *_before.txt / *_after.txt files.
            result_data = self.sandbox_optimizer.upload_and_run_scripts(
                test_script, 
                optimization_scripts, 
                self.test_id
            )
            
            logger.info("沙盒优化流程执行完成")
            return result_data
            
        except Exception as e:
            logger.error(f"沙盒优化执行失败: {e}")
            return {"status": "error", "message": str(e)}
    
    def _generate_test_script(self) -> str:
        """Return the benchmark script (Python source) executed on the sandbox.

        The script runs five CPU/memory-bound iterations, prints a human-readable
        summary, and finally prints a JSON metrics object which
        _parse_performance_metrics() later extracts from the captured output.
        Requires ``psutil`` on the sandbox host.
        """
        return """#!/usr/bin/env python3
import time
import json
import psutil
import sys

print("开始性能测试...")
start_time = time.time()

# 模拟不同负载
results = []
for i in range(5):
    print(f"测试迭代 {i+1}/5")
    iter_start = time.time()

    # CPU密集型任务
    total = 0
    for j in range(10**7):
        total += j * 0.0001

    # 内存分配
    data = [0] * 10**6

    # 获取资源使用情况
    iter_time = time.time() - iter_start
    cpu_percent = psutil.cpu_percent(interval=0.1)
    mem_usage = psutil.Process().memory_info().rss / (1024 * 1024)  # MB

    results.append({
        "iter": i+1,
        "time": iter_time,
        "cpu": cpu_percent,
        "memory": mem_usage
    })
    time.sleep(1)  # 间隔

# 计算总体指标
total_time = time.time() - start_time
avg_time = sum(r['time'] for r in results) / len(results)
avg_cpu = sum(r['cpu'] for r in results) / len(results)
max_memory = max(r['memory'] for r in results)

print("\\n===== 测试摘要 =====")
print(f"总执行时间: {total_time:.2f}秒")
print(f"平均迭代时间: {avg_time:.2f}秒")
print(f"平均CPU使用率: {avg_cpu:.1f}%")
print(f"最大内存使用: {max_memory:.2f}MB")

# 结构化输出性能指标
metrics = {
    "total_time": total_time,
    "avg_iter_time": avg_time,
    "avg_cpu_usage": avg_cpu,
    "max_memory_mb": max_memory,
    "iterations": results
}

print("\\n===== 性能指标 =====")
print(json.dumps(metrics, indent=2))
sys.exit(0)
"""
    
    def _read_script_content(self, script_path: Path) -> str:
        """Read a script file as UTF-8 text.

        NOTE(review): returns None on any read failure despite the ``-> str``
        annotation; callers filter on truthiness, so this is safe today, but
        the annotation should become ``Optional[str]``.
        """
        try:
            with open(script_path, 'r', encoding='utf-8') as f:
                content = f.read()
            return content
        except Exception as e:
            logger.error(f"读取脚本失败 {script_path}: {e}")
            return None
    
    def analyze_performance_results(self) -> Dict[str, Any]:
        """Analyze sandbox before/after outputs and write a JSON report.

        Pairs ``<name>_before.txt`` with ``<name>_after.txt`` files inside
        self.sandbox_result_dir, computes per-script performance changes,
        saves ``performance_analysis_report.json`` there, and returns the
        report dict (or ``{"status": "error", ...}`` on failure).
        """
        logger.info("开始分析性能测试结果...")
        
        try:
            # Bail out early if the sandbox run produced no result directory.
            if not os.path.exists(self.sandbox_result_dir):
                logger.error(f"结果目录不存在: {self.sandbox_result_dir}")
                return {"status": "error", "message": "结果目录不存在"}
            
            # Collect all before/after output files.
            before_files = []
            after_files = []
            
            for file in os.listdir(self.sandbox_result_dir):
                if file.endswith('_before.txt'):
                    before_files.append(file)
                elif file.endswith('_after.txt'):
                    after_files.append(file)
            
            logger.info(f"找到 {len(before_files)} 个before文件和 {len(after_files)} 个after文件")
            
            # Analyze the performance change of each script that has both files.
            analysis_results = []
            
            for before_file in before_files:
                # Derive the matching after-file name from the before-file name.
                script_name = before_file.replace('_before.txt', '')
                after_file = f"{script_name}_after.txt"
                
                if after_file in after_files:
                    before_path = os.path.join(self.sandbox_result_dir, before_file)
                    after_path = os.path.join(self.sandbox_result_dir, after_file)
                    
                    # Compare metrics between the two runs.
                    performance_analysis = self._analyze_performance_change(
                        before_path, after_path, script_name
                    )
                    analysis_results.append(performance_analysis)
            
            # Assemble the analysis report.
            analysis_report = {
                "test_id": self.test_id,
                "result_directory": self.result_dir_name,
                "total_scripts": len(analysis_results),
                "analysis_timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                "performance_analysis": analysis_results,
                "overall_assessment": self._generate_overall_assessment(analysis_results)
            }
            
            # Persist the report next to the raw result files.
            report_path = os.path.join(self.sandbox_result_dir, "performance_analysis_report.json")
            with open(report_path, 'w', encoding='utf-8') as f:
                json.dump(analysis_report, f, indent=2, ensure_ascii=False)
            
            logger.info(f"性能分析报告已保存: {report_path}")
            return analysis_report
            
        except Exception as e:
            logger.error(f"性能分析失败: {e}")
            return {"status": "error", "message": str(e)}
    
    def _analyze_performance_change(self, before_path: str, after_path: str, script_name: str) -> Dict[str, Any]:
        """Compute per-metric changes for one script's before/after outputs.

        Returns a dict with per-metric before/after values, percentage change,
        an overall-improvement flag and a recommendation string. On any error
        returns a degraded record with ``"recommendation": "analysis_failed"``.
        """
        try:
            # Read the raw captured output of both runs.
            with open(before_path, 'r', encoding='utf-8') as f:
                before_content = f.read()
            
            with open(after_path, 'r', encoding='utf-8') as f:
                after_content = f.read()
            
            # Extract the numeric metrics from each output.
            before_metrics = self._parse_performance_metrics(before_content)
            after_metrics = self._parse_performance_metrics(after_content)
            
            # Compute the relative change for each metric present in both runs.
            performance_changes = {}
            for metric in ['total_time', 'avg_iter_time', 'avg_cpu_usage', 'max_memory_mb']:
                if metric in before_metrics and metric in after_metrics:
                    before_val = before_metrics[metric]
                    after_val = after_metrics[metric]
                    
                    if before_val != 0:
                        change_percent = ((after_val - before_val) / before_val) * 100
                    else:
                        change_percent = 0
                    
                    performance_changes[metric] = {
                        "before": before_val,
                        "after": after_val,
                        "change_percent": change_percent,
                        "improvement": change_percent < 0  # a negative change means improvement
                    }
            
            # Build the per-script analysis record.
            analysis_result = {
                "script_name": script_name,
                "before_file": os.path.basename(before_path),
                "after_file": os.path.basename(after_path),
                "performance_changes": performance_changes,
                "overall_improvement": self._calculate_overall_improvement(performance_changes),
                "recommendation": self._generate_recommendation(performance_changes)
            }
            
            return analysis_result
            
        except Exception as e:
            logger.error(f"分析性能变化失败 {script_name}: {e}")
            return {
                "script_name": script_name,
                "error": str(e),
                "overall_improvement": False,
                "recommendation": "analysis_failed"
            }
    
    def _parse_performance_metrics(self, content: str) -> Dict[str, float]:
        """Extract performance metrics from captured benchmark output.

        Prefers the JSON metrics object printed by the benchmark script;
        falls back to regex extraction from the Chinese summary lines.
        Returns an empty dict when nothing can be parsed.
        """
        metrics = {}
        
        try:
            # Look for a JSON-formatted metrics object in the output.
            # NOTE(review): the greedy pattern spans from the FIRST '{' to the
            # LAST '}' in the output — fine for the generated benchmark, but
            # any extra braces in surrounding output would break the parse.
            json_match = re.search(r'\{.*\}', content, re.DOTALL)
            if json_match:
                json_str = json_match.group()
                data = json.loads(json_str)
                
                metrics = {
                    "total_time": data.get("total_time", 0),
                    "avg_iter_time": data.get("avg_iter_time", 0),
                    "avg_cpu_usage": data.get("avg_cpu_usage", 0),
                    "max_memory_mb": data.get("max_memory_mb", 0)
                }
            else:
                # Fall back to pulling numbers out of the plain-text summary.
                patterns = {
                    "total_time": r"总执行时间:\s*([\d.]+)",
                    "avg_iter_time": r"平均迭代时间:\s*([\d.]+)",
                    "avg_cpu_usage": r"平均CPU使用率:\s*([\d.]+)",
                    "max_memory_mb": r"最大内存使用:\s*([\d.]+)"
                }
                
                for metric, pattern in patterns.items():
                    match = re.search(pattern, content)
                    if match:
                        metrics[metric] = float(match.group(1))
                
        except Exception as e:
            logger.warning(f"解析性能指标失败: {e}")
        
        return metrics
    
    def _calculate_overall_improvement(self, performance_changes: Dict[str, Any]) -> bool:
        """Return True when more than half of the tracked metrics improved."""
        improvements = 0
        total_metrics = 0
        
        for metric, change in performance_changes.items():
            if isinstance(change, dict) and "improvement" in change:
                total_metrics += 1
                if change["improvement"]:
                    improvements += 1
        
        # Overall improvement requires a strict majority (>50%) of metrics.
        return total_metrics > 0 and (improvements / total_metrics) > 0.5
    
    def _generate_recommendation(self, performance_changes: Dict[str, Any]) -> str:
        """Map the improvement/degradation balance to a recommendation string.

        Returns one of: "recommend_kylin_execution" (more improvements),
        "recommend_further_optimization" (more degradations), or
        "recommend_monitoring" (tie / no data).
        """
        improvements = 0
        degradations = 0
        
        for metric, change in performance_changes.items():
            if isinstance(change, dict) and "improvement" in change:
                if change["improvement"]:
                    improvements += 1
                else:
                    degradations += 1
        
        if improvements > degradations:
            return "recommend_kylin_execution"
        elif degradations > improvements:
            return "recommend_further_optimization"
        else:
            return "recommend_monitoring"
    
    def _generate_overall_assessment(self, analysis_results: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Summarize per-script results into counts, an improvement rate (%),
        and an overall recommendation (Kylin execution at >= 50% improvement)."""
        total_scripts = len(analysis_results)
        improved_scripts = sum(1 for result in analysis_results if result.get("overall_improvement", False))
        
        improvement_rate = (improved_scripts / total_scripts * 100) if total_scripts > 0 else 0
        
        return {
            "total_scripts": total_scripts,
            "improved_scripts": improved_scripts,
            "improvement_rate": improvement_rate,
            "overall_recommendation": "recommend_kylin_execution" if improvement_rate >= 50 else "recommend_monitoring"
        }
    
    def should_execute_kylin(self, analysis_report: Dict[str, Any]) -> bool:
        """Decide whether the Kylin-side optimization should run.

        Currently ALWAYS returns True (the analysis-based decision below is
        intentionally disabled); ``analysis_report`` is therefore unused.
        """
        logger.info("判断是否执行麒麟端优化...")
        
        # Default: always approve execution.
        logger.info("默认判断: 执行麒麟端优化")
        return True
        
        # Optional decision logic below — currently disabled and unreachable
        # after the return above; kept as a string literal for reference.
        """
        try:
            overall_assessment = analysis_report.get("overall_assessment", {})
            improvement_rate = overall_assessment.get("improvement_rate", 0)
            
            # 如果改进率超过50%，则执行麒麟端优化
            should_execute = improvement_rate >= 50
            
            logger.info(f"改进率: {improvement_rate:.1f}%")
            logger.info(f"执行麒麟端优化: {'是' if should_execute else '否'}")
            
            return should_execute
            
        except Exception as e:
            logger.error(f"判断执行条件失败: {e}")
            return False
        """
    
    def execute_kylin_optimization(self) -> Dict[str, Any]:
        """Run the Kylin-side optimization for this run's test_id.

        Delegates to Kylin.run_all_latest_scripts(); returns a status dict
        with the Kylin results and a timestamp, or an error record.
        """
        logger.info("开始执行麒麟端优化...")
        
        try:
            # Execute all latest scripts on the Kylin side.
            kylin_results = run_all_latest_scripts(self.test_id)
            
            logger.info("麒麟端优化执行完成")
            return {
                "status": "success",
                "kylin_results": kylin_results,
                "execution_timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            }
            
        except Exception as e:
            logger.error(f"麒麟端优化执行失败: {e}")
            return {"status": "error", "message": str(e)}
    
    def run_complete_workflow(self) -> Dict[str, Any]:
        """Run the full five-step workflow and return a result dict.

        Steps: fetch scripts -> sandbox optimization -> performance analysis
        -> execution decision -> (optional) Kylin execution. Each step appends
        a record to ``workflow_result["steps"]``.

        NOTE(review): the early returns (no scripts / sandbox / analysis
        failure) leave ``workflow_result`` without a "status" key, which
        main() then reports as a generic failure — confirm this is intended.
        """
        logger.info("开始执行完整的智能运维流程")
        logger.info("=" * 60)
        
        workflow_result = {
            "workflow_id": self.test_id,
            "start_time": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            "steps": []
        }
        
        try:
            # Step 1: fetch the latest scripts.
            logger.info("步骤1: 获取最新脚本")
            script_files = self.get_latest_scripts()
            workflow_result["steps"].append({
                "step": "get_scripts",
                "status": "success" if script_files else "failed",
                "script_count": len(script_files)
            })
            
            if not script_files:
                logger.error("未找到可执行的脚本，流程终止")
                return workflow_result
            
            # Step 2: run the sandbox optimization.
            logger.info("步骤2: 执行沙盒优化")
            sandbox_result = self.execute_sandbox_optimization(script_files)
            workflow_result["steps"].append({
                "step": "sandbox_optimization",
                "status": "success" if sandbox_result.get("status") != "error" else "failed",
                "result": sandbox_result
            })
            
            if sandbox_result.get("status") == "error":
                logger.error("沙盒优化失败，流程终止")
                return workflow_result
            
            # Step 3: analyze the performance results.
            logger.info("步骤3: 分析性能结果")
            analysis_report = self.analyze_performance_results()
            workflow_result["steps"].append({
                "step": "performance_analysis",
                "status": "success" if analysis_report.get("status") != "error" else "failed",
                "analysis_report": analysis_report
            })
            
            if analysis_report.get("status") == "error":
                logger.error("性能分析失败，流程终止")
                return workflow_result
            
            # Step 4: decide whether to run the Kylin-side optimization.
            logger.info("步骤4: 判断执行条件")
            should_execute = self.should_execute_kylin(analysis_report)
            workflow_result["steps"].append({
                "step": "execution_decision",
                "status": "success",
                "should_execute_kylin": should_execute
            })
            
            # Step 5: run the Kylin-side optimization when approved.
            if should_execute:
                logger.info("步骤5: 执行麒麟端优化")
                kylin_result = self.execute_kylin_optimization()
                workflow_result["steps"].append({
                    "step": "kylin_optimization",
                    "status": "success" if kylin_result.get("status") == "success" else "failed",
                    "kylin_result": kylin_result
                })
            else:
                logger.info("步骤5: 跳过麒麟端优化")
                workflow_result["steps"].append({
                    "step": "kylin_optimization",
                    "status": "skipped",
                    "reason": "execution_condition_not_met"
                })
            
            # Mark the workflow as completed.
            workflow_result["end_time"] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            workflow_result["status"] = "completed"
            
            logger.info("完整流程执行完成")
            logger.info("=" * 60)
            
            return workflow_result
            
        except Exception as e:
            logger.error(f"流程执行失败: {e}")
            workflow_result["end_time"] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            workflow_result["status"] = "failed"
            workflow_result["error"] = str(e)
            return workflow_result
        
        finally:
            # Always release the sandbox connection, even on early return.
            if self.sandbox_optimizer:
                self.sandbox_optimizer.close()


def main():
    """Entry point: run the complete workflow, print a summary, save JSON."""

    def _describe_step(step):
        # Print one workflow step's status plus any step-specific detail.
        step_name = step.get('step', 'unknown')
        step_status = step.get('status', 'unknown')
        print(f"  {step_name}: {step_status}")

        if step_name == 'get_scripts':
            print(f"    脚本数量: {step.get('script_count', 0)}")
        elif step_name == 'performance_analysis':
            assessment = step.get('analysis_report', {}).get('overall_assessment', {})
            print(f"    改进率: {assessment.get('improvement_rate', 0):.1f}%")
        elif step_name == 'execution_decision':
            print(f"    执行麒麟端优化: {'是' if step.get('should_execute_kylin') else '否'}")

    print("🤖 智能运维执行流程控制器")
    print("=" * 60)

    # Build the controller and execute the full pipeline.
    outcome = IntelligentOptimizationController().run_complete_workflow()

    # Summary of the run.
    print("\n📊 执行结果摘要:")
    print(f"流程ID: {outcome.get('workflow_id')}")
    print(f"状态: {outcome.get('status')}")
    print(f"开始时间: {outcome.get('start_time')}")
    print(f"结束时间: {outcome.get('end_time')}")

    if outcome.get('status') == 'completed':
        print("✅ 流程执行成功")

        # Per-step breakdown.
        print("\n📋 步骤执行情况:")
        for step in outcome.get('steps', []):
            _describe_step(step)
    else:
        print("❌ 流程执行失败")
        print(f"错误信息: {outcome.get('error', '未知错误')}")

    # Persist the full workflow result as pretty-printed UTF-8 JSON.
    result_file = f"workflow_result_{outcome.get('workflow_id')}.json"
    with open(result_file, 'w', encoding='utf-8') as f:
        json.dump(outcome, f, indent=2, ensure_ascii=False)

    print(f"\n💾 结果已保存到: {result_file}")
    print("\n🎉 执行完成！")

# Script entry point: run the full workflow only when executed directly.
if __name__ == "__main__":
    main() 