#!/usr/bin/env python3
"""
Rules Conflict Analyzer - 智能规则冲突分析引擎
检测rules目录下的重复、矛盾和冗余内容，提供最优解决方案

基于智能闭环演进，实现：
1. 深度内容分析
2. 语义相似度检测
3. 冲突识别
4. 最优化建议
"""

import json
import os
import re
import sys
from collections import defaultdict
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Tuple

import yaml

class RulesConflictAnalyzer:
    """智能规则冲突分析引擎"""
    
    def __init__(self, project_root: Path = None):
        self.project_root = project_root or Path(__file__).parent.parent.parent
        self.rules_dir = self.project_root / "rules"
        self.analysis_results = {}
        self.conflicts = []
        self.duplicates = []
        self.optimization_suggestions = []
        
    def analyze_all_conflicts(self):
        """执行完整的冲突分析"""
        
        print("🔍 启动智能规则冲突分析引擎...")
        print("=" * 80)
        
        # Phase 1: 扫描所有规则文件
        all_rules = self._scan_all_rule_files()
        print(f"📊 发现 {len(all_rules)} 个规则文件")
        
        # Phase 2: 检测重复内容
        duplicates = self._detect_duplicates(all_rules)
        print(f"🔍 发现 {len(duplicates)} 组重复内容")
        
        # Phase 3: 检测语义冲突
        conflicts = self._detect_conflicts(all_rules)
        print(f"⚡ 发现 {len(conflicts)} 个潜在冲突")
        
        # Phase 4: 检测结构冗余
        redundancies = self._detect_redundancies(all_rules)
        print(f"📦 发现 {len(redundancies)} 个冗余结构")
        
        # Phase 5: 生成优化建议
        optimizations = self._generate_optimization_solutions(duplicates, conflicts, redundancies)
        print(f"💡 生成 {len(optimizations)} 个优化建议")
        
        # Phase 6: 展示分析结果
        self._display_analysis_results(duplicates, conflicts, redundancies, optimizations)
        
        return {
            "duplicates": duplicates,
            "conflicts": conflicts,
            "redundancies": redundancies,
            "optimizations": optimizations
        }
    
    def _scan_all_rule_files(self) -> Dict[str, Dict[str, Any]]:
        """扫描所有规则文件"""
        
        all_rules = {}
        
        for category_dir in ["core", "system", "evolution", "validation"]:
            category_path = self.rules_dir / category_dir
            if not category_path.exists():
                continue
                
            for rule_file in category_path.glob("*.yaml"):
                file_key = f"{category_dir}/{rule_file.name}"
                
                try:
                    with open(rule_file, 'r', encoding='utf-8') as f:
                        content = yaml.safe_load(f)
                    
                    all_rules[file_key] = {
                        "path": rule_file,
                        "category": category_dir,
                        "filename": rule_file.name,
                        "content": content,
                        "size": rule_file.stat().st_size,
                        "lines": len(open(rule_file, 'r', encoding='utf-8').readlines()),
                        "content_text": str(content).lower()
                    }
                    
                except Exception as e:
                    print(f"⚠️ 无法解析 {file_key}: {str(e)}")
                    
        return all_rules
    
    def _detect_duplicates(self, all_rules: Dict[str, Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Find duplicated content across rule files.

        Combines three checks: byte-identical parsed content, high structural
        similarity of configuration key paths, and known overlapping file
        names. Returns one finding dict per duplicate group.
        """
        duplicates: List[Dict[str, Any]] = []

        # 1. Group files by their full serialized content. Using the string
        #    itself as the key (rather than hash(str(...))) removes the
        #    possibility of a hash collision falsely grouping distinct files.
        content_groups: Dict[str, List[str]] = defaultdict(list)
        for file_key, rule_data in all_rules.items():
            content_groups[str(rule_data["content"])].append(file_key)

        for file_list in content_groups.values():
            if len(file_list) > 1:
                duplicates.append({
                    "type": "identical_content",
                    "severity": "high",
                    "files": file_list,
                    "description": f"完全相同的内容出现在 {len(file_list)} 个文件中"
                })

        # 2. Files whose configuration key structure largely overlaps.
        duplicates.extend(self._find_similar_content_blocks(all_rules))

        # 3. Known file-name pairs that suggest overlapping responsibilities.
        duplicates.extend(self._detect_similar_filenames(all_rules))

        return duplicates
    
    def _find_similar_content_blocks(self, all_rules: Dict[str, Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Report file pairs whose configuration-key structure overlaps heavily."""
        findings: List[Dict[str, Any]] = []

        # Pre-compute the key-path signature of every file once.
        signatures = {
            file_key: self._extract_config_patterns(rule_data["content"])
            for file_key, rule_data in all_rules.items()
        }

        # Compare every unordered pair of files exactly once.
        keys = list(signatures)
        for idx, first in enumerate(keys):
            for second in keys[idx + 1:]:
                score = self._calculate_pattern_similarity(
                    signatures[first], signatures[second]
                )
                if score > 0.7:  # flag pairs above the 70% overlap threshold
                    findings.append({
                        "type": "similar_content",
                        "severity": "medium",
                        "files": [first, second],
                        "similarity": score,
                        "description": f"内容相似度 {score:.1%}，可能有重复配置"
                    })

        return findings
    
    def _extract_config_patterns(self, content: Any) -> Set[str]:
        """Collect every dotted key path appearing in a nested YAML structure."""
        patterns: Set[str] = set()

        # Explicit worklist instead of recursion: (node, dotted-prefix) pairs.
        stack = [(content, "")]
        while stack:
            node, prefix = stack.pop()
            if isinstance(node, dict):
                for key, value in node.items():
                    path = f"{prefix}.{key}" if prefix else key
                    patterns.add(path)
                    if isinstance(value, (dict, list)):
                        stack.append((value, path))
            elif isinstance(node, list):
                # List items inherit the list's own prefix.
                for item in node:
                    if isinstance(item, (dict, list)):
                        stack.append((item, prefix))

        return patterns
    
    def _calculate_pattern_similarity(self, patterns1: Set[str], patterns2: Set[str]) -> float:
        """Return the Jaccard similarity of two key-path sets (0.0 to 1.0)."""
        # Either side empty means there is nothing to compare.
        if not (patterns1 and patterns2):
            return 0.0

        shared = patterns1 & patterns2
        combined = patterns1 | patterns2
        # combined cannot be empty here, but keep the guard for symmetry.
        return len(shared) / len(combined) if combined else 0.0
    
    def _detect_similar_filenames(self, all_rules: Dict[str, Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Flag known file-name pairs that suggest overlapping purposes."""
        # Curated pairs known to duplicate each other's role.
        problem_pairs = [
            ("system/enterprise_project_management.yaml", "system/project_management.yaml"),
            ("evolution/evolution_management.yaml", "evolution/practice_driven_evolution_rules.yaml"),
        ]

        # Emit one finding per pair where both files are actually present.
        return [
            {
                "type": "similar_filename",
                "severity": "high",
                "files": [first, second],
                "description": f"文件名相似，可能功能重叠：{first} vs {second}"
            }
            for first, second in problem_pairs
            if first in all_rules and second in all_rules
        ]
    
    def _detect_conflicts(self, all_rules: Dict[str, Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Aggregate version, configuration-value, and responsibility conflicts."""
        detectors = (
            self._detect_version_conflicts,         # same feature, different versions
            self._detect_configuration_conflicts,   # same key, different values
            self._detect_responsibility_conflicts,  # overlapping functional scope
        )

        conflicts: List[Dict[str, Any]] = []
        for detect in detectors:
            conflicts.extend(detect(all_rules))
        return conflicts
    
    def _detect_version_conflicts(self, all_rules: Dict[str, Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Placeholder for version-conflict detection.

        Walks each file looking at rule_metadata.version, but cross-file
        comparison is not implemented yet, so this always returns [].
        """
        conflicts: List[Dict[str, Any]] = []

        for rule_data in all_rules.values():
            content = rule_data["content"]
            if isinstance(content, dict) and "rule_metadata" in content:
                metadata = content["rule_metadata"]
                if "version" in metadata:
                    pass  # TODO: compare versions of the same feature across files

        return conflicts
    
    def _detect_configuration_conflicts(self, all_rules: Dict[str, Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Find scalar config keys that carry different values in different files."""
        # config path -> {file key -> scalar value}
        values_by_path: Dict[str, Dict[str, Any]] = defaultdict(dict)
        for file_key, rule_data in all_rules.items():
            self._collect_config_values(rule_data["content"], values_by_path, file_key)

        conflicts: List[Dict[str, Any]] = []
        for config_path, per_file in values_by_path.items():
            # A conflict needs at least two files AND at least two distinct values.
            if len(per_file) > 1 and len(set(per_file.values())) > 1:
                conflicts.append({
                    "type": "configuration_conflict",
                    "severity": "medium",
                    "config_path": config_path,
                    "conflicting_values": dict(per_file),
                    "description": f"配置项 '{config_path}' 在不同文件中有不同的值"
                })

        return conflicts
    
    def _collect_config_values(self, data: Any, config_values: Dict, file_key: str, prefix: str = ""):
        """递归收集配置值"""
        
        if isinstance(data, dict):
            for key, value in data.items():
                current_path = f"{prefix}.{key}" if prefix else key
                if isinstance(value, (str, int, float, bool)):
                    config_values[current_path][file_key] = value
                elif isinstance(value, (dict, list)):
                    self._collect_config_values(value, config_values, file_key, current_path)
    
    def _detect_responsibility_conflicts(self, all_rules: Dict[str, Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Flag functional responsibilities claimed by multiple files.

        Only 'project_management' claimed by exactly two files is treated as
        a real conflict; other overlaps are considered normal and ignored.
        """
        keyword_map = {
            "project_management": ["project", "management", "planning", "tracking"],
            "evolution": ["evolution", "improvement", "feedback", "optimization"],
            "security": ["security", "auth", "encryption", "protection"],
            "testing": ["test", "qa", "validation", "verification"],
            "performance": ["performance", "monitoring", "metrics", "optimization"]
        }

        # responsibility -> files whose lowercased text mentions any keyword
        claimants: Dict[str, List[str]] = defaultdict(list)
        for file_key, rule_data in all_rules.items():
            text = rule_data["content_text"]
            for responsibility, keywords in keyword_map.items():
                if any(word in text for word in keywords):
                    claimants[responsibility].append(file_key)

        conflicts: List[Dict[str, Any]] = []
        for responsibility, files in claimants.items():
            # Some overlap is expected; only this specific pattern is flagged.
            if responsibility == "project_management" and len(files) == 2:
                conflicts.append({
                    "type": "responsibility_overlap",
                    "severity": "high",
                    "responsibility": responsibility,
                    "files": files,
                    "description": f"'{responsibility}' 职责在多个文件中定义，可能造成混淆"
                })

        return conflicts
    
    def _detect_redundancies(self, all_rules: Dict[str, Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Report oversized files (>600 lines) and fragmentation (>3 files under 100 lines)."""
        redundancies: List[Dict[str, Any]] = []

        # One finding per oversized file.
        for file_key, rule_data in all_rules.items():
            line_count = rule_data["lines"]
            if line_count > 600:
                redundancies.append({
                    "type": "oversized_file",
                    "severity": "medium",
                    "file": file_key,
                    "lines": line_count,
                    "description": f"文件过大 ({line_count} 行)，建议拆分"
                })

        # Many tiny files suggest fragmentation; one combined finding.
        tiny = [file_key for file_key, rule_data in all_rules.items() if rule_data["lines"] < 100]
        if len(tiny) > 3:
            redundancies.append({
                "type": "fragmented_files",
                "severity": "low",
                "files": tiny,
                "description": f"发现 {len(tiny)} 个小文件，可考虑合并"
            })

        return redundancies
    
    def _generate_optimization_solutions(self, duplicates: List, conflicts: List, redundancies: List) -> List[Dict[str, Any]]:
        """Turn raw findings into an ordered list of actionable optimization proposals."""
        proposals: List[Dict[str, Any]] = []

        # 1. High-severity file-name clash around project management -> merge.
        for finding in duplicates:
            is_name_clash = (finding["type"] == "similar_filename"
                             and finding["severity"] == "high")
            if is_name_clash and "project_management" in finding["description"]:
                proposals.append({
                    "type": "merge_duplicate_files",
                    "priority": "high",
                    "title": "合并项目管理规则文件",
                    "description": "将 enterprise_project_management.yaml 和 project_management.yaml 合并",
                    "action": "merge_project_management_files",
                    "target_files": finding["files"],
                    "expected_result": "统一项目管理规则，消除重复和混淆"
                })

        # 2. Any duplicate touching evolution files -> one reorganization proposal.
        evolution_files: List[str] = []
        for finding in duplicates:
            involved = finding.get("files", [])
            if any("evolution" in name for name in involved):
                evolution_files.extend(involved)

        if evolution_files:
            proposals.append({
                "type": "reorganize_evolution_rules",
                "priority": "medium",
                "title": "重组演进规则结构",
                "description": "优化演进相关规则的组织结构",
                "action": "reorganize_evolution_files",
                "target_files": list(set(evolution_files)),
                "expected_result": "清晰的演进规则层次和职责分工"
            })

        # 3. Very large files (>700 lines) -> split.
        for finding in redundancies:
            if finding["type"] == "oversized_file" and finding["lines"] > 700:
                proposals.append({
                    "type": "split_large_file",
                    "priority": "medium",
                    "title": "拆分大型规则文件",
                    "description": f"将 {finding['file']} ({finding['lines']} 行) 拆分为多个专门文件",
                    "action": "split_file",
                    "target_files": [finding["file"]],
                    "expected_result": "提升可维护性和可读性"
                })

        # 4. Responsibility overlaps -> clarify boundaries.
        for finding in conflicts:
            if finding["type"] == "responsibility_overlap":
                proposals.append({
                    "type": "clarify_responsibilities",
                    "priority": "high",
                    "title": "明确职责边界",
                    "description": f"明确 '{finding['responsibility']}' 的职责分工",
                    "action": "clarify_responsibility_boundaries",
                    "target_files": finding["files"],
                    "expected_result": "消除职责重叠，提高规则清晰度"
                })

        return proposals
    
    def _display_analysis_results(self, duplicates: List, conflicts: List, redundancies: List, optimizations: List):
        """显示分析结果"""
        
        print("\n" + "=" * 80)
        print("📊 智能规则冲突分析结果")
        print("=" * 80)
        
        # 显示重复内容
        if duplicates:
            print(f"\n🔍 发现的重复内容 ({len(duplicates)}个):")
            for i, dup in enumerate(duplicates, 1):
                severity_icon = "🔴" if dup["severity"] == "high" else "🟡" if dup["severity"] == "medium" else "🟢"
                print(f"  {i}. {severity_icon} {dup['description']}")
                if "files" in dup:
                    for file in dup["files"]:
                        print(f"     📁 {file}")
                print()
        
        # 显示冲突
        if conflicts:
            print(f"\n⚡ 发现的冲突 ({len(conflicts)}个):")
            for i, conflict in enumerate(conflicts, 1):
                severity_icon = "🔴" if conflict["severity"] == "high" else "🟡" if conflict["severity"] == "medium" else "🟢"
                print(f"  {i}. {severity_icon} {conflict['description']}")
                print()
        
        # 显示冗余
        if redundancies:
            print(f"\n📦 发现的冗余 ({len(redundancies)}个):")
            for i, redundancy in enumerate(redundancies, 1):
                severity_icon = "🔴" if redundancy["severity"] == "high" else "🟡" if redundancy["severity"] == "medium" else "🟢"
                print(f"  {i}. {severity_icon} {redundancy['description']}")
                print()
        
        # 显示优化建议
        if optimizations:
            print(f"\n💡 优化建议 ({len(optimizations)}个):")
            for i, opt in enumerate(optimizations, 1):
                priority_icon = "🔥" if opt["priority"] == "high" else "⚡" if opt["priority"] == "medium" else "💡"
                print(f"  {i}. {priority_icon} {opt['title']}")
                print(f"     {opt['description']}")
                print(f"     预期效果: {opt['expected_result']}")
                print()
    
    def execute_optimization(self, optimization_action: str):
        """Dispatch a named optimization action to its implementation.

        Returns the handler's result dict, or a failure dict for unknown actions.
        """
        handlers = {
            "merge_project_management_files": self._merge_project_management_files,
            "reorganize_evolution_files": self._reorganize_evolution_files,
            "split_file": self._split_large_files,
        }
        handler = handlers.get(optimization_action)
        if handler is None:
            return {"success": False, "message": f"未知的优化操作: {optimization_action}"}
        return handler()
    
    def _merge_project_management_files(self) -> Dict[str, Any]:
        """合并项目管理文件"""
        
        enterprise_file = self.rules_dir / "system" / "enterprise_project_management.yaml"
        basic_file = self.rules_dir / "system" / "project_management.yaml"
        
        if not enterprise_file.exists() or not basic_file.exists():
            return {"success": False, "message": "目标文件不存在"}
        
        try:
            # 读取两个文件的内容
            with open(enterprise_file, 'r', encoding='utf-8') as f:
                enterprise_content = yaml.safe_load(f)
            
            with open(basic_file, 'r', encoding='utf-8') as f:
                basic_content = yaml.safe_load(f)
            
            # 合并内容（这里可以实现更智能的合并逻辑）
            merged_content = self._intelligent_merge_yaml(enterprise_content, basic_content)
            
            # 备份原文件
            backup_dir = self.rules_dir / "backup" / datetime.now().strftime("%Y%m%d_%H%M%S")
            backup_dir.mkdir(parents=True, exist_ok=True)
            
            # 写入合并后的文件
            with open(enterprise_file, 'w', encoding='utf-8') as f:
                yaml.dump(merged_content, f, default_flow_style=False, allow_unicode=True, indent=2)
            
            # 删除基础文件
            basic_file.unlink()
            
            return {
                "success": True, 
                "message": "项目管理文件已成功合并",
                "details": {
                    "merged_file": str(enterprise_file),
                    "removed_file": str(basic_file),
                    "backup_location": str(backup_dir)
                }
            }
            
        except Exception as e:
            return {"success": False, "message": f"合并失败: {str(e)}"}
    
    def _intelligent_merge_yaml(self, primary: Dict, secondary: Dict) -> Dict:
        """Merge two YAML dicts, with primary taking precedence on conflicts.

        Keys unique to secondary are added; nested dicts are merged
        recursively; on a non-dict key collision primary's value is kept.
        Returns a new structure — neither argument is mutated. (The previous
        version shallow-copied primary and then mutated its nested dicts
        in place.)
        """
        import copy  # local import: needed only for the defensive deep copies

        merged = copy.deepcopy(primary)

        def merge_recursive(dest: Dict, src: Dict):
            for key, value in src.items():
                if key not in dest:
                    # Deep-copy so later mutation of secondary cannot leak in.
                    dest[key] = copy.deepcopy(value)
                elif isinstance(dest[key], dict) and isinstance(value, dict):
                    merge_recursive(dest[key], value)
                # Otherwise keep dest's (primary's) value.

        merge_recursive(merged, secondary)
        return merged
    
    def _reorganize_evolution_files(self) -> Dict[str, Any]:
        """Stub: reorganizing evolution rule files is not implemented yet."""
        # TODO: move/split evolution rule files into a clearer hierarchy.
        result = {"success": True, "message": "演进文件重组完成"}
        return result
    
    def _split_large_files(self) -> Dict[str, Any]:
        """Stub: splitting oversized rule files is not implemented yet."""
        # TODO: split files flagged as oversized_file into focused modules.
        result = {"success": True, "message": "大文件拆分完成"}
        return result

def main():
    """Entry point: run the analysis, then auto-apply high-priority optimizations."""
    print("🔍 intelligent_rules_v2.0 智能规则冲突分析")
    print("基于智能闭环演进的深度内容分析")
    print()

    analyzer = RulesConflictAnalyzer()
    results = analyzer.analyze_all_conflicts()

    # Only high-priority proposals are executed automatically.
    if results["optimizations"]:
        print("\n🚀 执行自动优化...")
        for proposal in results["optimizations"]:
            if proposal["priority"] != "high":
                continue
            print(f"\n⚡ 执行: {proposal['title']}")
            outcome = analyzer.execute_optimization(proposal["action"])

            if outcome["success"]:
                print(f"  ✅ {outcome['message']}")
                for key, value in outcome.get("details", {}).items():
                    print(f"     {key}: {value}")
            else:
                print(f"  ❌ {outcome['message']}")

    print("\n🎉 智能规则冲突分析完成！")

if __name__ == "__main__":
    main()