"""
优化建议应用引擎
整合所有分析器，协调优化流程，应用优化建议
"""

import asyncio
import uuid
from typing import Dict, List, Any, Optional, Tuple
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from enum import Enum
import logging
import json
import redis
import pickle

from .structure_analyzer import StructureAnalyzer, StructureAnalysisResult
from .coherence_checker import CoherenceChecker, CoherenceAnalysisResult
from .fluency_evaluator import FluencyEvaluator, FluencyAnalysisResult
from .keyword_analyzer import KeywordAnalyzer, KeywordAnalysisResult
from .title_optimizer import TitleOptimizer, TitleOptimizationResult

logger = logging.getLogger(__name__)


class OptimizationType(Enum):
    """Kinds of optimization analysis the engine can run."""
    STRUCTURE = "structure"
    COHERENCE = "coherence"
    FLUENCY = "fluency"
    KEYWORD = "keyword"
    TITLE = "title"
    SEO = "seo"  # declared but not dispatched by OptimizationEngine.analyze() in this module
    ALL = "all"  # expanded to every concrete analyzer type during analyze()


class OptimizationStatus(Enum):
    """Lifecycle states of an analysis task."""
    PENDING = "pending"
    ANALYZING = "analyzing"    # set while analyzers are running
    COMPLETED = "completed"    # set once results are merged and scored
    FAILED = "failed"          # NOTE(review): never set within this module — presumably for callers
    APPLIED = "applied"        # NOTE(review): never set within this module — presumably for callers


@dataclass
class OptimizationSuggestion:
    """A single optimization suggestion, unified across all analyzers.

    Instances are produced by OptimizationEngine._merge_suggestions from
    each analyzer's raw suggestion dicts; `score` is filled in later when
    suggestions are prioritized.
    """
    id: str                                # unique suggestion id (uuid4 string)
    type: OptimizationType                 # which analyzer produced it
    priority: str  # high, medium, low
    description: str                       # short human-readable summary
    detail: str                            # longer explanation
    location: Optional[str] = None         # where in the text it applies, if known
    original_text: Optional[str] = None    # text to replace when auto-applying
    suggested_text: Optional[str] = None   # replacement text when auto-applying
    impact_score: float = 0.0              # analyzer-estimated impact
    auto_applicable: bool = False          # can be applied by a plain text replace
    applied: bool = False                  # set once the suggestion has been applied
    # Combined ranking value computed during prioritization.  Previously
    # attached as a dynamic attribute by the engine; declared explicitly so
    # the class stays correct under slots/serialization and the attribute
    # always exists.
    score: float = 0.0


@dataclass
class OptimizationHistory:
    """Audit record for one optimization action (apply / reject / undo)."""
    timestamp: datetime        # when the action happened
    suggestion_id: str         # id of the OptimizationSuggestion acted on
    action: str  # applied, rejected, undone
    original_content: str      # content before the action (truncated to 100 chars by the engine)
    modified_content: str      # content after the action (truncated to 100 chars by the engine)


@dataclass
class ComprehensiveAnalysis:
    """Aggregated result of one analysis task across all analyzers."""
    task_id: str                    # uuid4 task identifier
    status: OptimizationStatus      # current lifecycle state
    content_length: int             # length of the analyzed content
    overall_score: float            # weighted combination of analyzer scores, in [0, 1]
    structure_result: Optional[StructureAnalysisResult] = None
    coherence_result: Optional[CoherenceAnalysisResult] = None
    fluency_result: Optional[FluencyAnalysisResult] = None
    keyword_result: Optional[KeywordAnalysisResult] = None
    title_result: Optional[TitleOptimizationResult] = None
    # Merged suggestions in collection order, then the same list sorted by
    # ranking score, and the apply/undo audit trail.
    all_suggestions: List[OptimizationSuggestion] = field(default_factory=list)
    prioritized_suggestions: List[OptimizationSuggestion] = field(default_factory=list)
    optimization_history: List[OptimizationHistory] = field(default_factory=list)
    created_at: datetime = field(default_factory=datetime.now)
    updated_at: datetime = field(default_factory=datetime.now)


class OptimizationEngine:
    """Coordinates all content analyzers and applies optimization suggestions.

    Runs the individual analyzers (structure, coherence, fluency, keywords,
    title) in parallel, merges their output into one prioritized suggestion
    list, computes a weighted overall score, and can apply or undo
    individual suggestions.  Results are cached in memory and, when
    configured, in Redis with a 24-hour TTL.
    """
    
    def __init__(self, redis_url: Optional[str] = None):
        """Initialize analyzers, caches, score weights and auto-fix rules.

        Args:
            redis_url: Optional Redis connection URL.  When given and
                reachable, analysis results are additionally cached in
                Redis; on connection failure the engine logs a warning and
                falls back to the in-memory cache only.
        """
        # One analyzer per optimization type.
        self.structure_analyzer = StructureAnalyzer()
        self.coherence_checker = CoherenceChecker()
        self.fluency_evaluator = FluencyEvaluator()
        self.keyword_analyzer = KeywordAnalyzer()
        self.title_optimizer = TitleOptimizer()
        
        # In-memory cache of analysis results, keyed by task id.
        self.analysis_cache: Dict[str, ComprehensiveAnalysis] = {}
        
        # Optional Redis cache (best-effort, 24h TTL).
        self.redis_client = None
        self.cache_ttl = timedelta(hours=24)
        if redis_url:
            try:
                self.redis_client = redis.from_url(redis_url)
                self.redis_client.ping()
                logger.info("Redis cache connected successfully")
            except Exception as e:
                logger.warning(f"Redis connection failed, using memory cache only: {e}")
                self.redis_client = None
        
        # Relative weight of each analyzer in the overall score (sums to 1.0).
        self.weights = {
            OptimizationType.STRUCTURE: 0.25,
            OptimizationType.COHERENCE: 0.20,
            OptimizationType.FLUENCY: 0.20,
            OptimizationType.KEYWORD: 0.20,
            OptimizationType.TITLE: 0.15
        }
        
        # Regex rules applied by auto_optimize(), in insertion order.
        # The duplicate-character rules use {2,} so runs of any length
        # collapse to a single character (the previous pair patterns such
        # as r'的的' left residue on runs of three or more).
        self.auto_optimization_rules = {
            'remove_duplicate_punctuation': r'([，。！？；])\1+',
            'fix_chinese_de': r'的{2,}',
            'fix_chinese_le': r'了{2,}',
            'fix_chinese_shi': r'是{2,}',
            # Kept for completeness; auto_optimize() implements this rule
            # with str.endswith() rather than the regex.
            'add_period_at_end': r'[^。！？]$',
            'remove_extra_spaces': r'\s{2,}',
            # Fixed: single backslash so [\u4e00-\u9fff] is the CJK
            # ideograph range.  The previous doubled backslash made the
            # class match literal characters (backslash, 'u', digits and
            # the 0x30-0x5C range), inserting spaces into e.g. "A1".
            'fix_english_chinese_space': r'([a-zA-Z])([\u4e00-\u9fff])'
        }
    
    async def analyze(self, content: str, title: str = "",
                     optimization_types: List[OptimizationType] = None,
                     keywords: List[str] = None) -> ComprehensiveAnalysis:
        """
        Run a comprehensive analysis of the given content.
        
        Args:
            content: Article body text.
            title: Article title (title analysis is skipped when empty).
            optimization_types: Analysis types to run; defaults to ALL.
            keywords: Target keywords, forwarded to the title optimizer.
            
        Returns:
            ComprehensiveAnalysis with per-analyzer results, merged and
            prioritized suggestions, and the weighted overall score.
        """
        task_id = str(uuid.uuid4())
        
        analysis = ComprehensiveAnalysis(
            task_id=task_id,
            status=OptimizationStatus.ANALYZING,
            content_length=len(content),
            overall_score=0.0
        )
        
        # Normalize the requested types; ALL expands to every concrete analyzer.
        if not optimization_types:
            optimization_types = [OptimizationType.ALL]
        
        if OptimizationType.ALL in optimization_types:
            optimization_types = [
                OptimizationType.STRUCTURE,
                OptimizationType.COHERENCE,
                OptimizationType.FLUENCY,
                OptimizationType.KEYWORD,
                OptimizationType.TITLE
            ]
        
        # Build the analyzer coroutines to run concurrently.
        tasks = []
        
        if OptimizationType.STRUCTURE in optimization_types:
            tasks.append(self._analyze_structure(content))
        
        if OptimizationType.COHERENCE in optimization_types:
            tasks.append(self._analyze_coherence(content))
        
        if OptimizationType.FLUENCY in optimization_types:
            tasks.append(self._analyze_fluency(content))
        
        if OptimizationType.KEYWORD in optimization_types:
            tasks.append(self._analyze_keywords(content))
        
        if OptimizationType.TITLE in optimization_types and title:
            tasks.append(self._analyze_title(title, content, keywords))
        
        # Run all analyzers in parallel; one failure must not abort the rest.
        results = await asyncio.gather(*tasks, return_exceptions=True)
        
        # Route each result to its slot by type; log and skip failures.
        for result in results:
            if isinstance(result, Exception):
                logger.error(f"Analysis failed: {result}")
                continue
            
            if isinstance(result, StructureAnalysisResult):
                analysis.structure_result = result
            elif isinstance(result, CoherenceAnalysisResult):
                analysis.coherence_result = result
            elif isinstance(result, FluencyAnalysisResult):
                analysis.fluency_result = result
            elif isinstance(result, KeywordAnalysisResult):
                analysis.keyword_result = result
            elif isinstance(result, TitleOptimizationResult):
                analysis.title_result = result
        
        # Merge, prioritize and score.
        analysis.all_suggestions = self._merge_suggestions(analysis)
        analysis.prioritized_suggestions = self._prioritize_suggestions(
            analysis.all_suggestions
        )
        analysis.overall_score = self._calculate_overall_score(analysis)
        
        analysis.status = OptimizationStatus.COMPLETED
        analysis.updated_at = datetime.now()
        
        # Cache in memory and (best-effort) in Redis.
        self.analysis_cache[task_id] = analysis
        self._cache_to_redis(task_id, analysis)
        
        return analysis
    
    async def _analyze_structure(self, content: str) -> StructureAnalysisResult:
        """Run the (synchronous) structure analyzer in a worker thread."""
        return await asyncio.to_thread(self.structure_analyzer.analyze, content)
    
    async def _analyze_coherence(self, content: str) -> CoherenceAnalysisResult:
        """Run the (synchronous) coherence checker in a worker thread."""
        return await asyncio.to_thread(self.coherence_checker.check, content)
    
    async def _analyze_fluency(self, content: str) -> FluencyAnalysisResult:
        """Run the (synchronous) fluency evaluator in a worker thread."""
        return await asyncio.to_thread(self.fluency_evaluator.evaluate, content)
    
    async def _analyze_keywords(self, content: str) -> KeywordAnalysisResult:
        """Run the (synchronous) keyword analyzer in a worker thread."""
        return await asyncio.to_thread(self.keyword_analyzer.analyze, content)
    
    async def _analyze_title(self, title: str, content: str,
                            keywords: List[str] = None) -> TitleOptimizationResult:
        """Run the (synchronous) title optimizer in a worker thread."""
        return await asyncio.to_thread(
            self.title_optimizer.optimize, title, content, keywords
        )
    
    def _suggestion_from_dict(self, sug: Dict[str, Any],
                              sug_type: OptimizationType) -> OptimizationSuggestion:
        """Build a unified suggestion from an analyzer's raw suggestion dict.

        Missing keys fall back to neutral defaults ('medium' priority,
        empty text, zero impact); such suggestions are never auto-applicable.
        """
        return OptimizationSuggestion(
            id=str(uuid.uuid4()),
            type=sug_type,
            priority=sug.get('priority', 'medium'),
            description=sug.get('description', ''),
            detail=sug.get('detail', ''),
            location=sug.get('location'),
            impact_score=sug.get('impact', 0.0),
            auto_applicable=False
        )
    
    def _merge_suggestions(self, analysis: ComprehensiveAnalysis) -> List[OptimizationSuggestion]:
        """Collect and normalize suggestions from every available analyzer.

        Plain analyzer suggestions go through _suggestion_from_dict();
        fluency sentence rewrites and title variants additionally carry
        original/suggested text and are marked auto-applicable.
        """
        suggestions = []
        
        # Structure suggestions.
        if analysis.structure_result:
            suggestions.extend(
                self._suggestion_from_dict(s, OptimizationType.STRUCTURE)
                for s in analysis.structure_result.suggestions
            )
        
        # Coherence suggestions (these may carry a location).
        if analysis.coherence_result:
            suggestions.extend(
                self._suggestion_from_dict(s, OptimizationType.COHERENCE)
                for s in analysis.coherence_result.suggestions
            )
        
        # Fluency suggestions plus concrete sentence rewrites.
        if analysis.fluency_result:
            suggestions.extend(
                self._suggestion_from_dict(s, OptimizationType.FLUENCY)
                for s in analysis.fluency_result.suggestions
            )
            for rewrite in analysis.fluency_result.rewrite_suggestions:
                suggestions.append(OptimizationSuggestion(
                    id=str(uuid.uuid4()),
                    type=OptimizationType.FLUENCY,
                    priority='high',
                    description=rewrite.get('reason', '句子优化'),
                    detail=rewrite.get('improvement', ''),
                    original_text=rewrite.get('original'),
                    suggested_text=rewrite.get('suggested'),
                    impact_score=0.15,
                    auto_applicable=True
                ))
        
        # Keyword suggestions.
        if analysis.keyword_result:
            suggestions.extend(
                self._suggestion_from_dict(s, OptimizationType.KEYWORD)
                for s in analysis.keyword_result.suggestions
            )
        
        # Title suggestions plus up to three ready-made title variants.
        if analysis.title_result:
            suggestions.extend(
                self._suggestion_from_dict(s, OptimizationType.TITLE)
                for s in analysis.title_result.suggestions
            )
            for variant in analysis.title_result.variants[:3]:
                suggestions.append(OptimizationSuggestion(
                    id=str(uuid.uuid4()),
                    type=OptimizationType.TITLE,
                    priority='medium',
                    description=f"标题变体（{variant.style}风格）",
                    detail=', '.join(variant.improvements),
                    original_text=analysis.title_result.analysis.original_title,
                    suggested_text=variant.title,
                    impact_score=variant.score * 0.2,
                    auto_applicable=True
                ))
        
        return suggestions
    
    def _prioritize_suggestions(self, 
                               suggestions: List[OptimizationSuggestion]) -> List[OptimizationSuggestion]:
        """Rank suggestions by priority weight, type weight and impact.

        Side effect: stores the combined ranking value on each
        suggestion's `score` attribute before sorting descending by it.
        """
        priority_weights = {
            'high': 3,
            'medium': 2,
            'low': 1
        }
        
        for sug in suggestions:
            priority_score = priority_weights.get(sug.priority, 1)
            type_weight = self.weights.get(sug.type, 0.1)
            sug.score = priority_score * type_weight + sug.impact_score
        
        return sorted(suggestions, key=lambda s: s.score, reverse=True)
    
    def _calculate_overall_score(self, analysis: ComprehensiveAnalysis) -> float:
        """Compute the weighted overall score over the analyzers that ran.

        Only analyzers with a result contribute; their weights are
        renormalized so a partial run still yields a score in [0, 1].
        """
        scores = []
        weights = []
        
        if analysis.structure_result:
            scores.append(analysis.structure_result.structure_score)
            weights.append(self.weights[OptimizationType.STRUCTURE])
        
        if analysis.coherence_result:
            scores.append(analysis.coherence_result.overall_score)
            weights.append(self.weights[OptimizationType.COHERENCE])
        
        if analysis.fluency_result:
            scores.append(analysis.fluency_result.overall_score)
            weights.append(self.weights[OptimizationType.FLUENCY])
        
        if analysis.keyword_result:
            # Keyword score starts perfect and loses 0.2/0.1 per high/medium issue.
            keyword_score = 1.0
            for issue in analysis.keyword_result.issues:
                if issue.severity == 'high':
                    keyword_score -= 0.2
                elif issue.severity == 'medium':
                    keyword_score -= 0.1
            scores.append(max(0, keyword_score))
            weights.append(self.weights[OptimizationType.KEYWORD])
        
        if analysis.title_result:
            # Title score: equal parts attractiveness and SEO.
            title_score = (
                analysis.title_result.attractiveness_score * 0.5 +
                analysis.title_result.seo_score * 0.5
            )
            scores.append(title_score)
            weights.append(self.weights[OptimizationType.TITLE])
        
        if not scores:
            return 0.0
        
        total_weight = sum(weights)
        if total_weight == 0:
            return 0.0
        
        weighted_score = sum(s * w for s, w in zip(scores, weights)) / total_weight
        
        return min(1.0, max(0.0, weighted_score))
    
    def _get_analysis(self, task_id: str) -> Optional[ComprehensiveAnalysis]:
        """Fetch an analysis from memory, falling back to the Redis cache.

        Redis hits are re-cached in memory so subsequent mutations
        (apply/undo/history) operate on the same object.
        """
        analysis = self.analysis_cache.get(task_id)
        if analysis is None:
            analysis = self._get_from_redis(task_id)
            if analysis is not None:
                self.analysis_cache[task_id] = analysis
        return analysis
    
    def apply_optimization(self, task_id: str, suggestion_id: str,
                          content: str, custom_text: str = None) -> Tuple[str, OptimizationHistory]:
        """
        Apply a single optimization suggestion to the content.
        
        Args:
            task_id: Analysis task id.
            suggestion_id: Id of the suggestion to apply.
            content: Current content to modify.
            custom_text: Optional replacement overriding suggested_text.
            
        Returns:
            (modified content, history record)
        
        Raises:
            ValueError: If the task or the suggestion cannot be found.
        """
        # Consistency fix: like export_analysis(), fall back to Redis when
        # the task is no longer in the memory cache.
        analysis = self._get_analysis(task_id)
        if not analysis:
            raise ValueError(f"Task {task_id} not found")
        
        suggestion = next(
            (sug for sug in analysis.all_suggestions if sug.id == suggestion_id),
            None
        )
        if not suggestion:
            raise ValueError(f"Suggestion {suggestion_id} not found")
        
        # Only auto-applicable suggestions with a concrete source snippet can
        # be applied mechanically; everything else returns the content as-is.
        modified_content = content
        if suggestion.auto_applicable and suggestion.original_text:
            replacement = custom_text or suggestion.suggested_text
            if replacement:
                modified_content = content.replace(
                    suggestion.original_text, replacement
                )
        
        history = OptimizationHistory(
            timestamp=datetime.now(),
            suggestion_id=suggestion_id,
            action='applied',
            original_content=content[:100],  # keep only a 100-char preview
            modified_content=modified_content[:100]
        )
        
        suggestion.applied = True
        analysis.optimization_history.append(history)
        analysis.updated_at = datetime.now()
        
        return modified_content, history
    
    def batch_apply_optimizations(self, task_id: str, suggestion_ids: List[str],
                                 content: str) -> Tuple[str, List[OptimizationHistory]]:
        """
        Apply several optimization suggestions in sequence (best-effort).
        
        Args:
            task_id: Analysis task id.
            suggestion_ids: Ids of the suggestions to apply, in order.
            content: Original content.
            
        Returns:
            (modified content, list of history records for the ones that
            succeeded).  Failures are logged and skipped.
        """
        modified_content = content
        history_records = []
        
        for suggestion_id in suggestion_ids:
            try:
                modified_content, history = self.apply_optimization(
                    task_id, suggestion_id, modified_content
                )
                history_records.append(history)
            except Exception as e:
                # Best-effort batch: one bad id must not abort the rest.
                logger.error(f"Failed to apply suggestion {suggestion_id}: {e}")
        
        return modified_content, history_records
    
    def auto_optimize(self, content: str) -> str:
        """
        Apply the built-in mechanical clean-up rules to the content.
        
        Collapses duplicated punctuation and particles, normalizes
        whitespace, inserts a space between Latin letters and CJK
        ideographs, and ensures the text ends with a sentence terminator.
        
        Args:
            content: Original content.
            
        Returns:
            Cleaned-up content.
        """
        import re
        
        optimized = content
        
        # Rules run in the dict's insertion order (see __init__).
        for rule_name, pattern in self.auto_optimization_rules.items():
            if rule_name == 'remove_duplicate_punctuation':
                optimized = re.sub(pattern, r'\1', optimized)
            elif rule_name == 'fix_chinese_de':
                optimized = re.sub(pattern, '的', optimized)
            elif rule_name == 'fix_chinese_le':
                optimized = re.sub(pattern, '了', optimized)
            elif rule_name == 'fix_chinese_shi':
                optimized = re.sub(pattern, '是', optimized)
            elif rule_name == 'add_period_at_end':
                # Implemented with endswith rather than the regex pattern.
                if not optimized.endswith(('。', '！', '？')):
                    optimized += '。'
            elif rule_name == 'remove_extra_spaces':
                optimized = re.sub(pattern, ' ', optimized)
            elif rule_name == 'fix_english_chinese_space':
                optimized = re.sub(pattern, r'\1 \2', optimized)
        
        return optimized
    
    def undo_optimization(self, task_id: str, history_id: str) -> bool:
        """
        Undo a previously applied optimization.
        
        Args:
            task_id: Analysis task id.
            history_id: The suggestion id stored on the history entry
                (OptimizationHistory has no id of its own; entries are
                matched by their suggestion_id).
            
        Returns:
            True when a matching history entry was found and reverted.
        """
        analysis = self._get_analysis(task_id)
        if not analysis:
            return False
        
        for history in analysis.optimization_history:
            if history.suggestion_id == history_id:
                # Record the reversal with the contents swapped.
                undo_history = OptimizationHistory(
                    timestamp=datetime.now(),
                    suggestion_id=history_id,
                    action='undone',
                    original_content=history.modified_content,
                    modified_content=history.original_content
                )
                analysis.optimization_history.append(undo_history)
                
                # Mark the corresponding suggestion as no longer applied.
                for sug in analysis.all_suggestions:
                    if sug.id == history_id:
                        sug.applied = False
                        break
                
                return True
        
        return False
    
    def _cache_to_redis(self, task_id: str, analysis: ComprehensiveAnalysis):
        """Best-effort write of the analysis to Redis with the configured TTL."""
        if not self.redis_client:
            return
        
        try:
            # NOTE(security): pickle is acceptable here only because the
            # Redis instance is assumed trusted and stores our own objects.
            cache_data = pickle.dumps(analysis)
            self.redis_client.setex(
                f"optimization:{task_id}",
                self.cache_ttl,
                cache_data
            )
            logger.debug(f"Cached analysis {task_id} to Redis")
        except Exception as e:
            logger.error(f"Failed to cache to Redis: {e}")
    
    def _get_from_redis(self, task_id: str) -> Optional[ComprehensiveAnalysis]:
        """Best-effort read of a cached analysis from Redis; None on miss/error."""
        if not self.redis_client:
            return None
        
        try:
            cache_data = self.redis_client.get(f"optimization:{task_id}")
            if cache_data:
                # NOTE(security): pickle.loads executes arbitrary code —
                # never point redis_url at an untrusted server.
                analysis = pickle.loads(cache_data)
                logger.debug(f"Retrieved analysis {task_id} from Redis cache")
                return analysis
        except Exception as e:
            logger.error(f"Failed to get from Redis: {e}")
        
        return None
    
    def export_analysis(self, task_id: str) -> Dict[str, Any]:
        """
        Export a JSON-serializable summary of an analysis.
        
        Args:
            task_id: Analysis task id.
            
        Returns:
            Summary dict with scores, suggestion counts and the top ten
            prioritized suggestions, or {} when the task is unknown.
        """
        # Memory first, then Redis (shared lookup with apply/undo).
        analysis = self._get_analysis(task_id)
        if not analysis:
            return {}
        
        return {
            'task_id': analysis.task_id,
            'status': analysis.status.value,
            'overall_score': analysis.overall_score,
            'content_length': analysis.content_length,
            'created_at': analysis.created_at.isoformat(),
            'updated_at': analysis.updated_at.isoformat(),
            'scores': {
                'structure': analysis.structure_result.structure_score if analysis.structure_result else None,
                'coherence': analysis.coherence_result.overall_score if analysis.coherence_result else None,
                'fluency': analysis.fluency_result.overall_score if analysis.fluency_result else None,
                'keywords': len(analysis.keyword_result.primary_keywords) if analysis.keyword_result else 0,
                'title': analysis.title_result.attractiveness_score if analysis.title_result else None
            },
            'suggestions_count': len(analysis.all_suggestions),
            'high_priority_count': sum(1 for s in analysis.all_suggestions if s.priority == 'high'),
            'applied_count': sum(1 for s in analysis.all_suggestions if s.applied),
            'top_suggestions': [
                {
                    'id': sug.id,
                    'type': sug.type.value,
                    'priority': sug.priority,
                    'description': sug.description,
                    'impact': sug.impact_score,
                    'auto_applicable': sug.auto_applicable,
                    'applied': sug.applied
                }
                for sug in analysis.prioritized_suggestions[:10]
            ]
        }