"""智能报告生成器

基于分析结果生成智能洞察、自动摘要和推荐建议。"""

import logging
from typing import Dict, List, Any, Optional
from datetime import datetime
import json
import pandas as pd
import numpy as np
import random
from dataclasses import dataclass
from .report_templates import ReportTemplates, CONTENT_TEMPLATES, STYLE_TEMPLATES

def convert_numpy_types(obj):
    """Recursively convert numpy/pandas values into JSON-serializable natives.

    Handles numpy scalars, ndarrays, pandas Series/Index, and arbitrarily
    nested dicts/lists/tuples. Dict keys are always stringified (JSON object
    keys must be strings). Scalar NA values (NaN/NaT/None) become None.

    Args:
        obj: any value, possibly containing numpy/pandas types.

    Returns:
        An equivalent structure built only from Python-native types
        (int, float, bool, str, None, list, dict).
    """
    if obj is None:
        return None
    # np.integer / np.floating are the abstract bases of all sized variants
    # (int8..int64, float16..float64), so one isinstance check suffices.
    elif isinstance(obj, np.integer):
        return int(obj)
    elif isinstance(obj, np.floating):
        return float(obj)
    elif isinstance(obj, np.bool_):
        return bool(obj)
    elif isinstance(obj, np.ndarray):
        return obj.tolist()
    elif isinstance(obj, (pd.Series, pd.Index)):
        return obj.tolist()
    elif isinstance(obj, dict):
        # Convert values recursively; force every key to str so the result
        # is valid as a JSON object (numpy keys are converted first so the
        # string reflects the native value, e.g. np.int64(1) -> "1").
        converted_dict = {}
        for key, value in obj.items():
            if hasattr(key, 'dtype') or isinstance(key, (np.integer, np.floating, np.bool_)):
                converted_key = str(convert_numpy_types(key))
            else:
                converted_key = str(key)
            converted_dict[converted_key] = convert_numpy_types(value)
        return converted_dict
    elif isinstance(obj, (list, tuple)):
        return [convert_numpy_types(item) for item in obj]
    # Scalar NA check must come after the container branches: pd.isna on a
    # list/array would return an array (ambiguous in a bool context).
    elif pd.isna(obj):
        return None
    elif hasattr(obj, 'item'):  # any remaining numpy scalar
        return obj.item()
    elif hasattr(obj, 'dtype'):  # other dtype-carrying objects
        try:
            if np.issubdtype(obj.dtype, np.integer):
                return int(obj)
            elif np.issubdtype(obj.dtype, np.floating):
                return float(obj)
            elif np.issubdtype(obj.dtype, np.bool_):
                return bool(obj)
            else:
                return str(obj)
        except (ValueError, TypeError):
            return str(obj)
    else:
        # Plain Python value (str, int, float, ...): pass through unchanged.
        # (The original wrapped this in a dead try/except — a bare `return`
        # cannot raise.)
        return obj

@dataclass
class Insight:
    """A single automatically generated analysis insight.

    Instances are produced by the report generator's helper methods and
    sorted by (importance, confidence) before reporting.
    """
    title: str            # short headline for the insight
    description: str      # full explanation, rendered from a template string
    importance: str  # 'high', 'medium', 'low'
    category: str    # values seen in this file: 'statistical', 'clustering', 'text', 'data_quality' (original comment said 'trend' — not produced here)
    confidence: float     # confidence score, roughly in [0, 1]
    data: Dict[str, Any]  # raw values the insight was derived from

class IntelligentReportGenerator:
    """智能报告生成器"""
    
    def __init__(self, config=None):
        """Initialize the generator: logger, insight and recommendation templates.

        Args:
            config: optional configuration object; stored on the instance but
                not read anywhere in this class (presumably consumed by
                callers — TODO confirm).
        """
        self.logger = logging.getLogger(__name__)
        self.config = config
        
        # Insight templates: category -> situation key -> Chinese format
        # string, filled in via .format() by the _generate_*_insights helpers.
        self.insight_templates = {
            'correlation': {
                'high_positive': "发现强正相关关系：{var1} 和 {var2} 之间存在强正相关（相关系数：{corr:.3f}），这表明两个变量趋向于同时增加或减少。",
                'high_negative': "发现强负相关关系：{var1} 和 {var2} 之间存在强负相关（相关系数：{corr:.3f}），这表明一个变量增加时另一个变量趋向于减少。",
                'weak': "发现弱相关关系：{var1} 和 {var2} 之间相关性较弱（相关系数：{corr:.3f}），两个变量之间的线性关系不明显。"
            },
            'clustering': {
                'clear_separation': "聚类分析显示数据可以明确分为 {n_clusters} 个群体，群体间差异明显（轮廓系数：{silhouette:.3f}）。",
                'moderate_separation': "聚类分析识别出 {n_clusters} 个群体，但群体间存在一定重叠（轮廓系数：{silhouette:.3f}）。",
                'poor_separation': "聚类分析结果显示数据群体划分不够清晰（轮廓系数：{silhouette:.3f}），可能需要重新考虑聚类策略。"
            },
            'distribution': {
                'normal': "变量 {var} 呈现正态分布特征，数据分布相对均匀。",
                'skewed_right': "变量 {var} 呈现右偏分布，大部分数据集中在较小值区间。",
                'skewed_left': "变量 {var} 呈现左偏分布，大部分数据集中在较大值区间。",
                'bimodal': "变量 {var} 呈现双峰分布，可能存在两个不同的数据群体。"
            },
            'outliers': {
                'few': "检测到少量异常值（{count} 个，占比 {percentage:.1f}%），对整体分析影响较小。",
                'moderate': "检测到一定数量的异常值（{count} 个，占比 {percentage:.1f}%），建议进一步调查这些数据点。",
                'many': "检测到大量异常值（{count} 个，占比 {percentage:.1f}%），可能影响分析结果的可靠性。"
            },
            'text_sentiment': {
                'positive': "文本情感分析显示整体情感倾向积极（正面情感占比：{positive:.1f}%），反映出良好的用户态度。",
                'negative': "文本情感分析显示整体情感倾向消极（负面情感占比：{negative:.1f}%），需要关注用户关切的问题。",
                'neutral': "文本情感分析显示情感倾向相对中性（中性情感占比：{neutral:.1f}%），用户态度较为客观。",
                'mixed': "文本情感分析显示情感分布较为均匀，正面（{positive:.1f}%）、负面（{negative:.1f}%）和中性（{neutral:.1f}%）情感并存。"
            }
        }
        
        # Recommendation text pools; _generate_recommendations draws random
        # samples from these lists according to insight category/importance.
        self.recommendation_templates = {
            'data_quality': [
                "建议对异常值进行进一步调查，确认数据的准确性，可以使用箱线图或Z-score方法识别异常值。",
                "考虑增加数据收集的样本量以提高分析的可靠性，建议样本量至少达到当前的1.5倍。",
                "建议对缺失数据较多的变量进行补充收集，或使用插值、回归等方法进行数据填补。",
                "建议建立数据质量监控机制，定期检查数据的完整性、准确性和一致性。",
                "考虑对数据进行标准化或归一化处理，以消除量纲差异对分析结果的影响。"
            ],
            'analysis_depth': [
                "建议进行更深入的子群体分析，探索不同群体的特征差异，可以使用决策树或随机森林方法。",
                "考虑引入更多相关变量进行多元分析，如主成分分析(PCA)或因子分析。",
                "建议进行时间序列分析，观察趋势变化，可以使用ARIMA模型或季节性分解。",
                "建议进行交互效应分析，探索变量间的相互作用对结果的影响。",
                "考虑使用机器学习方法进行预测建模，如随机森林、梯度提升或神经网络。"
            ],
            'actionable': [
                "基于聚类结果，建议制定针对不同群体的差异化策略，为每个群体设计专门的产品或服务方案。",
                "根据相关性分析结果，建议优化相关业务流程，重点关注强相关变量的协同管理。",
                "基于文本分析结果，建议改进产品或服务的相关方面，特别关注用户反馈中的高频关键词。",
                "建议建立数据驱动的决策机制，定期更新分析模型并监控关键指标的变化。",
                "考虑开展A/B测试验证分析结果的有效性，并持续优化策略实施效果。"
            ],
            'business_insights': [
                "建议根据用户群体特征制定个性化的营销策略，提高用户参与度和转化率。",
                "考虑优化产品功能设计，重点关注用户最关心的功能点和痛点。",
                "建议建立用户行为预测模型，提前识别流失风险用户并采取挽留措施。",
                "根据情感分析结果，建议改进客户服务质量，提升用户满意度。",
                "建议建立竞争对手分析框架，定期监控市场变化和竞争态势。"
            ],
            'technical_optimization': [
                "建议优化数据收集流程，确保数据的及时性和准确性。",
                "考虑建立自动化的数据处理管道，提高分析效率和一致性。",
                "建议实施数据可视化仪表板，便于实时监控关键业务指标。",
                "考虑引入更先进的分析工具和算法，提升分析的深度和准确性。",
                "建议建立数据安全和隐私保护机制，确保数据使用的合规性。"
            ]
        }
    
    def generate_insights(self, analysis_results: Dict[str, Any]) -> List[Insight]:
        """Produce insights from whichever analysis results are available.

        Args:
            analysis_results: mapping of analysis-module name to its results.

        Returns:
            Insights sorted by importance (high first), then by confidence;
            an empty list if generation fails.
        """
        try:
            # Each known result key maps to the helper that interprets it.
            handlers = (
                ('statistical_analysis', self._generate_statistical_insights),
                ('cluster_analysis', self._generate_clustering_insights),
                ('text_analysis', self._generate_text_insights),
                ('data_quality', self._generate_data_quality_insights),
            )

            collected: List[Insight] = []
            for key, handler in handlers:
                if key in analysis_results:
                    collected.extend(handler(analysis_results[key]))

            # Sort: higher importance first, ties broken by confidence.
            rank = {'high': 3, 'medium': 2, 'low': 1}
            collected.sort(
                key=lambda ins: (rank.get(ins.importance, 0), ins.confidence),
                reverse=True
            )
            return collected

        except Exception as e:
            self.logger.error(f"生成洞察失败: {e}")
            return []
    
    def _generate_statistical_insights(self, stats: Dict[str, Any]) -> List[Insight]:
        """Turn correlation and distribution statistics into Insight objects.

        Strong pairwise correlations (|r| > 0.7) yield high-importance
        insights; clearly skewed variables (|skewness| > 1) yield
        medium-importance ones. Errors are logged and partial results returned.
        """
        found: List[Insight] = []

        try:
            if 'correlation_matrix' in stats:
                matrix = np.array(stats['correlation_matrix'])
                dim = len(matrix)
                names = stats.get('variables', [f'Var{i}' for i in range(dim)])

                # Scan the upper triangle only, so each pair is reported once.
                for row in range(dim):
                    for col in range(row + 1, dim):
                        value = matrix[row][col]
                        if abs(value) <= 0.7:
                            continue
                        tpl = 'high_positive' if value > 0 else 'high_negative'
                        found.append(Insight(
                            title=f"强相关关系：{names[row]} vs {names[col]}",
                            description=self.insight_templates['correlation'][tpl].format(
                                var1=names[row], var2=names[col], corr=value
                            ),
                            importance='high',
                            category='statistical',
                            confidence=min(abs(value), 0.95),
                            data={'correlation': value, 'variables': [names[row], names[col]]}
                        ))

            if 'descriptive_stats' in stats:
                for var, var_stats in stats['descriptive_stats'].items():
                    if not isinstance(var_stats, dict) or 'skewness' not in var_stats:
                        continue
                    skew = var_stats['skewness']
                    if abs(skew) <= 1:
                        continue
                    tpl = 'skewed_right' if skew > 0 else 'skewed_left'
                    found.append(Insight(
                        title=f"分布特征：{var}",
                        description=self.insight_templates['distribution'][tpl].format(var=var),
                        importance='medium',
                        category='statistical',
                        confidence=min(abs(skew) / 2, 0.9),
                        data={'skewness': skew, 'variable': var}
                    ))

        except Exception as e:
            self.logger.error(f"生成统计洞察失败: {e}")

        return found
    
    def _generate_clustering_insights(self, cluster_results: Dict[str, Any]) -> List[Insight]:
        """Generate insights about clustering quality and cluster balance.

        Fix: an empty 'cluster_sizes' list previously raised ValueError from
        max()/min(), and a zero-sized cluster raised ZeroDivisionError; both
        aborted the method via the broad except. Such inputs now simply skip
        the balance check.
        """
        insights = []

        try:
            # Separation quality, judged from the silhouette score.
            if 'silhouette_score' in cluster_results and 'n_clusters' in cluster_results:
                silhouette = cluster_results['silhouette_score']
                n_clusters = cluster_results['n_clusters']

                if silhouette > 0.7:
                    template_key, importance = 'clear_separation', 'high'
                elif silhouette > 0.5:
                    template_key, importance = 'moderate_separation', 'medium'
                else:
                    template_key, importance = 'poor_separation', 'low'

                description = self.insight_templates['clustering'][template_key].format(
                    n_clusters=n_clusters, silhouette=silhouette
                )

                insights.append(Insight(
                    title="聚类质量评估",
                    description=description,
                    importance=importance,
                    category='clustering',
                    confidence=silhouette,
                    data={'silhouette_score': silhouette, 'n_clusters': n_clusters}
                ))

            # Cluster size balance: flag a >5x ratio between the largest and
            # smallest cluster.
            cluster_sizes = cluster_results.get('cluster_sizes')
            if cluster_sizes:  # guard: empty list would crash max()/min()
                total_size = sum(cluster_sizes)
                max_size = max(cluster_sizes)
                min_size = min(cluster_sizes)

                # min_size/total_size guards avoid division by zero.
                if min_size > 0 and total_size > 0 and max_size / min_size > 5:
                    insights.append(Insight(
                        title="聚类大小不平衡",
                        description=f"聚类大小存在显著不平衡，最大聚类包含 {max_size} 个样本（{max_size/total_size*100:.1f}%），最小聚类包含 {min_size} 个样本（{min_size/total_size*100:.1f}%）。",
                        importance='medium',
                        category='clustering',
                        confidence=0.8,
                        data={'cluster_sizes': cluster_sizes, 'imbalance_ratio': max_size / min_size}
                    ))

        except Exception as e:
            self.logger.error(f"生成聚类洞察失败: {e}")

        return insights
    
    def _generate_text_insights(self, text_results: Dict[str, Any]) -> List[Insight]:
        """Generate insights from text analysis results.

        Covers sentiment distribution, discovered topics and extracted
        keywords. Fix: topic entries lacking a non-empty 'words' list are now
        skipped (consistent with the guard in _generate_recommendations)
        instead of raising KeyError/IndexError, which previously also aborted
        the keyword insight below via the broad except.
        """
        insights = []

        try:
            # Sentiment distribution.
            if 'sentiment_analysis' in text_results:
                sentiment = text_results['sentiment_analysis']
                if 'sentiment_percentages' in sentiment:
                    percentages = sentiment['sentiment_percentages']

                    positive = percentages.get('positive', 0)
                    negative = percentages.get('negative', 0)
                    neutral = percentages.get('neutral', 0)

                    # A sentiment dominates if it exceeds 60%; otherwise 'mixed'.
                    if positive > 60:
                        template_key, importance = 'positive', 'high'
                    elif negative > 60:
                        template_key, importance = 'negative', 'high'
                    elif neutral > 60:
                        template_key, importance = 'neutral', 'medium'
                    else:
                        template_key, importance = 'mixed', 'medium'

                    description = self.insight_templates['text_sentiment'][template_key].format(
                        positive=positive, negative=negative, neutral=neutral
                    )

                    insights.append(Insight(
                        title="情感倾向分析",
                        description=description,
                        importance=importance,
                        category='text',
                        confidence=max(positive, negative, neutral) / 100,
                        data=percentages
                    ))

            # Topic modeling.
            if 'topic_modeling' in text_results:
                topics = text_results['topic_modeling']
                if 'topics' in topics and len(topics['topics']) > 0:
                    topic_list = topics['topics']
                    n_topics = len(topic_list)
                    # Guarded extraction of each topic's leading word.
                    lead_words = [topic['words'][0] for topic in topic_list[:3]
                                  if 'words' in topic and len(topic['words']) > 0]

                    insights.append(Insight(
                        title="主题发现",
                        description=f"文本分析识别出 {n_topics} 个主要主题，反映了用户关注的不同方面。主要主题包括：" +
                                    "、".join(lead_words) + "等。",
                        importance='medium',
                        category='text',
                        confidence=0.7,
                        data={'n_topics': n_topics, 'topics': topic_list}
                    ))

            # Keywords (assumes a list of (word, score) pairs — TODO confirm).
            if 'keywords' in text_results:
                keywords = text_results['keywords']
                if len(keywords) > 0:
                    top_keywords = [kw[0] for kw in keywords[:5]]

                    insights.append(Insight(
                        title="关键词发现",
                        description=f"文本分析提取出的关键词反映了用户的主要关注点：{', '.join(top_keywords)}。",
                        importance='medium',
                        category='text',
                        confidence=0.6,
                        data={'keywords': keywords}
                    ))

        except Exception as e:
            self.logger.error(f"生成文本洞察失败: {e}")

        return insights
    
    def _generate_data_quality_insights(self, quality_results: Dict[str, Any]) -> List[Insight]:
        """Generate insights about outliers and missing data.

        Fixes: 'total_records' == 0 no longer raises ZeroDivisionError (it is
        treated like an absent count), and a plain numeric outlier count is
        accepted in addition to the list/dict forms.
        """
        insights = []

        try:
            # Outlier detection summary.
            if 'outliers' in quality_results:
                outliers = quality_results['outliers']
                # Absent, None or zero totals all fall back to 1 so the
                # percentage math cannot divide by zero.
                total_count = quality_results.get('total_records') or 1

                if isinstance(outliers, list):
                    outlier_count = len(outliers)
                elif isinstance(outliers, dict):
                    outlier_count = outliers.get('count', 0)
                else:
                    # Accept a bare numeric count as well.
                    outlier_count = int(outliers)

                percentage = (outlier_count / total_count) * 100

                if percentage < 5:
                    template_key, importance = 'few', 'low'
                elif percentage < 15:
                    template_key, importance = 'moderate', 'medium'
                else:
                    template_key, importance = 'many', 'high'

                description = self.insight_templates['outliers'][template_key].format(
                    count=outlier_count, percentage=percentage
                )

                insights.append(Insight(
                    title="异常值检测",
                    description=description,
                    importance=importance,
                    category='data_quality',
                    confidence=0.9,
                    data={'outlier_count': outlier_count, 'percentage': percentage}
                ))

            # Missing-value completeness check.
            if 'missing_values' in quality_results:
                missing = quality_results['missing_values']
                if isinstance(missing, dict):
                    # Variables with more than 20% missing data are flagged
                    # (assumes values are percentages, not fractions — TODO confirm).
                    high_missing_vars = [var for var, pct in missing.items() if pct > 20]

                    if high_missing_vars:
                        insights.append(Insight(
                            title="数据完整性问题",
                            description=f"发现以下变量存在较高的缺失率：{', '.join(high_missing_vars)}，可能影响分析结果的可靠性。",
                            importance='high',
                            category='data_quality',
                            confidence=0.9,
                            data={'high_missing_variables': high_missing_vars}
                        ))

        except Exception as e:
            self.logger.error(f"生成数据质量洞察失败: {e}")

        return insights
    
    def auto_summary(self, data: pd.DataFrame, analysis_results: Dict[str, Any]) -> Dict[str, Any]:
        """Build an automatic summary of the data set and its analysis results.

        Args:
            data: the original data frame.
            analysis_results: results produced by the analysis modules.

        Returns:
            A summary dict (timestamp, overview, findings, insights,
            recommendations, methodology), or {'error': ...} on failure.
        """
        try:
            return {
                'timestamp': datetime.now().isoformat(),
                'data_overview': self._generate_data_overview(data),
                'key_findings': self._extract_key_findings(analysis_results),
                # Insights are serialized via __dict__ for downstream JSON use.
                'insights': [item.__dict__ for item in self.generate_insights(analysis_results)],
                'recommendations': self._generate_recommendations(analysis_results),
                'methodology': self._describe_methodology(analysis_results),
            }

        except Exception as e:
            self.logger.error(f"自动摘要生成失败: {e}")
            return {'error': str(e)}
    
    def _generate_data_overview(self, data: pd.DataFrame) -> Dict[str, Any]:
        """Summarize basic shape, column types and missingness of the frame.

        Fix: an empty DataFrame previously raised ZeroDivisionError in the
        missing-percentage computation (so the method returned {}); it now
        reports 0.0 instead.
        """
        try:
            numeric_cols = data.select_dtypes(include=[np.number]).columns.tolist()
            categorical_cols = data.select_dtypes(include=['object', 'category']).columns.tolist()

            total_cells = len(data) * len(data.columns)
            missing_pct = (data.isnull().sum().sum() / total_cells) * 100 if total_cells else 0.0

            return {
                'total_records': len(data),
                'total_variables': len(data.columns),
                'numeric_variables': len(numeric_cols),
                'categorical_variables': len(categorical_cols),
                'missing_data_percentage': missing_pct,
                'data_types': {
                    'numeric': numeric_cols,
                    'categorical': categorical_cols
                }
            }
        except Exception as e:
            self.logger.error(f"数据概览生成失败: {e}")
            return {}
    
    def _extract_key_findings(self, analysis_results: Dict[str, Any]) -> List[str]:
        """Collect one-line descriptions of the analyses that were performed.

        Returns an empty list if extraction fails.
        """
        findings: List[str] = []

        try:
            stats = analysis_results.get('statistical_analysis', {})
            if 'correlation_matrix' in stats:
                findings.append("完成了变量间相关性分析，识别出关键关联关系")

            cluster = analysis_results.get('cluster_analysis', {})
            if 'n_clusters' in cluster:
                findings.append(f"聚类分析识别出 {cluster['n_clusters']} 个不同的数据群体")

            text = analysis_results.get('text_analysis', {})
            if 'sentiment_analysis' in text:
                findings.append("完成了文本情感分析，了解用户态度倾向")
            if 'topic_modeling' in text:
                findings.append("进行了主题建模，发现用户关注的主要话题")

            return findings

        except Exception as e:
            self.logger.error(f"关键发现提取失败: {e}")
            return []
    
    def _generate_recommendations(self, analysis_results: Dict[str, Any]) -> List[str]:
        """Generate an order-preserving, de-duplicated list of at most 8 recommendations.

        Combines randomly sampled template texts (selected by insight
        importance/category) with targeted suggestions derived from the
        concrete analysis results.

        NOTE: the output is non-deterministic — random.sample is used without
        a seeded Random instance.
        """
        recommendations = []
        
        try:
            # Derive targeted suggestions from the generated insights.
            insights = self.generate_insights(analysis_results)
            
            # Bucket insights by importance before sampling template pools.
            high_importance_insights = [i for i in insights if i.importance == 'high']
            medium_importance_insights = [i for i in insights if i.importance == 'medium']
            
            # High-priority recommendations.
            # NOTE(review): Insight.category values produced in this class are
            # 'statistical', 'clustering', 'text' and 'data_quality'; the
            # branches testing 'correlation', 'outliers', 'sentiment' or
            # 'text_analysis' can never match — confirm intended.
            for insight in high_importance_insights:
                if insight.category in ['correlation', 'clustering']:
                    recommendations.extend(random.sample(self.recommendation_templates['actionable'], min(2, len(self.recommendation_templates['actionable']))))
                    recommendations.extend(random.sample(self.recommendation_templates['business_insights'], min(1, len(self.recommendation_templates['business_insights']))))
                elif insight.category in ['data_quality', 'outliers']:
                    recommendations.extend(random.sample(self.recommendation_templates['data_quality'], min(2, len(self.recommendation_templates['data_quality']))))
                elif insight.category in ['text', 'text_analysis', 'sentiment']:
                    recommendations.extend(random.sample(self.recommendation_templates['business_insights'], min(2, len(self.recommendation_templates['business_insights']))))
            
            # Medium-priority recommendations.
            for insight in medium_importance_insights:
                if insight.category in ['correlation', 'clustering']:
                    recommendations.extend(random.sample(self.recommendation_templates['analysis_depth'], min(1, len(self.recommendation_templates['analysis_depth']))))
                elif insight.category in ['text', 'text_analysis']:
                    recommendations.extend(random.sample(self.recommendation_templates['business_insights'], min(1, len(self.recommendation_templates['business_insights']))))
                else:
                    recommendations.extend(random.sample(self.recommendation_templates['analysis_depth'], min(1, len(self.recommendation_templates['analysis_depth']))))
            
            # Targeted suggestions derived directly from the analysis results.
            if 'cluster_analysis' in analysis_results:
                cluster_results = analysis_results['cluster_analysis']
                if 'n_clusters' in cluster_results:
                    n_clusters = cluster_results['n_clusters']
                    recommendations.append(f"基于识别出的 {n_clusters} 个用户群体，建议为每个群体制定差异化的产品策略和营销方案。")
                
                if 'silhouette_score' in cluster_results:
                    silhouette = cluster_results['silhouette_score']
                    if silhouette < 0.5:
                        recommendations.append("聚类质量较低，建议尝试不同的聚类算法或调整聚类参数以获得更好的分群效果。")
            
            if 'text_analysis' in analysis_results:
                text_results = analysis_results['text_analysis']
                if 'topic_modeling' in text_results and 'topics' in text_results['topic_modeling']:
                    topics = text_results['topic_modeling']['topics']
                    if len(topics) > 0:
                        top_words = [topic['words'][0] for topic in topics[:3] if 'words' in topic and len(topic['words']) > 0]
                        if top_words:
                            recommendations.append(f"根据主题分析结果，用户主要关注 {', '.join(top_words)} 等方面，建议在这些领域加强产品功能和服务。")
                
                if 'sentiment_analysis' in text_results:
                    sentiment = text_results['sentiment_analysis']
                    if 'sentiment_percentages' in sentiment:
                        percentages = sentiment['sentiment_percentages']
                        negative = percentages.get('negative', 0)
                        if negative > 30:
                            recommendations.append(f"检测到 {negative:.1f}% 的负面情感，建议重点关注用户痛点并制定改进措施。")
            
            if 'statistical_analysis' in analysis_results:
                stats = analysis_results['statistical_analysis']
                if 'correlation_analysis' in stats:
                    corr_results = stats['correlation_analysis']
                    if 'strong_correlations' in corr_results and len(corr_results['strong_correlations']) > 0:
                        recommendations.append("发现了强相关变量，建议深入分析这些关联关系对业务的影响，并考虑在决策中综合考虑相关因素。")
            
            # Always append some technical-optimization suggestions.
            recommendations.extend(random.sample(self.recommendation_templates['technical_optimization'], min(2, len(self.recommendation_templates['technical_optimization']))))
            
            # Top up with generic suggestions if fewer than 5 were produced.
            if len(recommendations) < 5:
                all_templates = []
                for category in ['data_quality', 'analysis_depth', 'business_insights']:
                    all_templates.extend(self.recommendation_templates[category])
                needed = 5 - len(recommendations)
                if all_templates:
                    recommendations.extend(random.sample(all_templates, min(needed, len(all_templates))))
            
            # De-duplicate and cap the result.
            unique_recommendations = list(dict.fromkeys(recommendations))  # order-preserving dedup
            return unique_recommendations[:8]  # return at most 8 recommendations
            
        except Exception as e:
            self.logger.error(f"推荐建议生成失败: {e}")
            return []
    
    def _describe_methodology(self, analysis_results: Dict[str, Any]) -> Dict[str, str]:
        """Map each performed analysis type to a one-line method description.

        Returns an empty dict if description fails.
        """
        # Result-key -> (output label, Chinese description) lookup table.
        descriptions = {
            'statistical_analysis': ('statistical', "使用描述性统计、相关性分析和假设检验等方法进行统计分析"),
            'cluster_analysis': ('clustering', "采用K-means、层次聚类等算法进行群体划分和特征分析"),
            'text_analysis': ('text', "运用自然语言处理技术进行情感分析、主题建模和关键词提取"),
        }

        try:
            return {
                label: text
                for source_key, (label, text) in descriptions.items()
                if source_key in analysis_results
            }

        except Exception as e:
            self.logger.error(f"方法描述生成失败: {e}")
            return {}
    
    def recommendation_engine(self, user_profile: Dict[str, Any]) -> Dict[str, Any]:
        """Produce personalized suggestions from a user profile.

        Args:
            user_profile: profile fields such as 'cluster_id',
                'data_completeness' and 'analysis_history'.

        Returns:
            A dict of suggestion lists (analysis, data collection,
            visualization, business), or {'error': ...} on failure.
        """
        try:
            suggestions: Dict[str, Any] = {
                'analysis_suggestions': [],
                'data_collection_suggestions': [],
                'visualization_suggestions': [],
                'business_insights': []
            }

            # Cluster membership -> business insight.
            if 'cluster_id' in user_profile:
                cid = user_profile['cluster_id']
                traits = user_profile.get('cluster_characteristics', {})
                suggestions['business_insights'].append(
                    f"用户属于群体 {cid}，该群体的主要特征是：{traits}"
                )

            # Sparse data -> collection suggestion.
            if 'data_completeness' in user_profile and user_profile['data_completeness'] < 0.8:
                suggestions['data_collection_suggestions'].append(
                    "建议收集更多数据以提高分析的准确性和可靠性"
                )

            # Analyses not yet run -> analysis suggestions.
            if 'analysis_history' in user_profile:
                history = user_profile['analysis_history']
                if 'clustering' not in history:
                    suggestions['analysis_suggestions'].append(
                        "建议进行聚类分析以发现用户群体特征"
                    )
                if 'text_analysis' not in history:
                    suggestions['analysis_suggestions'].append(
                        "如有文本数据，建议进行文本分析以获得更深入的洞察"
                    )

            return suggestions

        except Exception as e:
            self.logger.error(f"推荐引擎失败: {e}")
            return {'error': str(e)}
    
    def generate_executive_summary(self, analysis_results: Dict[str, Any]) -> str:
        """
        生成执行摘要
        
        Args:
            analysis_results: 分析结果
            
        Returns:
            执行摘要文本
        """
        try:
            insights = self.generate_insights(analysis_results)
            high_priority_insights = [i for i in insights if i.importance == 'high']
            medium_priority_insights = [i for i in insights if i.importance == 'medium']
            
            summary_parts = [
                "# 执行摘要\n\n",
                f"本次分析于 {datetime.now().strftime('%Y年%m月%d日')} 完成，",
                f"共生成 {len(insights)} 项洞察，其中 {len(high_priority_insights)} 项为高优先级发现。\n\n"
            ]
            
            # 数据概览
            if any(key in analysis_results for key in ['cluster_analysis', 'statistical_analysis', 'text_analysis']):
                summary_parts.append("## 数据概览\n")
                
                if 'cluster_analysis' in analysis_results:
                    cluster_data = analysis_results['cluster_analysis']
                    if 'n_clusters' in cluster_data:
                        summary_parts.append(f"- 聚类分析：识别出 {cluster_data['n_clusters']} 个不同的数据群体\n")
                    if 'silhouette_score' in cluster_data:
                        quality = "优秀" if cluster_data['silhouette_score'] > 0.7 else "良好" if cluster_data['silhouette_score'] > 0.5 else "一般"
                        summary_parts.append(f"- 聚类质量：{quality}（轮廓系数：{cluster_data['silhouette_score']:.3f}）\n")
                
                if 'statistical_analysis' in analysis_results:
                    stats_data = analysis_results['statistical_analysis']
                    if 'correlation_analysis' in stats_data:
                        summary_parts.append("- 统计分析：完成了变量间相关性分析和显著性检验\n")
                
                if 'text_analysis' in analysis_results:
                    text_data = analysis_results['text_analysis']
                    if 'topic_modeling' in text_data and 'topics' in text_data['topic_modeling']:
                        n_topics = len(text_data['topic_modeling']['topics'])
                        summary_parts.append(f"- 文本分析：识别出 {n_topics} 个主要主题，完成情感分析\n")
                
                summary_parts.append("\n")
            
            # 关键发现
            if high_priority_insights:
                summary_parts.append("## 关键发现\n")
                for i, insight in enumerate(high_priority_insights[:3], 1):
                    summary_parts.append(f"✅ **{insight.title}**: {insight.description}\n")
                summary_parts.append("\n")
            
            # 重要洞察
            if medium_priority_insights:
                summary_parts.append("## 重要洞察\n")
                for i, insight in enumerate(medium_priority_insights[:3], 1):
                    summary_parts.append(f"📊 **{insight.title}**: {insight.description}\n")
                summary_parts.append("\n")
            
            # 推荐建议
            recommendations = self._generate_recommendations(analysis_results)
            if recommendations:
                summary_parts.append("## 推荐建议\n")
                
                # 分类显示推荐建议
                business_recs = [rec for rec in recommendations if any(keyword in rec for keyword in ['用户', '群体', '营销', '产品', '服务', '业务'])]
                technical_recs = [rec for rec in recommendations if any(keyword in rec for keyword in ['数据', '算法', '模型', '分析', '技术'])]
                other_recs = [rec for rec in recommendations if rec not in business_recs and rec not in technical_recs]
                
                if business_recs:
                    summary_parts.append("### 业务策略建议\n")
                    for i, rec in enumerate(business_recs[:3], 1):
                        summary_parts.append(f"{i}. {rec}\n")
                    summary_parts.append("\n")
                
                if technical_recs:
                    summary_parts.append("### 技术优化建议\n")
                    for i, rec in enumerate(technical_recs[:3], 1):
                        summary_parts.append(f"{i}. {rec}\n")
                    summary_parts.append("\n")
                
                if other_recs:
                    summary_parts.append("### 其他建议\n")
                    for i, rec in enumerate(other_recs[:2], 1):
                        summary_parts.append(f"{i}. {rec}\n")
                    summary_parts.append("\n")
            
            # 后续行动
            summary_parts.append("## 后续行动\n")
            summary_parts.append("1. 🎯 **短期目标**：根据高优先级发现制定具体的改进措施\n")
            summary_parts.append("2. 📈 **中期规划**：建立数据监控机制，定期更新分析模型\n")
            summary_parts.append("3. 🚀 **长期愿景**：构建数据驱动的决策体系，持续优化业务表现\n")
            
            return "".join(summary_parts)
            
        except Exception as e:
            self.logger.error(f"执行摘要生成失败: {e}")
            return f"执行摘要生成失败: {e}"
    
    def generate_intelligent_report(self, data: pd.DataFrame, analysis_results: Dict[str, Any]) -> Dict[str, Any]:
        """Generate an AI-enhanced intelligent report.

        Thin backward-compatibility alias: it performs no work of its own and
        simply delegates to :meth:`generate_comprehensive_report` with the
        same arguments.
        """
        report = self.generate_comprehensive_report(data, analysis_results)
        return report
    
    def generate_report(self, data: pd.DataFrame, title: str = "数据分析报告", **kwargs) -> Dict[str, Any]:
        """Main entry point for generating a report.

        Runs lightweight descriptive statistics, correlation analysis and
        data-quality profiling on *data*, derives insights and
        recommendations from those results, and assembles everything into a
        JSON-serializable report dict.

        Args:
            data: source DataFrame.
            title: report title.
            **kwargs: accepted for interface compatibility; currently unused.

        Returns:
            Report dict with all numpy types converted, or a minimal error
            dict if generation fails.
        """
        try:
            analysis_results: Dict[str, Any] = {}
            n_rows = len(data)

            # Numeric columns are needed both for the statistics below and for
            # the summary section; compute the selection once.
            numeric_cols = data.select_dtypes(include=[np.number]).columns
            try:
                if len(numeric_cols) > 0:
                    analysis_results['descriptive_stats'] = data[numeric_cols].describe().to_dict()

                    # Correlations only make sense with at least two columns.
                    if len(numeric_cols) > 1:
                        analysis_results['correlation_matrix'] = data[numeric_cols].corr().to_dict()
            except Exception as e:
                # Statistics are best-effort; the report can proceed without them.
                self.logger.warning(f"统计分析失败: {e}")

            # Data-quality metrics.  Guard the empty-frame case so the
            # percentage computation does not divide by zero (NaN results).
            missing_counts = data.isnull().sum()
            analysis_results['data_quality'] = {
                'total_records': n_rows,
                'total_columns': len(data.columns),
                'missing_values': missing_counts.to_dict(),
                'missing_percentage': (missing_counts / n_rows * 100).to_dict() if n_rows else {col: 0.0 for col in data.columns},
                'duplicate_rows': int(data.duplicated().sum())
            }

            # Derive human-readable insights and advice from the raw results.
            insights = self.generate_insights(analysis_results)
            recommendations = self.generate_recommendations(analysis_results)

            report = {
                'title': title,
                'generated_at': datetime.now().isoformat(),
                'summary': {
                    'total_rows': n_rows,
                    'total_columns': len(data.columns),
                    'numeric_columns': len(numeric_cols),
                    'categorical_columns': len(data.select_dtypes(include=['object', 'category']).columns)
                },
                'data_quality': analysis_results['data_quality'],
                'descriptive_statistics': analysis_results.get('descriptive_stats', {}),
                'correlation_analysis': analysis_results.get('correlation_matrix', {}),
                'insights': [{
                    'title': insight.title,
                    'description': insight.description,
                    'importance': insight.importance,
                    'category': insight.category,
                    'confidence': insight.confidence
                } for insight in insights],
                'recommendations': recommendations
            }

            # Ensure every value is JSON-serializable before returning.
            return convert_numpy_types(report)

        except Exception as e:
            self.logger.error(f"生成报告失败: {e}")
            return {
                'title': title,
                'generated_at': datetime.now().isoformat(),
                'error': f"报告生成失败: {str(e)}",
                'summary': {
                    'total_rows': len(data) if data is not None else 0,
                    'total_columns': len(data.columns) if data is not None else 0
                }
            }
    
    def generate_recommendations(self, analysis_results: Dict[str, Any]) -> List[str]:
        """Generate recommendation strings from analysis results.

        Inspects data-quality metrics, the correlation matrix and descriptive
        statistics (whichever are present) and emits human-readable advice.

        Args:
            analysis_results: dict of analysis outputs; recognized keys are
                'data_quality', 'correlation_matrix' and 'descriptive_stats'.

        Returns:
            At most five recommendation strings; generic advice when nothing
            specific was triggered, and a single fallback string on error.
        """
        recommendations = []

        try:
            # --- data-quality driven advice ---------------------------------
            if 'data_quality' in analysis_results:
                quality = analysis_results['data_quality']

                # Average missing-value percentage across columns.  Guard the
                # empty dict: the previous sum/len divided by zero, and the
                # broad except below then discarded every other recommendation.
                missing_map = quality.get('missing_percentage', {})
                if missing_map:
                    missing_pct = sum(missing_map.values()) / len(missing_map)
                    if missing_pct > 10:
                        recommendations.append("建议对缺失值较多的变量进行数据补充或使用插值方法处理。")
                    elif missing_pct > 5:
                        recommendations.append("建议关注缺失值的分布模式，考虑是否存在系统性缺失。")

                duplicate_count = quality.get('duplicate_rows', 0)
                if duplicate_count > 0:
                    recommendations.append(f"发现 {duplicate_count} 条重复记录，建议进行数据去重处理。")

            # --- correlation driven advice ----------------------------------
            if 'correlation_matrix' in analysis_results:
                corr_matrix = analysis_results['correlation_matrix']
                # Any off-diagonal |r| > 0.8 triggers the collinearity advice;
                # the individual pairs themselves are not reported.
                has_high_corr = any(
                    var1 != var2 and abs(corr_matrix[var1][var2]) > 0.8
                    for var1 in corr_matrix
                    for var2 in corr_matrix[var1]
                )
                if has_high_corr:
                    recommendations.append("发现强相关变量，建议在建模时考虑多重共线性问题。")
                    recommendations.append("可以考虑使用主成分分析(PCA)或特征选择方法降低变量间的相关性。")

            # --- descriptive-statistics driven advice -----------------------
            if 'descriptive_stats' in analysis_results:
                stats = analysis_results['descriptive_stats']

                for var, var_stats in stats.items():
                    if isinstance(var_stats, dict):
                        # Constant column: zero spread carries no information.
                        if 'std' in var_stats and var_stats['std'] == 0:
                            recommendations.append(f"变量 {var} 的标准差为0，建议检查数据是否为常数。")

                        # Crude outlier heuristic: range far exceeding the mean.
                        if 'min' in var_stats and 'max' in var_stats and 'mean' in var_stats:
                            range_val = var_stats['max'] - var_stats['min']
                            if range_val > var_stats['mean'] * 100:
                                recommendations.append(f"变量 {var} 存在可能的极值，建议进行异常值检测和处理。")

            # Generic fallback when nothing specific was found.
            if not recommendations:
                recommendations.extend([
                    "数据质量良好，建议进行更深入的探索性数据分析。",
                    "可以考虑进行聚类分析以发现数据中的潜在模式。",
                    "建议根据业务需求进行针对性的统计建模分析。"
                ])

            # Cap the list so the report stays concise.
            return recommendations[:5]

        except Exception as e:
            self.logger.error(f"生成推荐失败: {e}")
            return ["建议进行更详细的数据探索和质量检查。"]
    
    def generate_report_from_template(self, data: pd.DataFrame,
                                     analysis_results: Dict[str, Any],
                                     cluster_results: Optional[Dict[str, Any]] = None,
                                     template_id: str = "comprehensive") -> Dict[str, Any]:
        """Generate a report from a fixed section template.

        Args:
            data: source DataFrame summarized in the report header.
            analysis_results: analysis outputs passed to each section generator.
            cluster_results: optional clustering outputs for cluster sections.
            template_id: id of the template to use; an unknown id falls back
                to the default template.

        Returns:
            Report dict with template metadata and ordered sections, or a
            fallback report when generation fails.
        """
        try:
            # Resolve the template, falling back to the default definition.
            template = ReportTemplates.get_template_by_id(template_id)
            if not template:
                template = ReportTemplates.get_default_template()

            report = {
                "template_info": template["metadata"],
                "report_title": template["template_name"],
                "generated_at": datetime.now().isoformat(),
                "data_summary": {
                    "rows": len(data),
                    "columns": len(data.columns),
                    "data_types": data.dtypes.value_counts().to_dict()
                },
                "sections": []
            }

            # Build each configured section in its declared order; required
            # sections are emitted even when no content could be generated.
            for section_config in sorted(template["sections"], key=lambda x: x["order"]):
                section_content = self._generate_section_content(
                    section_config, data, analysis_results, cluster_results
                )

                if section_content or section_config["required"]:
                    report["sections"].append({
                        "id": section_config["id"],
                        "title": section_config["title"],
                        "icon": section_config.get("icon", "fas fa-file-alt"),
                        "order": section_config["order"],
                        "content": section_content or "内容生成中...",
                        "type": "markdown"
                    })

            # Convert numpy types to JSON-serializable ones exactly once
            # (previously the report was redundantly converted twice).
            return convert_numpy_types(report)

        except Exception as e:
            logging.error(f"使用模板生成报告失败: {str(e)}")
            return self._generate_fallback_report(data, analysis_results)
    
    def _generate_section_content(self, section_config: Dict[str, Any],
                                data: pd.DataFrame,
                                analysis_results: Dict[str, Any],
                                cluster_results: Optional[Dict[str, Any]] = None) -> str:
        """Dispatch to the content generator matching the section id.

        Unknown section ids fall back to a stub built from the section's
        configured title and description.
        """
        section_id = section_config["id"]

        # Map every known section id to a zero-argument content producer.
        producers = {
            "executive_summary": lambda: self._generate_executive_summary_content(data, analysis_results, cluster_results),
            "data_overview": lambda: self._generate_data_overview_content(data),
            "descriptive_statistics": lambda: self._generate_descriptive_stats_content(analysis_results),
            "correlation_analysis": lambda: self._generate_correlation_content(analysis_results),
            # Cluster section only renders when clustering results exist.
            "cluster_analysis": lambda: self._generate_cluster_content(cluster_results) if cluster_results else None,
            "ai_insights": lambda: self._generate_ai_insights_content(data, analysis_results),
            "recommendations": lambda: self._generate_recommendations_content(analysis_results, cluster_results),
            "methodology": lambda: self._generate_methodology_content(),
            "key_findings": lambda: self._generate_key_findings_content(analysis_results),
            "data_highlights": lambda: self._generate_data_highlights_content(data, analysis_results),
            "business_impact": lambda: self._generate_business_impact_content(analysis_results),
            "priority_actions": lambda: self._generate_priority_actions_content(analysis_results),
        }

        producer = producers.get(section_id)
        if producer is not None:
            return producer()
        return f"## {section_config['title']}\n\n{section_config.get('description', '内容生成中...')}"
    
    def _generate_executive_summary_content(self, data: pd.DataFrame,
                                          analysis_results: Dict[str, Any],
                                          cluster_results: Optional[Dict[str, Any]] = None) -> str:
        """Build the executive-summary markdown section.

        Lists key findings (dataset scale, statistics coverage, strong
        correlations, cluster count) followed by a short quality assessment.
        Returns a placeholder section on error.
        """
        try:
            findings = []

            # Dataset scale always appears first.
            findings.append(f"分析了包含 {len(data)} 个样本和 {len(data.columns)} 个变量的数据集")

            # Number of numeric variables covered by the descriptive stats.
            if "descriptive_stats" in analysis_results:
                stats_map = analysis_results["descriptive_stats"]
                numeric_count = len([col for col in stats_map if isinstance(stats_map[col], dict) and "mean" in stats_map[col]])
                findings.append(f"识别出 {numeric_count} 个数值型变量的关键统计特征")

            # Strong correlations (|r| > 0.7); each pair is counted twice in
            # the symmetric matrix, hence the halving below.
            if "correlation_matrix" in analysis_results:
                matrix = analysis_results["correlation_matrix"]
                if isinstance(matrix, dict):
                    pair_count = 0
                    for left in matrix:
                        for right in matrix[left]:
                            if left != right and abs(matrix[left][right]) > 0.7:
                                pair_count += 1
                    if pair_count > 0:
                        findings.append(f"发现 {pair_count//2} 对变量存在强相关关系")

            # Cluster count, when clustering results are available.
            if cluster_results and "cluster_summary" in cluster_results:
                cluster_count = cluster_results["cluster_summary"].get("n_clusters", 0)
                if cluster_count > 0:
                    findings.append(f"识别出 {cluster_count} 个不同的用户群体")

            parts = ["## 执行摘要\n\n", "### 关键发现\n\n"]
            for idx, finding in enumerate(findings, 1):
                parts.append(f"{idx}. {finding}\n")

            # Completeness = 1 - (missing cells / total cells).
            missing_pct = (data.isnull().sum().sum() / (len(data) * len(data.columns))) * 100
            parts.append("\n### 数据质量评估\n\n")
            parts.append(f"- **数据完整性**: {100-missing_pct:.1f}%\n")
            parts.append(f"- **样本规模**: {len(data):,} 条记录\n")
            parts.append(f"- **变量数量**: {len(data.columns)} 个\n")

            return "".join(parts)

        except Exception as e:
            logging.error(f"生成执行摘要失败: {str(e)}")
            return "## 执行摘要\n\n内容生成中..."
    
    def _generate_data_overview_content(self, data: pd.DataFrame) -> str:
        """Build the data-overview markdown section.

        Covers dataset size, dtype distribution, completeness/missing-value
        breakdown, and a table of up to five numeric variables.  Returns a
        placeholder section on error.
        """
        try:
            row_count = len(data)
            col_count = len(data.columns)

            parts = ["## 数据概览\n\n", "### 基本信息\n\n"]
            parts.append(f"- **数据集大小**: {row_count:,} 行 × {col_count} 列\n")

            # Break columns down by dtype.
            parts.append("- **数据类型分布**:\n")
            for dtype, count in data.dtypes.value_counts().items():
                parts.append(f"  - {dtype}: {count} 个变量\n")

            # Overall completeness plus a per-column missing-value breakdown.
            missing_per_col = data.isnull().sum()
            cols_with_missing = missing_per_col[missing_per_col > 0]
            completeness = (1 - data.isnull().sum().sum() / (row_count * col_count)) * 100
            parts.append(f"\n### 数据质量\n\n")
            parts.append(f"- **完整性**: {completeness:.1f}%\n")

            if len(cols_with_missing) > 0:
                parts.append("- **缺失值分布**:\n")
                for col, n_missing in cols_with_missing.head(5).items():
                    share = (n_missing / row_count) * 100
                    parts.append(f"  - {col}: {n_missing} ({share:.1f}%)\n")
                if len(cols_with_missing) > 5:
                    parts.append(f"  - 其他 {len(cols_with_missing) - 5} 个变量也存在缺失值\n")
            else:
                parts.append("- **缺失值**: 无缺失值\n")

            # Summary table for the first five numeric variables.
            numeric_cols = data.select_dtypes(include=[np.number]).columns
            if len(numeric_cols) > 0:
                parts.append(f"\n### 数值变量概览 ({len(numeric_cols)} 个)\n\n")
                parts.append("| 变量名 | 均值 | 标准差 | 最小值 | 最大值 |\n")
                parts.append("|--------|------|--------|--------|--------|\n")

                for col in numeric_cols[:5]:
                    described = data[col].describe()
                    parts.append(f"| {col} | {described['mean']:.2f} | {described['std']:.2f} | {described['min']:.2f} | {described['max']:.2f} |\n")

                if len(numeric_cols) > 5:
                    parts.append(f"\n*注: 还有 {len(numeric_cols) - 5} 个数值变量未显示*\n")

            return "".join(parts)

        except Exception as e:
            logging.error(f"生成数据概览失败: {str(e)}")
            return "## 数据概览\n\n内容生成中..."
    
    def _generate_fallback_report(self, data: pd.DataFrame, analysis_results: Dict[str, Any]) -> Dict[str, Any]:
        """Produce a minimal report used when template generation fails.

        *analysis_results* is accepted for interface compatibility but is not
        used by the fallback.
        """
        # The single section of the minimal report.
        basic_section = {
            "id": "basic_info",
            "title": "基本信息",
            "content": f"数据集包含 {len(data)} 行和 {len(data.columns)} 列。",
            "type": "markdown",
            "order": 1
        }
        return {
            "template_info": {
                "template_name": "基础报告",
                "version": "1.0",
                "created_at": datetime.now().isoformat()
            },
            "report_title": "数据分析报告",
            "generated_at": datetime.now().isoformat(),
            "data_summary": {
                "rows": len(data),
                "columns": len(data.columns)
            },
            "sections": [basic_section]
        }

    def generate_comprehensive_report(self, data: pd.DataFrame, analysis_results: Dict[str, Any]) -> Dict[str, Any]:
        """Generate the full intelligent analysis report.

        Args:
            data: raw input data.
            analysis_results: outputs of the upstream analysis modules.

        Returns:
            Nested report dict (numpy types converted), or ``{'error': ...}``
            when generation fails.
        """
        try:
            # Derive all intermediate artifacts up front.
            insights = self.generate_insights(analysis_results)
            auto_summary = self.auto_summary(data, analysis_results)
            executive_summary = self.generate_executive_summary(analysis_results)

            # Bucket insights by importance level for the key_insights block.
            insights_by_priority = {
                f"{level}_priority": [item.__dict__ for item in insights if item.importance == level]
                for level in ("high", "medium", "low")
            }

            report = {
                'metadata': {
                    'title': '智能数据分析报告',
                    'generated_at': datetime.now().isoformat(),
                    'data_shape': data.shape,
                    'analysis_modules': list(analysis_results.keys())
                },
                'executive_summary': executive_summary,
                'data_overview': auto_summary.get('data_overview', {}),
                'key_insights': insights_by_priority,
                'detailed_analysis': self._generate_detailed_analysis(analysis_results),
                'recommendations': self._generate_recommendations(analysis_results),
                'methodology': auto_summary.get('methodology', {}),
                'appendix': {
                    'data_quality': self._assess_data_quality(data),
                    'statistical_tests': self._summarize_statistical_tests(analysis_results),
                    'technical_notes': self._generate_technical_notes(analysis_results)
                }
            }

            # Normalize numpy scalar/array types for JSON serialization.
            return convert_numpy_types(report)

        except Exception as e:
            self.logger.error(f"综合报告生成失败: {e}")
            return {'error': str(e)}
    
    def _generate_descriptive_stats_content(self, analysis_results: Dict[str, Any]) -> str:
        """Build the descriptive-statistics markdown table.

        Renders one table row per numeric variable found in
        ``analysis_results['descriptive_stats']``; returns a placeholder
        section on error.
        """
        try:
            parts = ["## 描述性统计\n\n"]

            if "descriptive_stats" in analysis_results:
                stats_map = analysis_results["descriptive_stats"]
                parts.append("### 数值变量统计摘要\n\n")
                parts.append("| 变量 | 均值 | 标准差 | 最小值 | 最大值 | 偏度 |\n")
                parts.append("|------|------|--------|--------|--------|------|\n")

                # Numeric variables are recognized by a dict containing "mean".
                for name, values in stats_map.items():
                    if isinstance(values, dict) and "mean" in values:
                        parts.append(
                            f"| {name} | {values.get('mean', 0):.2f} | {values.get('std', 0):.2f} | "
                            f"{values.get('min', 0):.2f} | {values.get('max', 0):.2f} | "
                            f"{values.get('skewness', 0):.2f} |\n"
                        )

            return "".join(parts)

        except Exception as e:
            logging.error(f"生成描述性统计内容失败: {str(e)}")
            return "## 描述性统计\n\n内容生成中..."
    
    def _generate_correlation_content(self, analysis_results: Dict[str, Any]) -> str:
        """Build the correlation-analysis markdown section.

        Reads the optional ``analysis_results["correlation_analysis"]`` dict;
        recognized sub-keys are ``summary``, ``strong_correlations`` and
        ``correlation_matrix`` (assumed schema of the upstream analyzer —
        TODO confirm against the producer of these results).  Renders an
        overview, a table of significant pairs, a strength distribution, and
        generic advice.

        Returns:
            Markdown string; a placeholder section on error.
        """
        try:
            content = "## 📊 相关性分析\n\n"
            
            if "correlation_analysis" in analysis_results:
                corr_analysis = analysis_results["correlation_analysis"]
                
                # Overview block: headline numbers from the summary dict.
                if "summary" in corr_analysis:
                    summary = corr_analysis["summary"]
                    content += "### 🔍 分析概览\n\n"
                    content += f"- **分析变量数量**: {len(summary.get('features_analyzed', []))}\n"
                    content += f"- **显著相关对数**: {summary.get('total_pairs', 0)}\n"
                    content += f"- **最大相关系数**: {summary.get('max_correlation', 0):.3f}\n"
                    content += f"- **平均相关系数**: {summary.get('mean_correlation', 0):.3f}\n\n"
                
                # Detailed table of the strongest correlation pairs.
                if "strong_correlations" in corr_analysis:
                    strong_corrs = corr_analysis["strong_correlations"]
                    
                    if strong_corrs:
                        content += "### 🔥 显著相关关系\n\n"
                        content += "| 变量1 | 变量2 | 相关系数 | P值 | 显著性 | 关系解释 |\n"
                        content += "|-------|-------|----------|-----|--------|----------|\n"
                        
                        for corr_info in strong_corrs[:15]:  # show at most the first 15 pairs
                            var1 = corr_info.get('variable1', '')
                            var2 = corr_info.get('variable2', '')
                            corr_val = corr_info.get('correlation', 0)
                            p_val = corr_info.get('p_value', 1)
                            
                            # Classify correlation strength by |r|.
                            if abs(corr_val) >= 0.8:
                                strength = "极强"
                            elif abs(corr_val) >= 0.6:
                                strength = "强"
                            elif abs(corr_val) >= 0.4:
                                strength = "中等"
                            else:
                                strength = "弱"
                            
                            # Conventional significance stars from the p-value.
                            direction = "正相关" if corr_val > 0 else "负相关"
                            significance = "***" if p_val < 0.001 else "**" if p_val < 0.01 else "*" if p_val < 0.05 else "ns"
                            
                            explanation = f"{strength}{direction}"
                            if corr_val > 0:
                                explanation += "，两变量同向变化"
                            else:
                                explanation += "，两变量反向变化"
                            
                            content += f"| {var1} | {var2} | {corr_val:.3f} | {p_val:.4f} | {significance} | {explanation} |\n"
                        
                        content += "\n**显著性标记**: *** p<0.001, ** p<0.01, * p<0.05, ns 不显著\n\n"
                    else:
                        content += "### ℹ️ 相关性结果\n\n"
                        content += "未发现显著的强相关关系（|r| > 0.3 且 p < 0.05）。这表明变量间相对独立，或存在非线性关系。\n\n"
                
                # Distribution of correlation strengths across the matrix.
                if "correlation_matrix" in corr_analysis:
                    content += "### 📈 相关性分布\n\n"
                    corr_matrix = corr_analysis["correlation_matrix"]
                    
                    # Count off-diagonal entries per strength band; the full
                    # matrix is scanned, so each pair is seen twice.
                    strong_count = 0
                    moderate_count = 0
                    weak_count = 0
                    
                    for col1 in corr_matrix:
                        for col2 in corr_matrix[col1]:
                            if col1 != col2:
                                abs_corr = abs(corr_matrix[col1][col2])
                                if abs_corr >= 0.7:
                                    strong_count += 1
                                elif abs_corr >= 0.4:
                                    moderate_count += 1
                                elif abs_corr >= 0.2:
                                    weak_count += 1
                    
                    # Halve the counts to undo the double counting above
                    # (assumes the matrix is symmetric — TODO confirm).
                    strong_count //= 2
                    moderate_count //= 2
                    weak_count //= 2
                    
                    content += f"- **强相关** (|r| ≥ 0.7): {strong_count} 对\n"
                    content += f"- **中等相关** (0.4 ≤ |r| < 0.7): {moderate_count} 对\n"
                    content += f"- **弱相关** (0.2 ≤ |r| < 0.4): {weak_count} 对\n\n"
            
            # Generic business advice, appended regardless of results.
            content += "### 💡 分析建议\n\n"
            content += "1. **强相关变量**: 考虑是否存在多重共线性问题，在建模时可能需要特征选择\n"
            content += "2. **负相关关系**: 关注反向变化的变量对，可能揭示重要的业务逻辑\n"
            content += "3. **独立变量**: 相关性较弱的变量可能提供独特信息，建议保留用于分析\n"
            
            return content
            
        except Exception as e:
            logging.error(f"生成相关性分析内容失败: {str(e)}")
            return "## 📊 相关性分析\n\n内容生成中..."
    
    def _generate_cluster_content(self, cluster_results: Dict[str, Any]) -> str:
        """Build the cluster-analysis markdown section.

        Expects ``cluster_results`` with a ``cluster_summary`` dict
        (``n_clusters``, ``silhouette_score``) and optional ``cluster_sizes``,
        ``algorithm``, ``inertia`` and ``elbow_scores`` keys — assumed schema
        of the clustering module, TODO confirm against its output.

        Returns:
            Markdown string; a placeholder section on error.
        """
        try:
            content = "## 🎯 聚类分析\n\n"
            
            if "cluster_summary" in cluster_results:
                summary = cluster_results["cluster_summary"]
                n_clusters = summary.get("n_clusters", 0)
                silhouette = summary.get("silhouette_score", 0)
                
                content += f"### 📊 聚类结果概览\n\n"
                content += f"- **聚类数量**: {n_clusters}\n"
                content += f"- **轮廓系数**: {silhouette:.3f}\n"
                
                # Qualitative quality rating from the silhouette score.
                if silhouette >= 0.7:
                    quality_desc = "优秀"
                    quality_emoji = "🌟"
                elif silhouette >= 0.5:
                    quality_desc = "良好"
                    quality_emoji = "✅"
                elif silhouette >= 0.3:
                    quality_desc = "一般"
                    quality_emoji = "⚠️"
                else:
                    quality_desc = "较差"
                    quality_emoji = "❌"
                
                content += f"- **聚类质量评估**: {quality_emoji} {quality_desc}\n"
                
                if "cluster_sizes" in cluster_results:
                    sizes = cluster_results["cluster_sizes"]
                    content += "\n### 👥 各聚类规模分析\n\n"
                    content += "| 聚类ID | 样本数量 | 占比 | 规模评估 |\n"
                    content += "|--------|----------|------|----------|\n"
                    
                    total_samples = sum(sizes)
                    max_size = max(sizes)
                    min_size = min(sizes)
                    
                    for i, size in enumerate(sizes):
                        pct = (size / total_samples) * 100
                        
                        # Label the largest/smallest clusters when they stand
                        # out clearly from the rest.
                        if size == max_size and size > min_size * 2:
                            size_desc = "🔴 主导群体"
                        elif size == min_size and size < max_size * 0.5:
                            size_desc = "🔵 小众群体"
                        else:
                            size_desc = "🟡 均衡群体"
                        
                        content += f"| 聚类 {i+1} | {size:,} | {pct:.1f}% | {size_desc} |\n"
                    
                    # Distribution characteristics; inf ratio guards an
                    # (unlikely) empty cluster.
                    size_ratio = max_size / min_size if min_size > 0 else float('inf')
                    content += f"\n### 📈 群体分布特征\n\n"
                    content += f"- **最大群体规模**: {max_size:,} 个样本\n"
                    content += f"- **最小群体规模**: {min_size:,} 个样本\n"
                    content += f"- **规模差异比**: {size_ratio:.2f}\n"
                    
                    if size_ratio > 5:
                        content += f"- ⚠️ **群体规模差异较大**，可能存在异常群体或需要重新调整聚类参数\n"
                    elif size_ratio < 2:
                        content += f"- ✅ **群体规模分布均匀**，聚类结果较为理想\n"
                    else:
                        content += f"- 🔄 **群体规模适中**，分布相对合理\n"
                
                # Algorithm information (defaults to K-means when unspecified).
                algorithm = cluster_results.get('algorithm', 'K-means')
                content += f"\n### ⚙️ 算法信息\n\n"
                content += f"- **使用算法**: {algorithm}\n"
                
                if algorithm == 'K-means':
                    content += f"- **算法特点**: 基于距离的聚类，适合球形分布的数据\n"
                    if 'inertia' in cluster_results:
                        inertia = cluster_results['inertia']
                        content += f"- **群内平方和**: {inertia:.2f}（数值越小表示群体内部越紧密）\n"
                
                # Optimal-k analysis, when elbow-method scores are provided.
                if 'elbow_scores' in cluster_results:
                    elbow_scores = cluster_results['elbow_scores']
                    content += f"\n### 📊 最优聚类数分析\n\n"
                    content += f"- 使用**肘部法则**评估最优聚类数\n"
                    content += f"- 测试了 **{len(elbow_scores)}** 种不同的聚类数\n"
                    content += f"- 当前选择的聚类数 **{n_clusters}** 在评估中表现最佳\n"
                
                # Application advice, branching on clustering quality.
                content += f"\n### 💡 应用建议\n\n"
                
                if silhouette >= 0.5:
                    content += f"- ✅ **聚类结果质量良好**，可用于进一步的群体分析\n"
                    content += f"- 🎯 建议针对不同群体制定**差异化策略**\n"
                    content += f"- 📊 可进行**群体特征对比分析**，识别各群体的独特特征\n"
                    content += f"- 🔍 建议深入分析各群体的**行为模式**和**偏好差异**\n"
                else:
                    content += f"- ⚠️ **聚类质量一般**，建议优化：\n"
                    content += f"  - 🔄 尝试不同的聚类数（k值）\n"
                    content += f"  - 📏 考虑数据预处理（标准化、降维等）\n"
                    content += f"  - 🛠️ 尝试其他聚类算法（如层次聚类、DBSCAN等）\n"
                    content += f"  - 🎯 重新评估特征选择和数据质量\n"
                
                # Static business-value summary.
                content += f"\n### 💼 业务价值\n\n"
                content += f"- 🎯 **精准营销**: 基于群体特征制定个性化营销策略\n"
                content += f"- 📊 **资源配置**: 根据群体规模和特征优化资源分配\n"
                content += f"- 🔍 **风险管理**: 识别高风险群体，制定针对性风控措施\n"
                content += f"- 📈 **产品优化**: 基于不同群体需求优化产品功能\n"
            
            else:
                content += "聚类分析结果暂不可用。建议检查数据质量和聚类参数设置。\n"
            
            return content
            
        except Exception as e:
            logging.error(f"生成聚类分析内容失败: {str(e)}")
            return "## 🎯 聚类分析\n\n内容生成中..."
    
    def _generate_ai_insights_content(self, data: pd.DataFrame, analysis_results: Dict[str, Any]) -> str:
        """Build the "AI insights" markdown section of the report.

        Insights come from ``self.generate_insights`` and are rendered in
        three tiers keyed off each insight's ``importance`` attribute
        ('high' / 'medium' / 'low').  Optional insight attributes
        (``confidence``, ``impact``, ``supporting_data``,
        ``recommendations``) are shown only when present and truthy.

        Args:
            data: Source DataFrame. Not read here; kept so the signature
                matches the sibling ``_generate_*_content`` methods.
            analysis_results: Aggregated analysis output forwarded to
                ``generate_insights``.

        Returns:
            Markdown text; on any error a short placeholder section is
            returned instead of raising.
        """
        try:
            insights = self.generate_insights(analysis_results)
            
            content = "## 🤖 AI智能洞察\n\n"
            
            if insights:
                # Bucket insights by their importance tag.
                high_priority = [i for i in insights if i.importance == 'high']
                medium_priority = [i for i in insights if i.importance == 'medium']
                low_priority = [i for i in insights if i.importance == 'low']
                
                # Overview: totals per tier.
                total_insights = len(insights)
                content += f"### 📈 洞察概览\n\n"
                content += f"- **总洞察数量**: {total_insights}\n"
                content += f"- **高重要性**: {len(high_priority)} 个 🔥\n"
                content += f"- **中等重要性**: {len(medium_priority)} 个 📊\n"
                content += f"- **低重要性**: {len(low_priority)} 个 💡\n\n"
                
                # High-priority insights, rendered in full detail.
                if high_priority:
                    content += "### 🔥 高重要性洞察\n\n"
                    content += "这些洞察对业务决策具有重要影响，建议优先关注：\n\n"
                    for i, insight in enumerate(high_priority, 1):
                        content += f"#### {i}. {insight.title}\n\n"
                        content += f"**📝 详细描述**: {insight.description}\n\n"
                        
                        # Confidence line (optional attribute); note a 0.0
                        # confidence is treated the same as "absent" here.
                        if hasattr(insight, 'confidence') and insight.confidence:
                            confidence_emoji = "🎯" if insight.confidence >= 0.8 else "📊" if insight.confidence >= 0.6 else "⚠️"
                            content += f"**{confidence_emoji} 置信度**: {insight.confidence:.1%}\n\n"
                        
                        # Business-impact line (optional attribute).
                        if hasattr(insight, 'impact') and insight.impact:
                            impact_emoji = "🚀" if insight.impact == "high" else "📈" if insight.impact == "medium" else "💡"
                            content += f"**{impact_emoji} 业务影响**: {insight.impact}\n\n"
                        
                        # Supporting-data line (optional attribute).
                        if hasattr(insight, 'supporting_data') and insight.supporting_data:
                            content += f"**📊 数据支撑**: {insight.supporting_data}\n\n"
                        
                        # Recommended-action line (optional attribute).
                        if hasattr(insight, 'recommendations') and insight.recommendations:
                            content += f"**🎯 建议行动**: {insight.recommendations}\n\n"
                        
                        content += "---\n\n"
                
                # Medium-priority insights, compact form.
                if medium_priority:
                    content += "### 📊 中等重要性洞察\n\n"
                    content += "这些洞察提供有价值的信息，建议在资源允许时关注：\n\n"
                    for i, insight in enumerate(medium_priority[:5], 1):  # show first 5 only
                        content += f"**{i}. {insight.title}**\n\n"
                        content += f"   {insight.description}\n"
                        
                        if hasattr(insight, 'confidence') and insight.confidence:
                            confidence_emoji = "🎯" if insight.confidence >= 0.8 else "📊" if insight.confidence >= 0.6 else "⚠️"
                            content += f"   *{confidence_emoji} 置信度: {insight.confidence:.1%}*\n"
                        
                        content += "\n"
                
                # Low-priority insights, one bullet each.
                if low_priority:
                    content += "### 💡 其他洞察\n\n"
                    content += "这些洞察提供补充信息，可作为参考：\n\n"
                    for i, insight in enumerate(low_priority[:3], 1):  # show first 3 only
                        content += f"- **{insight.title}**: {insight.description}\n"
                    content += "\n"
                
                # Static explanation of the analysis techniques used.
                content += "### 🔬 AI分析方法\n\n"
                content += "本AI洞察基于以下分析技术生成：\n\n"
                content += "- **📊 统计模式识别**: 自动识别数据中的统计异常和模式\n"
                content += "- **🔍 关联性挖掘**: 发现变量间的隐藏关联关系\n"
                content += "- **📈 趋势分析**: 识别数据中的趋势和周期性模式\n"
                content += "- **🎯 异常检测**: 自动标识数据中的异常值和离群点\n"
                content += "- **💡 业务逻辑推理**: 结合业务场景进行智能推理\n\n"
                
                # Quality assessment over the insights that carry a confidence.
                if total_insights > 0:
                    # Collect confidences (default 0 is unreachable given the
                    # hasattr filter, but kept as written).
                    confidences = [getattr(insight, 'confidence', 0) for insight in insights if hasattr(insight, 'confidence')]
                    if confidences:
                        avg_confidence = sum(confidences) / len(confidences)
                        high_confidence_count = sum(1 for c in confidences if c >= 0.7)
                        
                        content += "### 📋 洞察质量评估\n\n"
                        content += f"- **平均置信度**: {avg_confidence:.1%}\n"
                        content += f"- **高置信度洞察**: {high_confidence_count}/{len(confidences)} 个\n"
                        
                        if avg_confidence >= 0.7:
                            content += f"- ✅ **整体质量**: 优秀，洞察可信度高\n"
                        elif avg_confidence >= 0.5:
                            content += f"- 📊 **整体质量**: 良好，大部分洞察可信\n"
                        else:
                            content += f"- ⚠️ **整体质量**: 一般，建议结合专业判断\n"
                        
                        content += "\n"
                
                # Static usage guidance.
                content += "### 💼 使用建议\n\n"
                content += "- 🎯 **优先级排序**: 优先关注高重要性和高置信度的洞察\n"
                content += "- 🔍 **深入验证**: 对关键洞察进行进一步的数据验证\n"
                content += "- 📊 **交叉验证**: 结合领域专家知识进行交叉验证\n"
                content += "- 🚀 **行动导向**: 将洞察转化为具体的业务行动计划\n"
                content += "- 📈 **持续监控**: 建立指标监控体系，跟踪洞察的实际效果\n"
            
            else:
                content += "AI洞察分析暂不可用。建议检查数据质量和分析配置。\n"
            
            return content
            
        except Exception as e:
            logging.error(f"生成AI洞察内容失败: {str(e)}")
            return "## 🤖 AI智能洞察\n\n内容生成中..."
    
    def _generate_recommendations_content(self, analysis_results: Dict[str, Any], cluster_results: Optional[Dict[str, Any]] = None) -> str:
        """Build the "smart recommendations" markdown section.

        Recommendations come from ``self._generate_recommendations`` and are
        bucketed into high / medium / low priority by keyword matching on the
        recommendation text.  The keyword classification is exhaustive (the
        loop's ``else`` catches everything), so the old "uncategorized"
        fallback branch could never run and has been removed.

        Args:
            analysis_results: Aggregated analysis output.
            cluster_results: Optional clustering output; when it contains a
                ``cluster_summary`` dict an extra clustering-advice section
                is appended.

        Returns:
            Markdown text; on any error a short placeholder section is
            returned instead of raising.
        """
        try:
            recommendations = self._generate_recommendations(analysis_results)
            
            content = "## 🎯 智能推荐建议\n\n"
            
            if recommendations:
                # Overview with the total count.
                content += f"### 📋 建议概览\n\n"
                content += f"基于数据分析结果，我们为您生成了 **{len(recommendations)}** 条智能建议：\n\n"
                
                # Bucket by priority via keyword matching.  The trailing
                # else guarantees every recommendation lands in exactly one
                # bucket, so at least one bucket is non-empty here.
                high_priority_recs = []
                medium_priority_recs = []
                low_priority_recs = []
                
                for rec in recommendations:
                    if "紧急" in rec or "立即" in rec or "优先" in rec or "重要" in rec:
                        high_priority_recs.append(rec)
                    elif "建议" in rec or "考虑" in rec or "可以" in rec:
                        medium_priority_recs.append(rec)
                    else:
                        low_priority_recs.append(rec)
                
                # High-priority recommendations, rendered in full detail.
                if high_priority_recs:
                    content += "### 🚨 高优先级建议\n\n"
                    content += "这些建议需要立即关注和执行：\n\n"
                    for i, rec in enumerate(high_priority_recs, 1):
                        content += f"#### {i}. 🔴 {rec}\n\n"
                        content += f"**执行时间**: 立即执行\n"
                        content += f"**重要程度**: 高\n"
                        content += f"**预期影响**: 显著改善关键指标\n\n"
                        content += "---\n\n"
                
                # Medium-priority recommendations, compact form.
                if medium_priority_recs:
                    content += "### 📊 中优先级建议\n\n"
                    content += "这些建议在资源允许时应当考虑实施：\n\n"
                    for i, rec in enumerate(medium_priority_recs, 1):
                        content += f"**{i}. 🟡 {rec}**\n\n"
                        content += f"   - **执行时间**: 1-4周内\n"
                        content += f"   - **重要程度**: 中等\n"
                        content += f"   - **预期影响**: 优化现有流程\n\n"
                
                # Low-priority recommendations, one bullet each.
                if low_priority_recs:
                    content += "### 💡 长期优化建议\n\n"
                    content += "这些建议可作为长期优化方向：\n\n"
                    for i, rec in enumerate(low_priority_recs, 1):
                        content += f"- 🔵 {rec}\n"
                    content += "\n"
                
                # Extra advice derived from the clustering results, if any.
                if cluster_results and "cluster_summary" in cluster_results:
                    cluster_summary = cluster_results["cluster_summary"]
                    n_clusters = cluster_summary.get("n_clusters", 0)
                    silhouette = cluster_summary.get("silhouette_score", 0)
                    
                    content += "### 🎯 聚类分析建议\n\n"
                    
                    # 0.5 silhouette is the good/poor threshold used
                    # throughout this module.
                    if silhouette >= 0.5:
                        content += f"✅ **聚类质量良好**（轮廓系数: {silhouette:.3f}）\n\n"
                        content += f"**建议行动**:\n"
                        content += f"- 🎯 针对 {n_clusters} 个不同群体制定差异化策略\n"
                        content += f"- 📊 深入分析各群体的特征差异\n"
                        content += f"- 🚀 为每个群体设计个性化的产品或服务\n"
                        content += f"- 📈 建立群体特异性的KPI监控体系\n\n"
                    else:
                        content += f"⚠️ **聚类质量一般**（轮廓系数: {silhouette:.3f}）\n\n"
                        content += f"**优化建议**:\n"
                        content += f"- 🔄 重新评估聚类参数，尝试不同的k值\n"
                        content += f"- 📏 考虑数据预处理（标准化、特征选择等）\n"
                        content += f"- 🛠️ 尝试其他聚类算法（层次聚类、DBSCAN等）\n"
                        content += f"- 🎯 重新审视特征工程和数据质量\n\n"
                
                # Static implementation roadmap.
                content += "### 📋 实施指南\n\n"
                content += "**第一阶段（立即执行）**:\n"
                content += "- 🚨 优先处理高优先级建议\n"
                content += "- 📊 建立关键指标监控体系\n"
                content += "- 👥 组建跨部门执行团队\n\n"
                
                content += "**第二阶段（1-4周）**:\n"
                content += "- 📈 实施中优先级建议\n"
                content += "- 🔍 评估第一阶段执行效果\n"
                content += "- 🔄 根据反馈调整策略\n\n"
                
                content += "**第三阶段（长期）**:\n"
                content += "- 💡 逐步实施长期优化建议\n"
                content += "- 📊 持续监控和数据收集\n"
                content += "- 🎯 定期重新评估和优化\n\n"
                
                # Static success metrics.
                content += "### 📈 成功指标\n\n"
                content += "建议建立以下指标来衡量建议实施效果：\n\n"
                content += "- **📊 数据质量指标**: 缺失率、异常值比例、数据一致性\n"
                content += "- **🎯 业务效果指标**: 转化率、满意度、效率提升\n"
                content += "- **💰 经济效益指标**: 成本节约、收入增长、ROI\n"
                content += "- **⏱️ 执行进度指标**: 完成率、时间达成率、里程碑达成\n\n"
                
                # Static risk caveats.
                content += "### ⚠️ 风险提示\n\n"
                content += "在实施建议时，请注意以下风险：\n\n"
                content += "- 🔍 **数据依赖风险**: 建议基于当前数据，需持续验证\n"
                content += "- 📊 **样本偏差风险**: 注意数据代表性和时效性\n"
                content += "- 🎯 **执行复杂性**: 某些建议可能需要跨部门协调\n"
                content += "- 💰 **资源约束风险**: 评估实施所需的人力和财力资源\n"
                content += "- ⏱️ **时间敏感性**: 部分建议可能存在时效性要求\n"
            
            else:
                # No recommendations at all: explain why and suggest next steps.
                content += "### 📝 分析说明\n\n"
                content += "基于当前数据分析结果，系统暂未识别出特定的行动建议。\n\n"
                content += "**可能原因**:\n"
                content += "- 📊 数据模式相对稳定，无明显异常\n"
                content += "- 🎯 分析结果在预期范围内\n"
                content += "- 🔍 需要更深入的专业领域分析\n\n"
                
                content += "**建议下一步**:\n"
                content += "- 📈 持续监控关键指标变化\n"
                content += "- 🔄 定期重新运行分析\n"
                content += "- 👥 结合业务专家进行深度解读\n"
                content += "- 📊 考虑引入更多维度的数据\n"
            
            return content
            
        except Exception as e:
            logging.error(f"生成推荐建议内容失败: {str(e)}")
            return "## 🎯 智能推荐建议\n\n内容生成中..."
    
    def _generate_methodology_content(self) -> str:
        """生成方法论内容"""
        content = "## 🔬 分析方法论\n\n"
        
        content += "本报告采用多层次、多维度的数据分析方法论，确保分析结果的科学性和可靠性。\n\n"
        
        content += "### 📊 统计分析方法\n\n"
        content += "#### 1. 描述性统计分析\n\n"
        content += "- **中心趋势测量**: 均值、中位数、众数\n"
        content += "- **离散程度测量**: 标准差、方差、变异系数\n"
        content += "- **分布形状测量**: 偏度（Skewness）、峰度（Kurtosis）\n"
        content += "- **分位数分析**: 四分位数、百分位数分布\n"
        content += "- **数据质量评估**: 缺失值、重复值、异常值统计\n\n"
        
        content += "#### 2. 相关性分析\n\n"
        content += "- **Pearson相关系数**: 测量线性相关关系强度\n"
        content += "- **Spearman等级相关**: 测量单调关系，适用于非正态分布\n"
        content += "- **Kendall's τ相关**: 基于一致性对的非参数相关测量\n"
        content += "- **显著性检验**: P值检验相关性的统计显著性\n"
        content += "- **相关性矩阵**: 全变量相关关系可视化\n\n"
        
        content += "#### 3. 假设检验\n\n"
        content += "- **正态性检验**: Shapiro-Wilk、Kolmogorov-Smirnov、Anderson-Darling检验\n"
        content += "- **单样本t检验**: 检验样本均值与总体均值的差异\n"
        content += "- **独立样本t检验**: 比较两组独立样本的均值差异\n"
        content += "- **卡方检验**: 检验分类变量间的独立性\n"
        content += "- **方差分析(ANOVA)**: 比较多组样本均值的差异\n\n"
        
        content += "#### 4. 异常值检测\n\n"
        content += "- **IQR方法**: 基于四分位距识别异常值\n"
        content += "- **Z-Score方法**: 基于标准分数识别极端值\n"
        content += "- **统计阈值**: 通常以1.5×IQR或3σ为判断标准\n"
        content += "- **多维异常检测**: 考虑变量间相互作用的异常模式\n\n"
        
        content += "### 🎯 聚类分析方法\n\n"
        content += "#### 1. K-means聚类算法\n\n"
        content += "- **算法原理**: 基于距离的无监督学习算法\n"
        content += "- **目标函数**: 最小化群内平方和(WCSS)\n"
        content += "- **迭代过程**: 质心更新与样本重分配\n"
        content += "- **收敛条件**: 质心位置稳定或达到最大迭代次数\n"
        content += "- **适用场景**: 球形分布、大小相近的聚类\n\n"
        
        content += "#### 2. 聚类质量评估\n\n"
        content += "- **轮廓系数(Silhouette Score)**: 衡量聚类内聚性和分离性\n"
        content += "  - 取值范围: [-1, 1]\n"
        content += "  - >0.7: 优秀聚类\n"
        content += "  - 0.5-0.7: 良好聚类\n"
        content += "  - 0.3-0.5: 一般聚类\n"
        content += "  - <0.3: 较差聚类\n"
        content += "- **群内平方和(WCSS)**: 衡量聚类紧密程度\n"
        content += "- **Calinski-Harabasz指数**: 群间与群内方差比\n\n"
        
        content += "#### 3. 最优聚类数确定\n\n"
        content += "- **肘部法则(Elbow Method)**: 寻找WCSS下降拐点\n"
        content += "- **轮廓系数法**: 选择轮廓系数最大的k值\n"
        content += "- **Gap统计量**: 比较实际数据与随机数据的聚类效果\n"
        content += "- **交叉验证**: 多次随机初始化验证聚类稳定性\n\n"
        
        content += "### 🤖 AI智能分析\n\n"
        content += "#### 1. 智能模式识别\n\n"
        content += "- **统计模式挖掘**: 自动识别数据中的统计异常和规律\n"
        content += "- **关联规则挖掘**: 发现变量间的隐藏关联关系\n"
        content += "- **趋势模式分析**: 识别时间序列和周期性模式\n"
        content += "- **异常模式检测**: 自动标识偏离正常模式的数据点\n\n"
        
        content += "#### 2. 洞察重要性评估\n\n"
        content += "- **统计显著性**: 基于P值和置信区间评估\n"
        content += "- **效应大小**: 评估实际业务影响程度\n"
        content += "- **业务相关性**: 结合领域知识评估业务价值\n"
        content += "- **置信度评分**: 综合多个指标的可信度评估\n\n"
        
        content += "#### 3. 智能推荐生成\n\n"
        content += "- **规则引擎**: 基于预定义规则生成建议\n"
        content += "- **模式匹配**: 根据识别的模式匹配最佳实践\n"
        content += "- **优先级排序**: 基于影响程度和实施难度排序\n"
        content += "- **个性化建议**: 根据数据特征定制化建议内容\n\n"
        
        content += "### 📋 数据预处理\n\n"
        content += "#### 1. 数据清洗\n\n"
        content += "- **缺失值处理**: 删除、填充或插值方法\n"
        content += "- **重复值处理**: 识别并处理重复记录\n"
        content += "- **异常值处理**: 基于业务逻辑和统计方法处理\n"
        content += "- **数据类型转换**: 确保数据类型的正确性\n\n"
        
        content += "#### 2. 数据标准化\n\n"
        content += "- **Z-score标准化**: 均值为0，标准差为1\n"
        content += "- **Min-Max标准化**: 缩放到[0,1]区间\n"
        content += "- **鲁棒标准化**: 基于中位数和四分位距\n"
        content += "- **单位向量缩放**: 缩放到单位长度\n\n"
        
        content += "### 🔍 质量控制\n\n"
        content += "#### 1. 分析可靠性\n\n"
        content += "- **重现性检验**: 多次运行确保结果一致性\n"
        content += "- **敏感性分析**: 评估参数变化对结果的影响\n"
        content += "- **交叉验证**: 使用不同数据子集验证结果\n"
        content += "- **置信区间**: 提供结果的不确定性估计\n\n"
        
        content += "#### 2. 结果验证\n\n"
        content += "- **统计检验**: 使用适当的统计检验方法\n"
        content += "- **效应大小**: 评估统计显著性的实际意义\n"
        content += "- **多重比较校正**: 控制家族错误率\n"
        content += "- **业务逻辑验证**: 结合领域知识验证合理性\n\n"
        
        content += "### 📊 报告生成\n\n"
        content += "#### 1. 自动化流程\n\n"
        content += "- **模板化设计**: 标准化报告结构和格式\n"
        content += "- **动态内容生成**: 基于数据特征自适应内容\n"
        content += "- **智能摘要**: 自动提取关键发现和洞察\n"
        content += "- **可视化集成**: 自动生成相关图表和可视化\n\n"
        
        content += "#### 2. 质量保证\n\n"
        content += "- **内容一致性**: 确保数据和文本的一致性\n"
        content += "- **格式标准化**: 统一的格式和样式\n"
        content += "- **错误检查**: 自动检测潜在的错误和异常\n"
        content += "- **版本控制**: 跟踪报告的版本和变更历史\n"
        
        return content
    
    def _generate_key_findings_content(self, analysis_results: Dict[str, Any]) -> str:
        """Build the "key findings" markdown section.

        Args:
            analysis_results: Aggregated analysis output forwarded to
                ``_extract_key_findings``.

        Returns:
            Markdown text with a numbered finding list, or a placeholder
            note when there are no findings or an error occurs.
        """
        try:
            findings = self._extract_key_findings(analysis_results)
            
            header = "## 关键发现\n\n"
            
            # No findings: return the fixed "nothing notable" note.
            if not findings:
                return header + "暂无特殊发现。\n"
            
            # Otherwise number the findings 1..N, one per line.
            numbered = "".join(
                f"{idx}. {item}\n" for idx, item in enumerate(findings, 1)
            )
            return header + numbered
            
        except Exception as e:
            logging.error(f"生成关键发现内容失败: {str(e)}")
            return "## 关键发现\n\n内容生成中..."
    
    def _generate_data_highlights_content(self, data: pd.DataFrame, analysis_results: Dict[str, Any]) -> str:
        """生成数据亮点内容"""
        try:
            content = "## 📋 数据概览与亮点\n\n"
            
            # 数据规模亮点
            content += f"### 📈 数据规模\n\n"
            content += f"- 分析了 **{len(data):,}** 个样本\n"
            content += f"- 涵盖 **{len(data.columns)}** 个变量\n"
            content += f"- 数据集大小：**{data.memory_usage(deep=True).sum() / 1024 / 1024:.2f} MB**\n"
            
            # 数据质量详细分析
            missing_pct = (data.isnull().sum().sum() / (len(data) * len(data.columns))) * 100
            duplicate_count = data.duplicated().sum()
            content += f"\n### ✅ 数据质量评估\n\n"
            content += f"- 数据完整性：**{100-missing_pct:.1f}%**\n"
            content += f"- 重复记录：**{duplicate_count:,}** 条 ({duplicate_count/len(data)*100:.2f}%)\n"
            
            # 缺失值详细分析
            missing_by_col = data.isnull().sum()
            high_missing_cols = missing_by_col[missing_by_col > len(data) * 0.1].sort_values(ascending=False)
            if len(high_missing_cols) > 0:
                content += f"- 高缺失率变量（>10%）：**{len(high_missing_cols)}** 个\n"
                for col, missing_count in high_missing_cols.head(5).items():
                    missing_rate = missing_count / len(data) * 100
                    content += f"  - {col}: {missing_rate:.1f}% 缺失\n"
            
            # 变量类型详细分布
            numeric_cols = data.select_dtypes(include=[np.number]).columns
            categorical_cols = data.select_dtypes(include=['object', 'category']).columns
            datetime_cols = data.select_dtypes(include=['datetime64']).columns
            
            content += f"\n### 🔢 变量类型分布\n\n"
            content += f"- **数值型变量**：{len(numeric_cols)} 个\n"
            if len(numeric_cols) > 0:
                content += f"  - 连续变量示例：{', '.join(list(numeric_cols)[:3])}\n"
            content += f"- **分类型变量**：{len(categorical_cols)} 个\n"
            if len(categorical_cols) > 0:
                content += f"  - 分类变量示例：{', '.join(list(categorical_cols)[:3])}\n"
            if len(datetime_cols) > 0:
                content += f"- **时间型变量**：{len(datetime_cols)} 个\n"
            
            # 数值变量统计摘要
            if len(numeric_cols) > 0:
                content += f"\n### 📊 数值变量统计摘要\n\n"
                numeric_data = data[numeric_cols]
                content += f"- 平均值范围：{numeric_data.mean().min():.2f} ~ {numeric_data.mean().max():.2f}\n"
                content += f"- 标准差范围：{numeric_data.std().min():.2f} ~ {numeric_data.std().max():.2f}\n"
                
                # 检查是否有异常值
                outlier_counts = []
                for col in numeric_cols:
                    Q1 = data[col].quantile(0.25)
                    Q3 = data[col].quantile(0.75)
                    IQR = Q3 - Q1
                    outliers = data[(data[col] < Q1 - 1.5*IQR) | (data[col] > Q3 + 1.5*IQR)][col]
                    outlier_counts.append(len(outliers))
                
                total_outliers = sum(outlier_counts)
                content += f"- 检测到异常值：**{total_outliers:,}** 个\n"
            
            # 分类变量摘要
            if len(categorical_cols) > 0:
                content += f"\n### 🏷️ 分类变量摘要\n\n"
                unique_counts = []
                for col in categorical_cols:
                    unique_counts.append(data[col].nunique())
                
                content += f"- 平均类别数：**{np.mean(unique_counts):.1f}**\n"
                content += f"- 类别数范围：{min(unique_counts)} ~ {max(unique_counts)}\n"
                
                # 高基数分类变量
                high_cardinality = [col for col in categorical_cols if data[col].nunique() > len(data) * 0.1]
                if high_cardinality:
                    content += f"- 高基数变量（类别数>10%样本数）：{len(high_cardinality)} 个\n"
            
            return content
            
        except Exception as e:
            logging.error(f"生成数据亮点内容失败: {str(e)}")
            return "## 📋 数据概览与亮点\n\n内容生成中..."
    
    def _generate_business_impact_content(self, analysis_results: Dict[str, Any]) -> str:
        """Build the "business impact" markdown section.

        Renders every high-importance insight returned by
        ``generate_insights`` with a fixed advice line; falls back to a
        "stable data" note when there are none.

        Args:
            analysis_results: Aggregated analysis output forwarded to
                ``generate_insights``.

        Returns:
            Markdown text; on any error a short placeholder section is
            returned instead of raising.
        """
        try:
            all_insights = self.generate_insights(analysis_results)
            priority_items = [item for item in all_insights if item.importance == 'high']
            
            sections = ["## 业务影响\n\n"]
            
            if priority_items:
                sections.append("### 🎯 高影响发现\n\n")
                for item in priority_items:
                    sections.append(f"**{item.title}**\n\n")
                    sections.append(f"{item.description}\n\n")
                    sections.append("**业务建议**: 建议优先关注此发现，制定相应的业务策略。\n\n")
            else:
                sections.append("当前分析结果显示数据模式相对稳定，建议持续监控关键指标。\n")
            
            return "".join(sections)
            
        except Exception as e:
            logging.error(f"生成业务影响内容失败: {str(e)}")
            return "## 业务影响\n\n内容生成中..."
    
    def _generate_priority_actions_content(self, analysis_results: Dict[str, Any]) -> str:
        """Build the "priority actions" markdown section.

        Merges up to two actions derived from high-importance insights with
        up to three generated recommendations into one numbered list; falls
        back to generic next steps when neither source yields anything.

        Args:
            analysis_results: Aggregated analysis output forwarded to
                ``_generate_recommendations`` and ``generate_insights``.

        Returns:
            Markdown text; on any error a short placeholder section is
            returned instead of raising.
        """
        try:
            recommendations = self._generate_recommendations(analysis_results)
            insights = self.generate_insights(analysis_results)
            
            # Only high-importance insights drive immediate actions.
            urgent = [ins for ins in insights if ins.importance == 'high']
            
            parts = ["## 优先行动\n\n"]
            
            if urgent or recommendations:
                parts.append("### 🚀 立即行动\n\n")
                
                # At most 2 insight-driven actions followed by at most 3
                # recommendation-driven ones, numbered consecutively.
                steps = [
                    f"**针对{ins.title}**: 建议深入调研相关业务流程，制定改进方案。"
                    for ins in urgent[:2]
                ]
                steps.extend(recommendations[:3])
                for number, step in enumerate(steps, 1):
                    parts.append(f"{number}. {step}\n")
                
                parts.append("\n### 📅 中期规划\n\n")
                parts.append("- 建立数据监控体系，定期跟踪关键指标变化\n")
                parts.append("- 完善数据收集流程，提高数据质量\n")
                parts.append("- 开展更深入的专项分析，挖掘更多业务价值\n")
            else:
                parts.append("基于当前分析结果，建议：\n\n")
                parts.append("1. 继续收集更多数据以获得更深入的洞察\n")
                parts.append("2. 定期重复分析以监控趋势变化\n")
                parts.append("3. 考虑引入更多相关变量进行分析\n")
            
            return "".join(parts)
            
        except Exception as e:
            logging.error(f"生成优先行动内容失败: {str(e)}")
            return "## 优先行动\n\n内容生成中..."
    
    def _generate_detailed_analysis(self, analysis_results: Dict[str, Any]) -> Dict[str, Any]:
        """生成详细分析章节"""
        detailed = {}
        
        try:
            # 统计分析详情
            if 'statistical_analysis' in analysis_results:
                stats = analysis_results['statistical_analysis']
                detailed['statistical'] = {
                    'summary': '完成了全面的描述性统计分析',
                    'key_findings': self._extract_statistical_findings(stats),
                    'significance_tests': stats.get('hypothesis_tests', {}),
                    'normality_tests': stats.get('normality_testing', {}),
                    'correlation_details': stats.get('correlation_analysis', {})
                }
            
            # 聚类分析详情
            if 'cluster_analysis' in analysis_results:
                cluster = analysis_results['cluster_analysis']
                detailed['clustering'] = {
                    'summary': f"识别出 {cluster.get('n_clusters', 0)} 个不同的数据群体",
                    'cluster_characteristics': cluster.get('cluster_centers', {}),
                    'quality_metrics': {
                        'silhouette_score': cluster.get('silhouette_score', 0),
                        'inertia': cluster.get('inertia', 0)
                    }
                }
            
            # 文本分析详情
            if 'text_analysis' in analysis_results:
                text = analysis_results['text_analysis']
                detailed['text'] = {
                    'summary': '完成了文本内容的深度分析',
                    'sentiment_distribution': text.get('sentiment_analysis', {}),
                    'key_topics': text.get('topic_modeling', {}),
                    'important_keywords': text.get('keywords', [])
                }
            
            return detailed
            
        except Exception as e:
            self.logger.error(f"详细分析生成失败: {e}")
            return {}
    
    def _extract_statistical_findings(self, stats: Dict[str, Any]) -> List[str]:
        """提取统计分析的关键发现"""
        findings = []
        
        try:
            # 描述性统计发现
            if 'descriptive_statistics' in stats or 'descriptive_stats' in stats:
                desc_stats = stats.get('descriptive_statistics', stats.get('descriptive_stats', {}))
                if desc_stats:
                    findings.append(f"✅ 完成了 **{len(desc_stats)}** 个变量的描述性统计分析")
                    
                    # 添加变异系数分析
                    high_variance_vars = []
                    for var_name, var_stats in desc_stats.items():
                        if isinstance(var_stats, dict) and 'std' in var_stats and 'mean' in var_stats:
                            if var_stats['mean'] != 0:
                                cv = abs(var_stats['std'] / var_stats['mean'])
                                if cv > 0.5:  # 变异系数大于0.5认为是高变异
                                    high_variance_vars.append(var_name)
                    
                    if high_variance_vars:
                        findings.append(f"📊 发现 **{len(high_variance_vars)}** 个高变异性变量（变异系数>0.5）")
                    
                    # 分布特征分析
                    skewed_vars = []
                    for var, stat_dict in desc_stats.items():
                        if isinstance(stat_dict, dict) and 'skewness' in stat_dict:
                            if abs(stat_dict['skewness']) > 1:
                                skewed_vars.append(var)
                    
                    if skewed_vars:
                        findings.append(f"📈 发现 **{len(skewed_vars)}** 个变量存在明显偏态分布（|偏度| > 1）")
            
            # 相关性分析详细发现
            if 'correlation_analysis' in stats:
                corr_analysis = stats['correlation_analysis']
                
                # 强相关关系
                if 'strong_correlations' in corr_analysis:
                    strong_corr_count = len(corr_analysis['strong_correlations'])
                    findings.append(f"🔗 发现 **{strong_corr_count}** 对强相关关系（|r| > 0.7）")
                
                # 相关性分布统计
                if 'correlation_matrix' in corr_analysis:
                    corr_matrix = corr_analysis['correlation_matrix']
                    if hasattr(corr_matrix, 'values'):
                        # 计算相关性分布
                        corr_values = corr_matrix.values
                        # 排除对角线元素
                        mask = ~np.eye(corr_values.shape[0], dtype=bool)
                        corr_flat = corr_values[mask]
                        
                        strong_corr = np.sum(np.abs(corr_flat) > 0.7)
                        moderate_corr = np.sum((np.abs(corr_flat) > 0.3) & (np.abs(corr_flat) <= 0.7))
                        weak_corr = np.sum(np.abs(corr_flat) <= 0.3)
                        
                        findings.append(f"📈 相关性分布：强相关 **{strong_corr}** 对，中等相关 **{moderate_corr}** 对，弱相关 **{weak_corr}** 对")
                
                # 显著性分析
                if 'significance_matrix' in corr_analysis:
                    sig_matrix = corr_analysis['significance_matrix']
                    if hasattr(sig_matrix, 'values'):
                        sig_values = sig_matrix.values
                        mask = ~np.eye(sig_values.shape[0], dtype=bool)
                        sig_flat = sig_values[mask]
                        significant_corr = np.sum(sig_flat < 0.05)
                        findings.append(f"⭐ **{significant_corr}** 对相关关系在统计上显著（p < 0.05）")
            
            # 处理旧格式的相关性矩阵
            elif 'correlation_matrix' in stats:
                corr_matrix = stats['correlation_matrix']
                if isinstance(corr_matrix, dict):
                    high_corr_pairs = []
                    for var1, corr_dict in corr_matrix.items():
                        for var2, corr_val in corr_dict.items():
                            if var1 != var2 and abs(corr_val) > 0.7:
                                high_corr_pairs.append((var1, var2, corr_val))
                    
                    if high_corr_pairs:
                        findings.append(f"🔗 发现 **{len(high_corr_pairs)}** 对变量存在强相关关系")
            
            # 正态性检验发现
            if 'normality_testing' in stats:
                norm_tests = stats['normality_testing']
                if isinstance(norm_tests, dict):
                    normal_vars = []
                    non_normal_vars = []
                    
                    for var_name, test_results in norm_tests.items():
                        if isinstance(test_results, dict) and 'overall_assessment' in test_results:
                            if test_results['overall_assessment'].get('is_normal', False):
                                normal_vars.append(var_name)
                            else:
                                non_normal_vars.append(var_name)
                    
                    if normal_vars or non_normal_vars:
                        findings.append(f"📊 正态性检验：**{len(normal_vars)}** 个变量符合正态分布，**{len(non_normal_vars)}** 个变量不符合")
            
            # 假设检验详细发现
            if 'hypothesis_tests' in stats:
                hyp_tests = stats['hypothesis_tests']
                significant_tests = []
                test_types = {}
                
                for test_name, test_result in hyp_tests.items():
                    if isinstance(test_result, dict):
                        # 统计检验类型
                        test_type = test_result.get('test_type', 'unknown')
                        test_types[test_type] = test_types.get(test_type, 0) + 1
                        
                        # 检查显著性
                        p_value = test_result.get('p_value', 1)
                        if p_value < 0.05:
                            significant_tests.append({
                                'name': test_name,
                                'p_value': p_value,
                                'test_type': test_type
                            })
                
                findings.append(f"🧪 进行了 **{len(hyp_tests)}** 项假设检验，其中 **{len(significant_tests)}** 项显著（p < 0.05）")
                
                # 按检验类型分类
                if test_types:
                    type_summary = ', '.join([f"{test_type}: {count}项" for test_type, count in test_types.items()])
                    findings.append(f"📋 检验类型分布：{type_summary}")
                
                # 列出显著的检验结果
                if significant_tests:
                    sig_summary = []
                    for test in significant_tests[:3]:  # 只显示前3个
                        sig_summary.append(f"{test['name']} (p={test['p_value']:.4f})")
                    if len(significant_tests) > 3:
                        sig_summary.append(f"等{len(significant_tests)}项")
                    findings.append(f"⚡ 显著检验结果：{', '.join(sig_summary)}")
            
            # 异常值检测发现
            if 'outlier_analysis' in stats:
                outlier_analysis = stats['outlier_analysis']
                if isinstance(outlier_analysis, dict):
                    total_outliers = 0
                    outlier_vars = []
                    
                    for var_name, outlier_info in outlier_analysis.items():
                        if isinstance(outlier_info, dict) and 'outlier_count' in outlier_info:
                            outlier_count = outlier_info['outlier_count']
                            total_outliers += outlier_count
                            if outlier_count > 0:
                                outlier_vars.append(var_name)
                    
                    if total_outliers > 0:
                        findings.append(f"⚠️ 检测到 **{total_outliers}** 个异常值，涉及 **{len(outlier_vars)}** 个变量")
            
            return findings if findings else ["✅ 统计分析已完成"]
            
        except Exception as e:
            self.logger.error(f"统计发现提取失败: {e}")
            return ["✅ 统计分析已完成"]
    
    def _assess_data_quality(self, data: pd.DataFrame) -> Dict[str, Any]:
        """Assess overall data quality along three axes.

        Args:
            data: DataFrame to evaluate.

        Returns:
            Dict with 'completeness' (cell counts and completeness rate),
            'consistency' (duplicate rows, dtype distribution) and
            'validity' (numeric ranges, categorical values) sections;
            empty dict on failure.
        """
        try:
            total_cells = int(data.size)
            missing_cells = int(data.isnull().sum().sum())
            # Guard against ZeroDivisionError on an empty DataFrame;
            # an empty frame is treated as vacuously complete.
            completeness_rate = (
                (1 - missing_cells / total_cells) * 100 if total_cells else 100.0
            )

            quality_assessment = {
                'completeness': {
                    'total_cells': total_cells,
                    'missing_cells': missing_cells,
                    'completeness_rate': completeness_rate
                },
                'consistency': {
                    'duplicate_rows': int(data.duplicated().sum()),
                    'data_types': data.dtypes.value_counts().to_dict()
                },
                'validity': {
                    'numeric_ranges': self._check_numeric_ranges(data),
                    'categorical_values': self._check_categorical_values(data)
                }
            }

            return quality_assessment

        except Exception as e:
            self.logger.error(f"数据质量评估失败: {e}")
            return {}
    
    def _check_numeric_ranges(self, data: pd.DataFrame) -> Dict[str, Any]:
        """检查数值型变量的范围"""
        numeric_cols = data.select_dtypes(include=[np.number]).columns
        ranges = {}
        
        for col in numeric_cols:
            ranges[col] = {
                'min': float(data[col].min()),
                'max': float(data[col].max()),
                'has_negatives': (data[col] < 0).any(),
                'has_zeros': (data[col] == 0).any()
            }
        
        return ranges
    
    def _check_categorical_values(self, data: pd.DataFrame) -> Dict[str, Any]:
        """检查分类变量的取值"""
        categorical_cols = data.select_dtypes(include=['object', 'category']).columns
        categories = {}
        
        for col in categorical_cols:
            unique_values = data[col].unique()
            categories[col] = {
                'unique_count': len(unique_values),
                'most_frequent': data[col].mode().iloc[0] if len(data[col].mode()) > 0 else None,
                'has_nulls': data[col].isnull().any()
            }
        
        return categories
    
    def _summarize_statistical_tests(self, analysis_results: Dict[str, Any]) -> Dict[str, Any]:
        """总结统计检验结果"""
        tests_summary = {}
        
        try:
            if 'statistical_analysis' in analysis_results:
                stats = analysis_results['statistical_analysis']
                
                if 'hypothesis_tests' in stats:
                    tests = stats['hypothesis_tests']
                    tests_summary = {
                        'tests_performed': list(tests.keys()) if isinstance(tests, dict) else [],
                        'significant_results': [],
                        'methodology_notes': "使用了适当的统计检验方法，显著性水平设为0.05"
                    }
            
            return tests_summary
            
        except Exception as e:
            self.logger.error(f"统计检验总结失败: {e}")
            return {}
    
    def _generate_technical_notes(self, analysis_results: Dict[str, Any]) -> List[str]:
        """生成技术说明"""
        notes = [
            "本报告基于SurveyAnalyzer智能分析系统生成",
            "所有统计分析均采用业界标准方法和算法",
            "聚类分析使用K-means算法，并通过轮廓系数评估质量",
            "相关性分析采用Pearson相关系数",
            "异常值检测基于统计学方法（如IQR方法）"
        ]
        
        # 根据实际使用的分析模块添加相应说明
        if 'text_analysis' in analysis_results:
            notes.append("文本分析采用自然语言处理技术，包括情感分析和主题建模")
        
        if 'advanced_analysis' in analysis_results:
            notes.append("高级分析包括降维、时间序列分析等前沿方法")
        
        return notes
    
    def get_available_templates(self) -> List[Dict[str, Any]]:
        """Return every available report template, or [] if lookup fails."""
        try:
            templates = ReportTemplates.get_all_templates()
        except Exception as e:
            self.logger.error(f"获取模板列表失败: {e}")
            return []
        return templates
    
    def validate_template(self, template_id: str) -> bool:
        """Return True when template_id resolves to a known template."""
        try:
            return ReportTemplates.get_template_by_id(template_id) is not None
        except Exception as e:
            self.logger.error(f"验证模板失败: {e}")
            return False
    
    def generate_smart_insights(self, data: pd.DataFrame, analysis_results: Dict[str, Any]) -> List[Dict[str, Any]]:
        """
        Generate advanced smart insights from the data and analysis results.

        Args:
            data: Raw survey data.
            analysis_results: Results produced by the analysis pipeline.

        Returns:
            Combined list of distribution, trend, anomaly and business
            insights; empty list on failure.
        """
        try:
            # Each collector contributes an independent batch of insights.
            insight_batches = (
                self._analyze_distributions(data),
                self._analyze_trends(data),
                self._analyze_anomaly_patterns(data, analysis_results),
                self._generate_business_insights(data, analysis_results),
            )

            smart_insights: List[Dict[str, Any]] = []
            for batch in insight_batches:
                smart_insights.extend(batch)

            return smart_insights

        except Exception as e:
            self.logger.error(f"智能洞察生成失败: {e}")
            return []
    
    def _analyze_distributions(self, data: pd.DataFrame) -> List[Dict[str, Any]]:
        """分析数据分布特征"""
        insights = []
        
        try:
            numeric_cols = data.select_dtypes(include=[np.number]).columns
            
            for col in numeric_cols:
                series = data[col].dropna()
                if len(series) > 10:  # 确保有足够的数据
                    skewness = series.skew()
                    kurtosis = series.kurtosis()
                    
                    # 判断分布类型
                    if abs(skewness) < 0.5 and abs(kurtosis) < 3:
                        dist_type = 'normal'
                        description = f"变量 {col} 呈现正态分布特征，适合使用参数统计方法进行分析。"
                    elif abs(skewness) > 1:
                        dist_type = 'skewed'
                        description = f"变量 {col} 呈现偏态分布（偏度: {skewness:.3f}），建议考虑数据变换或非参数方法。"
                    else:
                        dist_type = 'other'
                        description = f"变量 {col} 呈现正态分布特征，适合使用参数统计方法进行分析。"
                    
                    insights.append({
                        'title': f'{col} 分布特征分析',
                        'description': description,
                        'type': 'distribution',
                        'importance': 'medium' if abs(skewness) > 1 else 'low',
                        'data': {
                            'variable': col,
                            'skewness': skewness,
                            'kurtosis': kurtosis,
                            'distribution_type': dist_type
                        }
                    })
            
            return insights
            
        except Exception as e:
            self.logger.error(f"分布分析失败: {e}")
            return []
    
    def _analyze_trends(self, data: pd.DataFrame) -> List[Dict[str, Any]]:
        """分析趋势模式"""
        insights = []
        
        try:
            # 如果数据有时间序列特征，分析趋势
            numeric_cols = data.select_dtypes(include=[np.number]).columns
            
            for col in numeric_cols:
                series = data[col].dropna()
                if len(series) > 5:
                    # 简单的趋势检测
                    x = np.arange(len(series))
                    y = series.values
                    
                    # 计算线性趋势
                    slope = np.polyfit(x, y, 1)[0]
                    
                    if abs(slope) > 0.01 * np.std(y):  # 趋势显著
                        if slope > 0:
                            rate = (slope * len(series) / np.mean(y)) * 100
                            description = f"发现 {col} 呈现明显的上升趋势，增长率约为 {rate:.2f}%。"
                        else:
                            rate = abs(slope * len(series) / np.mean(y)) * 100
                            description = f"发现 {col} 呈现明显的下降趋势，下降率约为 {rate:.2f}%。"
                        
                        insights.append({
                            'title': f'{col} 趋势分析',
                            'description': description,
                            'type': 'trend',
                            'importance': 'high' if abs(rate) > 10 else 'medium',
                            'data': {
                                'variable': col,
                                'slope': slope,
                                'trend_rate': rate,
                                'trend_direction': 'increasing' if slope > 0 else 'decreasing'
                            }
                        })
            
            return insights
            
        except Exception as e:
            self.logger.error(f"趋势分析失败: {e}")
            return []
    
    def _analyze_anomaly_patterns(self, data: pd.DataFrame, analysis_results: Dict[str, Any]) -> List[Dict[str, Any]]:
        """分析异常模式"""
        insights = []
        
        try:
            # 从分析结果中获取异常值信息
            if 'data_quality' in analysis_results and 'outliers' in analysis_results['data_quality']:
                outliers = analysis_results['data_quality']['outliers']
                
                if isinstance(outliers, dict) and 'patterns' in outliers:
                    patterns = outliers['patterns']
                    
                    for pattern in patterns:
                        insights.append({
                            'title': '异常模式发现',
                            'description': f"在 {pattern.get('variable', '某变量')} 中发现异常模式：{pattern.get('description', '未知模式')}",
                            'type': 'anomaly',
                            'importance': 'high',
                            'data': pattern
                        })
            
            return insights
            
        except Exception as e:
            self.logger.error(f"异常模式分析失败: {e}")
            return []
    
    def _generate_business_insights(self, data: pd.DataFrame, analysis_results: Dict[str, Any]) -> List[Dict[str, Any]]:
        """生成业务洞察"""
        insights = []
        
        try:
            # 基于聚类结果的业务洞察
            if 'cluster_analysis' in analysis_results:
                cluster_results = analysis_results['cluster_analysis']
                n_clusters = cluster_results.get('n_clusters', 0)
                
                if n_clusters > 1:
                    insights.append({
                        'title': '用户群体细分发现',
                        'description': f"数据显示存在 {n_clusters} 个不同的用户群体，每个群体都有独特的特征和行为模式。建议针对不同群体制定差异化的策略。",
                        'type': 'business',
                        'importance': 'high',
                        'data': {
                            'cluster_count': n_clusters,
                            'business_impact': 'segmentation_opportunity'
                        }
                    })
            
            # 基于相关性的业务洞察
            if 'statistical_analysis' in analysis_results:
                stats = analysis_results['statistical_analysis']
                if 'correlation_matrix' in stats:
                    insights.append({
                        'title': '关键因素关联分析',
                        'description': '发现了多个变量之间的重要关联关系，这些关系可以用于预测和决策支持。建议深入分析这些关联的业务含义。',
                        'type': 'business',
                        'importance': 'medium',
                        'data': {
                            'analysis_type': 'correlation_insights',
                            'business_impact': 'predictive_opportunity'
                        }
                    })
            
            return insights
            
        except Exception as e:
            self.logger.error(f"业务洞察生成失败: {e}")
            return []


# Backwards-compatibility alias so callers importing the old name still work.
ReportGenerator = IntelligentReportGenerator