"""RESTful API接口模块

提供数据分析的API接口服务。
"""

import os
import json
import logging
from flask import Blueprint, request, jsonify, current_app
from werkzeug.utils import secure_filename
import pandas as pd
import numpy as np
from datetime import datetime

# Import the analysis modules. This whole section runs at import time and
# mutates sys.path so that the project's packages resolve regardless of how
# the app was launched.
try:
    import sys
    import os
    # Add the project root (three levels up from this file) to the Python path.
    project_root = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
    sys.path.insert(0, project_root)
    sys.path.insert(0, os.path.join(project_root, 'src'))
    
    from core.config import Config
    from data.processor import DataProcessor
    from analysis.statistics import StatisticalAnalyzer
    from analysis.clustering import ClusterAnalyzer
    from ai.nlp_analyzer import NLPAnalyzer
    # Import the report generators; fall back to a stub when unavailable.
    try:
        from ai.report_generator import IntelligentReportGenerator, ReportGenerator
        REPORT_GENERATOR_AVAILABLE = True
    except ImportError:
        # Import failed — provide a minimal stand-in so callers don't crash.
        REPORT_GENERATOR_AVAILABLE = False
        class ReportGenerator:
            """Stub report generator used when ai.report_generator is absent."""
            def __init__(self, config=None):
                # Keep the same constructor shape as the real generator.
                self.config = config
            def generate_report(self, data, **kwargs):
                # Always reports unavailability instead of generating anything.
                return {"error": "报告生成器不可用"}
    # Temporarily commented out: modules that may not exist in this checkout.
    # from visualization.charts import ChartGenerator
    # from utils.validators import DataValidator
    # from ai.chat_interface import ChatInterface
    # from ..analysis.advanced import DimensionalityAnalyzer, AnomalyDetector, TimeSeriesAnalyzer, AssociationRuleAnalyzer, RegressionAnalyzer
    # Advanced-analysis modules disabled here to avoid relative-import errors.
except ImportError as e:
    # Best-effort imports: routes that need a missing module fail at call time.
    logging.warning(f"某些模块导入失败: {e}")

# Blueprint collecting every API route in this module; registered by the app.
api_bp = Blueprint('api', __name__)
# Module-level logger shared by all route handlers below.
logger = logging.getLogger(__name__)

@api_bp.route('/test', methods=['GET'])
def test_api():
    """Health-check endpoint confirming the API blueprint is reachable."""
    payload = {'message': 'API正常工作', 'status': 'ok'}
    return jsonify(payload)

@api_bp.route('/current-file', methods=['GET'])
def get_current_file():
    """Return metadata for the file tracked in the current session, if any."""
    from flask import session

    tracked_name = session.get('current_filename')
    tracked_info = session.get('file_info')

    if tracked_name and tracked_info:
        filepath = os.path.join(current_app.config['UPLOAD_FOLDER'], tracked_name)
        if os.path.exists(filepath):
            return jsonify({
                'success': True,
                'file_info': tracked_info
            })
        # File vanished from disk — drop the stale session entries.
        session.pop('current_filename', None)
        session.pop('file_info', None)

    return jsonify({
        'success': False,
        'message': '没有当前文件'
    })

# Upload formats the API knows how to parse (see load_data).
ALLOWED_EXTENSIONS = {'csv', 'xlsx', 'xls', 'json'}

def allowed_file(filename):
    """Return True when *filename* has an extension accepted for upload."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS

def load_data(filename):
    """Load an uploaded data file into a pandas DataFrame.

    Args:
        filename: Name of a file inside the app's UPLOAD_FOLDER.

    Returns:
        pandas.DataFrame parsed from the file.

    Raises:
        FileNotFoundError: If the file is missing from the upload folder.
        ValueError: If the extension is not one of csv/xlsx/xls/json.
    """
    filepath = os.path.join(current_app.config['UPLOAD_FOLDER'], filename)

    if not os.path.exists(filepath):
        # BUG FIX: the message previously contained a literal placeholder
        # instead of the actual filename; interpolate it so errors are useful.
        raise FileNotFoundError(f"文件不存在: {filename}")

    if filename.endswith('.csv'):
        return pd.read_csv(filepath)
    elif filename.endswith(('.xlsx', '.xls')):
        return pd.read_excel(filepath)
    elif filename.endswith('.json'):
        return pd.read_json(filepath)
    else:
        # Same fix as above: report which filename had the bad extension.
        raise ValueError(f"不支持的文件格式: {filename}")

@api_bp.route('/upload', methods=['POST'])
def upload_file():
    """File-upload API.

    Expects a multipart form with a 'file' field. The file is saved into the
    app's UPLOAD_FOLDER under a timestamped name, parsed once for validation,
    and a summary of its shape/columns is returned as JSON.
    """
    try:
        if 'file' not in request.files:
            return jsonify({'error': '没有文件'}), 400
        
        file = request.files['file']
        
        if file.filename == '':
            return jsonify({'error': '没有选择文件'}), 400
        
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            # Append a timestamp so repeated uploads of the same name
            # never clobber an earlier file.
            timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
            name, ext = os.path.splitext(filename)
            filename = f"{name}_{timestamp}{ext}"
            
            filepath = os.path.join(current_app.config['UPLOAD_FOLDER'], filename)
            file.save(filepath)
            
            # Validate the content by loading it once; load_data raises
            # (caught below) if the file is unparsable.
            df = load_data(filename)
            
            return jsonify({
                'message': '文件上传成功',
                'filename': filename,
                'data_info': {
                    'rows': len(df),
                    'columns': len(df.columns),
                    'column_names': df.columns.tolist(),
                    'dtypes': df.dtypes.astype(str).to_dict()
                }
            })
        else:
            return jsonify({'error': '不支持的文件格式'}), 400
            
    except Exception as e:
        logger.error(f"文件上传失败: {e}")
        return jsonify({'error': str(e)}), 500

@api_bp.route('/data/<filename>', methods=['GET'])
def get_data_info(filename):
    """Return structural metadata and a 10-row preview for an uploaded file."""
    try:
        df = load_data(filename)

        # Validation is currently a stub and always reports OK.
        validation_result = {'status': 'ok', 'warnings': []}

        info = {
            'filename': filename,
            'shape': df.shape,
            'columns': df.columns.tolist(),
            'dtypes': df.dtypes.astype(str).to_dict(),
            'missing_values': df.isnull().sum().to_dict(),
            'numeric_columns': df.select_dtypes(include=['number']).columns.tolist(),
            'categorical_columns': df.select_dtypes(include=['object']).columns.tolist(),
            'data_preview': df.head(10).to_dict('records'),
            'validation': validation_result
        }
        return jsonify(info)

    except Exception as e:
        logger.error(f"获取数据信息失败: {e}")
        return jsonify({'error': str(e)}), 500

@api_bp.route('/analyze/<filename>', methods=['POST'])
def analyze_data(filename):
    """Data-analysis API.

    Computes descriptive statistics and (when more than one numeric column
    exists) a correlation matrix. Optionally runs a hypothesis test when the
    JSON body contains analysis_type == 'hypothesis_test'.
    """
    try:
        df = load_data(filename)
        data = request.get_json() or {}
        
        # Data processing — simplified for now: keep only numeric columns.
        processed_data = df.select_dtypes(include=['number'])
        
        # Basic statistical summary.
        results = {
            'basic_stats': processed_data.describe().to_dict(),
            'correlation': processed_data.corr().to_dict() if len(processed_data.columns) > 1 else {},
            'shape': df.shape,
            'columns': df.columns.tolist()
        }
        
        # Optional hypothesis test requested via the JSON body.
        analysis_type = data.get('analysis_type')
        if analysis_type == 'hypothesis_test':
            # BUG FIX: stats_analyzer was referenced without ever being
            # created, raising NameError on this branch. Instantiate it here,
            # mirroring how other routes build their analyzers from Config.
            stats_analyzer = StatisticalAnalyzer(Config())
            test_config = data.get('test_config', {})
            results['hypothesis_test'] = stats_analyzer.hypothesis_testing(
                processed_data, **test_config
            )
        
        return jsonify(results)
        
    except Exception as e:
        logger.error(f"数据分析失败: {e}")
        return jsonify({'error': str(e)}), 500

@api_bp.route('/cluster/<filename>', methods=['POST'])
def cluster_analysis(filename):
    """Clustering API supporting multiple clustering algorithms.

    JSON body: algorithm (default 'kmeans'), n_clusters (default 3, must be
    2..20), features (optional list of columns to cluster on).
    """
    try:
        # Use the modules imported at module load time.
        config = Config()
        cluster_analyzer = ClusterAnalyzer(config)
        
        df = load_data(filename)
        data = request.get_json() or {}
        
        # Read the algorithm choice and its parameters from the body.
        algorithm = data.get('algorithm', 'kmeans')
        n_clusters = data.get('n_clusters', 3)
        features = data.get('features')
        
        # Validate the requested cluster count.
        if n_clusters < 2:
            return jsonify({'error': '聚类数量必须大于等于2'}), 400
        if n_clusters > 20:
            return jsonify({'error': '聚类数量不能超过20'}), 400
        
        # Run the clustering; algorithm-level failures become 400 responses
        # rather than falling through to the generic 500 handler below.
        try:
            logger.info(f"执行{algorithm}聚类: n_clusters={n_clusters}, features={features}")
            result = cluster_analyzer.perform_clustering(
                df, features=features, algorithm=algorithm, 
                n_clusters=n_clusters
            )
        except Exception as clustering_error:
            logger.error(f"{algorithm}聚类执行失败: {clustering_error}")
            return jsonify({'error': f'聚类分析失败: {str(clustering_error)}'}), 400
        
        if result.get('error'):
            return jsonify(result), 400
        
        return jsonify(result)
        
    except Exception as e:
        logger.error(f"聚类分析失败: {e}")
        return jsonify({'error': str(e)}), 500

@api_bp.route('/visualize/<filename>', methods=['POST'])
def generate_visualization(filename):
    """Chart-generation API.

    JSON body: chart_type (heatmap/bar/histogram/scatter/pie), plus optional
    x_column, y_column, color_column depending on the chart type. Returns the
    ChartGenerator result as JSON, or a 400 on bad parameters.
    """
    try:
        import sys
        import os
        # ChartGenerator is imported lazily here (it is commented out of the
        # module-level imports above), so extend sys.path first.
        sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
        from visualization.charts import ChartGenerator
        from core.config import Config
        
        df = load_data(filename)
        data = request.get_json() or {}
        
        chart_type = data.get('chart_type', 'correlation_heatmap')
        x_column = data.get('x_column')
        y_column = data.get('y_column')
        color_column = data.get('color_column')
        
        # Build the chart generator.
        config = Config()
        chart_generator = ChartGenerator(config)
        
        # Generic request trace for debugging chart parameter issues.
        logger.info(f"[DEBUG] 图表请求: chart_type={chart_type}, x_column={x_column}, y_column={y_column}, color_column={color_column}")
        
        # Dispatch to the appropriate chart builder by chart type.
        if chart_type == 'correlation_heatmap' or chart_type == 'heatmap':
            result = chart_generator.create_correlation_heatmap(df)

        elif chart_type == 'bar':
            # Bar chart over categorical features; restricted to x_column
            # when one was supplied.
            if x_column:
                result = chart_generator.create_categorical_plots(df, features=[x_column])
            else:
                result = chart_generator.create_categorical_plots(df)

        elif chart_type == 'histogram':
            # Extra trace for histogram requests.
            logger.info(f"[DEBUG] 直方图请求参数: chart_type={chart_type}, x_column={x_column}, y_column={y_column}, color_column={color_column}")
            if x_column:
                # X column specified: histogram for that column only.
                logger.info(f"[DEBUG] 为指定列生成直方图: {x_column}")
                result = chart_generator.create_distribution_plots(df, features=[x_column])
            else:
                # No column specified: histograms for every numeric column.
                logger.info(f"[DEBUG] 生成所有数值列的直方图")
                result = chart_generator.create_distribution_plots(df)
        
        elif chart_type == 'scatter':
            # Scatter plot requires both axes; color is optional.
            logger.info(f"[DEBUG] 散点图请求参数: chart_type={chart_type}, x_column={x_column}, y_column={y_column}, color_column={color_column}")
            if not x_column or not y_column:
                result = {'error': '散点图需要指定X轴和Y轴字段'}
            else:
                result = chart_generator.create_scatter_plot(
                    df, 
                    x_feature=x_column, 
                    y_feature=y_column, 
                    color_feature=color_column if color_column else None
                )
        
        elif chart_type == 'pie':
            # Pie chart requires a categorical field.
            logger.info(f"[DEBUG] 饼图请求参数: chart_type={chart_type}, x_column={x_column}")
            if not x_column:
                result = {'error': '饼图需要指定分类字段'}
            else:
                result = chart_generator.create_pie_chart(df, feature=x_column)
        
        else:
            # Unknown chart type: return an error rather than a default chart.
            logger.warning(f"[DEBUG] 未知图表类型: {chart_type}")
            result = {'error': f'不支持的图表类型: {chart_type}'}
        
        if result.get('error'):
            return jsonify(result), 400
        
        return jsonify(result)
        
    except Exception as e:
        logger.error(f"可视化生成失败: {e}")
        return jsonify({'error': str(e)}), 500

@api_bp.route('/ai/analyze/<filename>', methods=['POST'])
def ai_analysis(filename):
    """AI text-analysis API.

    JSON body: analysis_type ('nlp' runs everything; or 'sentiment',
    'keywords', 'topics'), text_column (defaults to the first object-dtype
    column), language (default 'chinese').
    """
    try:
        import sys
        import os
        # AI modules are imported lazily; extend sys.path for the lookup.
        sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
        try:
            from ai.nlp_analyzer import NLPAnalyzer
            from ai.report_generator import IntelligentReportGenerator
        except ImportError:
            # AI modules may be absent in this deployment — report that.
            return jsonify({'error': 'AI分析模块未安装或配置'}), 500
        
        df = load_data(filename)
        data = request.get_json() or {}
        
        analysis_type = data.get('analysis_type', 'nlp')
        text_column = data.get('text_column')
        language = data.get('language', 'chinese')
        
        # Candidate text columns are the object-dtype ones.
        text_columns = df.select_dtypes(include=['object']).columns.tolist()
        if not text_columns:
            return jsonify({'error': '没有找到文本列用于AI分析'}), 400
        
        # Fall back to the first text column when none (or an unknown one)
        # was requested.
        if not text_column or text_column not in text_columns:
            text_column = text_columns[0]
        
        # Extract the non-null text values as strings.
        text_data = df[text_column].dropna().astype(str).tolist()
        if not text_data:
            return jsonify({'error': f'列 {text_column} 中没有有效的文本数据'}), 400
        
        # Build the NLP analyzer for the requested language.
        nlp_analyzer = NLPAnalyzer(language=language)
        
        results = {
            'analysis_type': analysis_type,
            'text_column': text_column,
            'language': language,
            'total_texts': len(text_data)
        }
        
        if analysis_type == 'sentiment' or analysis_type == 'nlp':
            # Sentiment analysis.
            sentiment_result = nlp_analyzer.sentiment_analysis(text_data)
            results['sentiment_analysis'] = sentiment_result
        
        if analysis_type == 'keywords' or analysis_type == 'nlp':
            # Keyword extraction.
            keywords_result = nlp_analyzer.extract_keywords(text_data)
            results['keywords'] = keywords_result
        
        if analysis_type == 'topics' or analysis_type == 'nlp':
            # Topic modeling.
            topics_result = nlp_analyzer.topic_modeling(text_data)
            results['topic_modeling'] = topics_result
        
        return jsonify(results)
        
    except Exception as e:
        logger.error(f"AI分析失败: {e}")
        return jsonify({'error': str(e)}), 500

@api_bp.route('/anomaly/<filename>', methods=['POST'])
def anomaly_detection(filename):
    """Anomaly-detection API.

    JSON body: method ('isolation_forest' default, 'one_class_svm', or
    'local_outlier_factor'), features (defaults to all numeric columns),
    plus method-specific parameters (contamination, nu, kernel, n_neighbors).
    """
    try:
        import sys
        import os
        # The advanced-analysis module is imported lazily.
        sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
        try:
            from analysis.advanced import AnomalyDetector
        except ImportError:
            return jsonify({'error': '高级分析模块未安装或配置'}), 500
        
        df = load_data(filename)
        data = request.get_json() or {}
        
        method = data.get('method', 'isolation_forest')
        features = data.get('features')
        
        # Default to every numeric column when no features were requested.
        if features is None:
            numeric_cols = df.select_dtypes(include=['number']).columns.tolist()
            if not numeric_cols:
                return jsonify({'error': '没有找到数值列用于异常检测'}), 400
            features = numeric_cols
        
        # Build the anomaly detector.
        anomaly_detector = AnomalyDetector()
        
        # Dispatch on the requested detection method.
        if method == 'isolation_forest':
            contamination = data.get('contamination', 0.1)
            result = anomaly_detector.isolation_forest(df[features], contamination=contamination)
        elif method == 'one_class_svm':
            nu = data.get('nu', 0.1)
            kernel = data.get('kernel', 'rbf')
            result = anomaly_detector.one_class_svm(df[features], nu=nu, kernel=kernel)
        elif method == 'local_outlier_factor':
            n_neighbors = data.get('n_neighbors', 20)
            result = anomaly_detector.local_outlier_factor(df[features], n_neighbors=n_neighbors)
        else:
            return jsonify({'error': f'不支持的异常检测方法: {method}'}), 400
        
        if result.get('error'):
            return jsonify(result), 400
        
        return jsonify(result)
        
    except Exception as e:
        logger.error(f"异常检测失败: {e}")
        return jsonify({'error': str(e)}), 500

@api_bp.route('/dimensionality/<filename>', methods=['POST'])
def dimensionality_reduction(filename):
    """Dimensionality-reduction API.

    JSON body: method ('pca' default, 'tsne', 'umap'), features (defaults
    to all numeric columns), n_components (default 2), plus method-specific
    parameters (perplexity, n_neighbors, min_dist).
    """
    try:
        import sys
        import os
        # The advanced-analysis module is imported lazily.
        sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
        try:
            from analysis.advanced import DimensionalityReducer
        except ImportError:
            return jsonify({'error': '高级分析模块未安装或配置'}), 500
        
        df = load_data(filename)
        data = request.get_json() or {}
        
        method = data.get('method', 'pca')
        features = data.get('features')
        n_components = data.get('n_components', 2)
        
        # Default to every numeric column when no features were requested.
        if features is None:
            numeric_cols = df.select_dtypes(include=['number']).columns.tolist()
            if not numeric_cols:
                return jsonify({'error': '没有找到数值列用于降维分析'}), 400
            features = numeric_cols
        
        # Build the reducer.
        reducer = DimensionalityReducer()
        
        # Dispatch on the requested reduction method.
        if method == 'pca':
            result = reducer.pca_analysis(df[features], n_components=n_components)
        elif method == 'tsne':
            perplexity = data.get('perplexity', 30)
            result = reducer.tsne_analysis(df[features], n_components=n_components, perplexity=perplexity)
        elif method == 'umap':
            n_neighbors = data.get('n_neighbors', 15)
            min_dist = data.get('min_dist', 0.1)
            result = reducer.umap_analysis(df[features], n_components=n_components, n_neighbors=n_neighbors, min_dist=min_dist)
        else:
            return jsonify({'error': f'不支持的降维方法: {method}'}), 400
        
        if result.get('error'):
            return jsonify(result), 400
        
        return jsonify(result)
        
    except Exception as e:
        logger.error(f"降维分析失败: {e}")
        return jsonify({'error': str(e)}), 500

@api_bp.route('/advanced/<filename>', methods=['POST'])
def advanced_analysis(filename):
    """Unified entry point that dispatches to the advanced-analysis helpers."""
    try:
        import sys
        import os
        sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

        df = load_data(filename)
        data = request.get_json() or {}

        analysis_type = data.get('analysis_type')
        if not analysis_type:
            return jsonify({'error': '请指定分析类型'}), 400

        # Route each recognised analysis type to its handler.
        handlers = {}
        for key in ('pca', 'tsne', 'umap', 'ica'):
            handlers[key] = _dimensionality_analysis
        for key in ('isolation_forest', 'lof', 'one_class_svm', 'elliptic_envelope', 'statistical'):
            handlers[key] = _anomaly_analysis
        for key in ('trend', 'seasonality', 'forecast', 'changepoint'):
            handlers[key] = _timeseries_analysis
        for key in ('association_rules', 'regression'):
            handlers[key] = _other_analysis

        handler = handlers.get(analysis_type)
        if handler is None:
            return jsonify({'error': f'不支持的分析类型: {analysis_type}'}), 400
        return handler(df, data)

    except Exception as e:
        logger.error(f"高级分析失败: {e}")
        return jsonify({'error': str(e)}), 500

def _dimensionality_analysis(df, data):
    """Run the dimensionality-reduction method named in data['analysis_type']."""
    try:
        from analysis.advanced import DimensionalityAnalyzer
    except ImportError:
        return jsonify({'error': '高级分析模块未安装或配置'}), 500

    analyzer = DimensionalityAnalyzer()
    analysis_type = data.get('analysis_type')

    if analysis_type == 'pca':
        result = analyzer.pca_analysis(
            df,
            n_components=data.get('n_components'),
            variance_threshold=data.get('variance_threshold', 0.95),
        )
    elif analysis_type == 'tsne':
        result = analyzer.tsne_analysis(
            df,
            n_components=data.get('n_components', 2),
            perplexity=data.get('perplexity', 30),
        )
    elif analysis_type == 'umap':
        result = analyzer.umap_analysis(
            df,
            n_components=data.get('n_components', 2),
            n_neighbors=data.get('n_neighbors', 15),
            min_dist=data.get('min_dist', 0.1),
        )
    elif analysis_type == 'ica':
        result = analyzer.ica_analysis(df, n_components=data.get('n_components'))
    else:
        return jsonify({'error': f'不支持的降维方法: {analysis_type}'}), 400

    return jsonify(result)

def _anomaly_analysis(df, data):
    """Run the anomaly-detection method named in data['analysis_type']."""
    try:
        from analysis.advanced import AnomalyDetector
    except ImportError:
        return jsonify({'error': '高级分析模块未安装或配置'}), 500

    detector = AnomalyDetector()
    analysis_type = data.get('analysis_type')

    if analysis_type == 'isolation_forest':
        result = detector.isolation_forest(
            df, contamination=data.get('contamination', 0.1)
        )
    elif analysis_type == 'lof':
        result = detector.local_outlier_factor(
            df,
            n_neighbors=data.get('n_neighbors', 20),
            contamination=data.get('contamination', 0.1),
        )
    elif analysis_type == 'one_class_svm':
        result = detector.one_class_svm(
            df, nu=data.get('nu', 0.1), kernel=data.get('kernel', 'rbf')
        )
    elif analysis_type == 'elliptic_envelope':
        result = detector.elliptic_envelope(
            df, contamination=data.get('contamination', 0.1)
        )
    elif analysis_type == 'statistical':
        result = detector.statistical_outliers(
            df,
            method=data.get('method', 'iqr'),
            threshold=data.get('threshold', 1.5),
        )
    else:
        return jsonify({'error': f'不支持的异常检测方法: {analysis_type}'}), 400

    return jsonify(result)

def _timeseries_analysis(df, data):
    """Run the time-series analysis named in data['analysis_type'] on one column."""
    try:
        from analysis.advanced import TimeSeriesAnalyzer
    except ImportError:
        return jsonify({'error': '高级分析模块未安装或配置'}), 500

    analyzer = TimeSeriesAnalyzer()
    analysis_type = data.get('analysis_type')
    value_column = data.get('value_column')
    time_column = data.get('time_column')

    # Default to the first numeric column when no value column was requested.
    if value_column is None:
        numeric_cols = df.select_dtypes(include=['number']).columns.tolist()
        if not numeric_cols:
            return jsonify({'error': '没有找到数值列用于时间序列分析'}), 400
        value_column = numeric_cols[0]

    # Pull the series (and optional timestamps) with nulls dropped.
    value_data = df[value_column].dropna()
    time_data = df[time_column].dropna() if time_column and time_column in df.columns else None

    if len(value_data) < 3:
        return jsonify({'error': '数据点太少，无法进行时间序列分析'}), 400

    if analysis_type == 'trend':
        result = analyzer.trend_analysis(value_data, time_data)
    elif analysis_type == 'seasonality':
        result = analyzer.seasonality_detection(value_data, period=data.get('period'))
    elif analysis_type == 'forecast':
        result = analyzer.forecasting(
            value_data,
            forecast_periods=data.get('forecast_periods', 5),
            method=data.get('method', 'linear'),
        )
    elif analysis_type == 'changepoint':
        result = analyzer.change_point_detection(value_data, min_size=data.get('min_size', 5))
    else:
        return jsonify({'error': f'不支持的时间序列分析类型: {analysis_type}'}), 400

    return jsonify(result)

def _other_analysis(df, data):
    """Handle the remaining advanced analyses: association rules & regression.

    data['analysis_type'] selects the branch; each branch lazily imports its
    analyzer, validates its parameters, and returns a Flask response tuple.
    """
    analysis_type = data.get('analysis_type')
    
    if analysis_type == 'association_rules':
        # Association-rule mining via Apriori.
        try:
            from analysis.advanced import AssociationRuleAnalyzer
        except ImportError:
            return jsonify({'error': '高级分析模块未安装或配置'}), 500
        
        analyzer = AssociationRuleAnalyzer()
        min_support = data.get('min_support', 0.1)
        min_confidence = data.get('min_confidence', 0.5)
        min_lift = data.get('min_lift', 1.0)
        
        result = analyzer.apriori_analysis(
            df, 
            min_support=min_support,
            min_confidence=min_confidence,
            min_lift=min_lift
        )
        
        if 'error' in result:
            return jsonify({'error': result['error']}), 400
        
        return jsonify({
            'success': True,
            'analysis_type': 'association_rules',
            'result': result
        })
    
    elif analysis_type == 'regression':
        # Regression analysis (linear or polynomial).
        try:
            from analysis.advanced import RegressionAnalyzer
        except ImportError:
            return jsonify({'error': '高级分析模块未安装或配置'}), 500
        
        analyzer = RegressionAnalyzer()
        target_column = data.get('target_column')
        
        if not target_column:
            return jsonify({'error': '请指定目标变量'}), 400
        
        regression_type = data.get('regression_type', 'linear')
        
        if regression_type == 'linear':
            # Linear regression: feature_columns may be None (analyzer default).
            feature_columns = data.get('feature_columns')
            result = analyzer.linear_regression(
                df,
                target_column=target_column,
                feature_columns=feature_columns
            )
        elif regression_type == 'polynomial':
            # Polynomial regression requires a single feature column.
            feature_column = data.get('feature_column')
            degree = data.get('degree', 2)
            
            if not feature_column:
                return jsonify({'error': '请指定特征变量'}), 400
            
            result = analyzer.polynomial_regression(
                df,
                target_column=target_column,
                feature_column=feature_column,
                degree=degree
            )
        else:
            return jsonify({'error': f'不支持的回归类型: {regression_type}'}), 400
        
        if 'error' in result:
            return jsonify({'error': result['error']}), 400
        
        return jsonify({
            'success': True,
            'analysis_type': 'regression',
            'result': result
        })
    
    else:
        return jsonify({'error': f'不支持的分析类型: {analysis_type}'}), 400

@api_bp.route('/timeseries/<filename>', methods=['POST'])
def time_series_analysis(filename):
    """Time-series analysis API - kept for backwards compatibility.

    Duplicates the logic of _timeseries_analysis but returns a richer
    envelope (analysis_type, value_column, time_column, data_points plus a
    per-analysis result key) instead of the raw analyzer result.
    """
    try:
        import sys
        import os
        # The advanced-analysis module is imported lazily.
        sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
        try:
            from analysis.advanced import TimeSeriesAnalyzer
        except ImportError:
            return jsonify({'error': '高级分析模块未安装或配置'}), 500
        
        df = load_data(filename)
        data = request.get_json() or {}
        
        analysis_type = data.get('analysis_type', 'trend')
        value_column = data.get('value_column')
        time_column = data.get('time_column')
        
        # Default to the first numeric column when no value column was given.
        if value_column is None:
            numeric_cols = df.select_dtypes(include=['number']).columns.tolist()
            if not numeric_cols:
                return jsonify({'error': '没有找到数值列用于时间序列分析'}), 400
            value_column = numeric_cols[0]
        
        # Extract the series (and optional timestamps) with nulls dropped.
        value_data = df[value_column].dropna()
        time_data = None
        if time_column and time_column in df.columns:
            time_data = df[time_column].dropna()
        
        if len(value_data) < 3:
            return jsonify({'error': '数据点太少，无法进行时间序列分析'}), 400
        
        # Build the time-series analyzer.
        ts_analyzer = TimeSeriesAnalyzer()
        
        results = {
            'analysis_type': analysis_type,
            'value_column': value_column,
            'time_column': time_column,
            'data_points': len(value_data)
        }
        
        if analysis_type == 'trend':
            # Trend analysis.
            trend_result = ts_analyzer.trend_analysis(value_data, time_data)
            results['trend_analysis'] = trend_result
        elif analysis_type == 'seasonality':
            # Seasonality detection.
            period = data.get('period')
            seasonality_result = ts_analyzer.seasonality_detection(value_data, period=period)
            results['seasonality_analysis'] = seasonality_result
        elif analysis_type == 'forecast':
            # Forecasting.
            forecast_periods = data.get('forecast_periods', 5)
            method = data.get('method', 'linear')
            forecast_result = ts_analyzer.forecasting(value_data, forecast_periods=forecast_periods, method=method)
            results['forecast'] = forecast_result
        elif analysis_type == 'changepoint':
            # Change-point detection.
            min_size = data.get('min_size', 5)
            changepoint_result = ts_analyzer.change_point_detection(value_data, min_size=min_size)
            results['changepoint_detection'] = changepoint_result
        else:
            return jsonify({'error': f'不支持的时间序列分析类型: {analysis_type}'}), 400
        
        if results.get('error'):
            return jsonify(results), 400
        
        return jsonify(results)
        
    except Exception as e:
        logger.error(f"时间序列分析失败: {e}")
        return jsonify({'error': str(e)}), 500

@api_bp.route('/batch_analysis/<filename>', methods=['POST'])
def batch_analysis(filename):
    """Batch-analysis API.

    JSON body: analysis_types — a list drawn from basic/statistics,
    correlation, clustering, anomaly, dimensionality. Each analysis runs
    independently; per-analysis failures are captured as {'error': ...}
    entries instead of failing the whole request.
    """
    try:
        df = load_data(filename)
        data = request.get_json() or {}
        
        analysis_types = data.get('analysis_types', ['basic', 'correlation'])
        
        results = {
            'analysis_types': analysis_types,
            'data_shape': df.shape,
            'columns': df.columns.tolist(),
            'analyses': {}
        }
        
        # Basic descriptive statistics over the numeric columns.
        if 'basic' in analysis_types or 'statistics' in analysis_types:
            try:
                numeric_cols = df.select_dtypes(include=['number']).columns.tolist()
                if numeric_cols:
                    basic_stats = df[numeric_cols].describe().to_dict()
                    results['analyses']['basic_statistics'] = basic_stats
                else:
                    results['analyses']['basic_statistics'] = {'error': '没有数值列用于统计分析'}
            except Exception as e:
                results['analyses']['basic_statistics'] = {'error': str(e)}
        
        # Pairwise correlation (needs at least two numeric columns).
        if 'correlation' in analysis_types:
            try:
                numeric_cols = df.select_dtypes(include=['number']).columns.tolist()
                if len(numeric_cols) > 1:
                    correlation = df[numeric_cols].corr().to_dict()
                    results['analyses']['correlation'] = correlation
                else:
                    results['analyses']['correlation'] = {'error': '需要至少两个数值列进行相关性分析'}
            except Exception as e:
                results['analyses']['correlation'] = {'error': str(e)}
        
        # Clustering with the analyzer's default settings.
        if 'clustering' in analysis_types:
            try:
                import sys
                import os
                sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
                from analysis.clustering import ClusterAnalyzer
                from core.config import Config
                config = Config()
                cluster_analyzer = ClusterAnalyzer(config)
                clustering = cluster_analyzer.perform_clustering(df)
                results['analyses']['clustering'] = clustering
            except Exception as e:
                results['analyses']['clustering'] = {'error': str(e)}
        
        # Isolation-forest anomaly detection on the numeric columns.
        if 'anomaly' in analysis_types:
            try:
                import sys
                import os
                sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
                try:
                    from analysis.advanced import AnomalyDetector
                except ImportError:
                    results['analyses']['anomaly_detection'] = {'error': '高级分析模块未安装或配置'}
                else:
                    anomaly_detector = AnomalyDetector()
                    numeric_cols = df.select_dtypes(include=['number']).columns.tolist()
                    if numeric_cols:
                        anomaly = anomaly_detector.isolation_forest(df[numeric_cols])
                        results['analyses']['anomaly_detection'] = anomaly
                    else:
                        results['analyses']['anomaly_detection'] = {'error': '没有数值列用于异常检测'}
            except Exception as e:
                results['analyses']['anomaly_detection'] = {'error': str(e)}
        
        # PCA-based dimensionality reduction on the numeric columns.
        if 'dimensionality' in analysis_types:
            try:
                import sys
                import os
                sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
                try:
                    from analysis.advanced import DimensionalityAnalyzer
                except ImportError:
                    results['analyses']['dimensionality_reduction'] = {'error': '高级分析模块未安装或配置'}
                else:
                    reducer = DimensionalityAnalyzer()
                    numeric_cols = df.select_dtypes(include=['number']).columns.tolist()
                    if numeric_cols:
                        pca_result = reducer.pca_analysis(df[numeric_cols])
                        results['analyses']['dimensionality_reduction'] = pca_result
                    else:
                        results['analyses']['dimensionality_reduction'] = {'error': '没有数值列用于降维分析'}
            except Exception as e:
                results['analyses']['dimensionality_reduction'] = {'error': str(e)}
        
        return jsonify({
            'batch_analysis_results': results,
            'completed_tasks': analysis_types,
            'timestamp': datetime.now().isoformat()
        })
        
    except Exception as e:
        logger.error(f"批量分析失败: {e}")
        return jsonify({'error': str(e)}), 500

def _attach_basic_statistics(df, report_data):
    """Run descriptive and correlation statistics on ``df`` and merge the
    successful results into ``report_data`` in place (keys
    'descriptive_statistics' / 'correlation_analysis').

    Failures are logged and swallowed so report generation can continue
    without statistics. Previously this logic was duplicated verbatim in
    both the AI and the fallback report paths.
    """
    try:
        from analysis.statistics import StatisticalAnalyzer
        # NOTE(review): instantiated without a Config here, unlike the call
        # sites that pass Config() -- confirm the constructor has a default.
        stat_analyzer = StatisticalAnalyzer()

        # Descriptive statistics over the whole frame.
        descriptive_stats = stat_analyzer.descriptive_statistics(df)

        # Correlation analysis only makes sense with >= 2 numeric columns.
        numeric_cols = df.select_dtypes(include=[np.number]).columns
        correlation_analysis = {}
        if len(numeric_cols) > 1:
            correlation_analysis = stat_analyzer.correlation_analysis(df)

        # Only attach results that report their own success flag.
        if descriptive_stats.get('success'):
            report_data['descriptive_statistics'] = descriptive_stats
        if correlation_analysis.get('success'):
            report_data['correlation_analysis'] = correlation_analysis

    except Exception as e:
        logger.warning(f"统计分析模块加载失败: {e}")


def _shape_report_content(report_type, report_data, insights=None, recommendations=None):
    """Select the report sections to return for the given ``report_type``.

    ``insights``/``recommendations`` of None means "omit those keys
    entirely" (the basic, non-AI report path); pass lists (possibly empty)
    to include them.
    """
    if report_type == 'executive':
        # Executive summary: key information only.
        content = {
            'summary': report_data.get('summary', {}),
            'data_quality': report_data.get('data_quality', {})
        }
        if insights is not None:
            content['key_insights'] = insights[:3]  # top 3 insights only
        if recommendations is not None:
            content['recommendations'] = recommendations[:3]  # top 3 only
        return content

    if report_type == 'comprehensive':
        # Comprehensive report: everything the generator produced.
        return report_data

    # Default ("auto") summary: a balanced subset of the report data.
    content = {
        'summary': report_data.get('summary', {}),
        'data_quality': report_data.get('data_quality', {}),
        'numeric_analysis': report_data.get('numeric_analysis', {}),
        'categorical_analysis': report_data.get('categorical_analysis', {}),
        'descriptive_statistics': report_data.get('descriptive_statistics', {}),
        'correlation_analysis': report_data.get('correlation_analysis', {})
    }
    if insights is not None:
        content['insights'] = insights
    if recommendations is not None:
        content['recommendations'] = recommendations
    return content


def _format_insights(insights):
    """Normalize heterogeneous insight entries (str, dict, anything else)
    into a uniform list of dicts for the JSON response."""
    formatted = []
    for insight in insights:
        if isinstance(insight, dict):
            # Dict-shaped insight: copy known fields, filling defaults.
            formatted.append({
                'title': insight.get('title', '数据洞察'),
                'description': insight.get('description', ''),
                'importance': insight.get('importance', 'medium'),
                'category': insight.get('category', 'general'),
                'type': insight.get('type', 'general')
            })
        else:
            # Plain strings pass through; any other type is stringified.
            formatted.append({
                'title': '数据洞察',
                'description': insight if isinstance(insight, str) else str(insight),
                'importance': 'medium',
                'category': 'general',
                'type': 'general'
            })
    return formatted


@api_bp.route('/report/<filename>', methods=['POST'])
def generate_report(filename):
    """Generate an intelligent analysis report for an uploaded data file.

    POST body (all optional): ``title``, ``author``, ``type`` ('executive' |
    'comprehensive' | anything else for an auto summary), ``include_charts``,
    ``include_raw_data``, ``use_ai_insights``.

    Returns the assembled report as JSON, or HTTP 500 on failure.
    """
    try:
        data = request.get_json() or {}
        title = data.get('title', '问卷调查分析报告')
        author = data.get('author', 'SurveyAnalyzer')
        report_type = data.get('type', 'comprehensive')
        include_charts = data.get('include_charts', True)
        include_raw_data = data.get('include_raw_data', True)
        use_ai_insights = data.get('use_ai_insights', True)

        # Without a filename, return a default template prompting for upload.
        if not filename:
            return jsonify({
                'title': title,
                'author': author,
                'sections': [
                    {'title': '数据概览', 'type': 'analysis', 'content': '请先上传数据文件以生成具体的分析报告'},
                    {'title': '分析说明', 'type': 'text', 'content': '上传数据后，系统将自动生成详细的分析报告'}
                ]
            })

        # Load the data set for analysis.
        df = load_data(filename)

        # Run each analysis module independently; a failure in one module
        # must not prevent the others from contributing to the report.
        analysis_results = {}

        # Statistical analysis.
        try:
            from analysis.statistics import StatisticalAnalyzer
            from core.config import Config
            config = Config()
            stats_analyzer = StatisticalAnalyzer(config)
            analysis_results['statistical_analysis'] = stats_analyzer.analyze(df)
        except Exception as e:
            logger.error(f"统计分析失败: {e}")

        # Cluster analysis.
        try:
            from analysis.clustering import ClusterAnalyzer
            from core.config import Config
            config = Config()
            cluster_analyzer = ClusterAnalyzer(config)
            analysis_results['cluster_analysis'] = cluster_analyzer.analyze(df)
        except Exception as e:
            logger.error(f"聚类分析失败: {e}")

        # Text analysis only when object-typed columns exist; sentiment
        # analysis is deliberately disabled (keywords/topic modeling only).
        text_columns = df.select_dtypes(include=['object']).columns.tolist()
        if text_columns:
            try:
                from ai.nlp_analyzer import NLPAnalyzer
                text_analyzer = NLPAnalyzer()
                analysis_results['text_analysis'] = text_analyzer.analyze(df, text_columns, enable_sentiment=False)
            except Exception as e:
                logger.error(f"文本分析失败: {e}")

        # Generate the intelligent report.
        try:
            if REPORT_GENERATOR_AVAILABLE:
                report_generator = IntelligentReportGenerator()
                # AI-enhanced comprehensive generation when requested.
                if use_ai_insights:
                    report_data = report_generator.generate_comprehensive_report(df, analysis_results)
                else:
                    report_data = report_generator.generate_report(df, title=title)
            else:
                report_generator = ReportGenerator()
                report_data = report_generator.generate_report(df, title=title)

            # Extract insights and recommendations produced by the generator.
            insights = report_data.get('insights', [])
            recommendations = report_data.get('recommendations', [])

            # Attach detailed statistics and shape the content by report type.
            _attach_basic_statistics(df, report_data)
            report_content = _shape_report_content(report_type, report_data, insights, recommendations)
            formatted_insights = _format_insights(insights)

            return jsonify({
                'success': True,
                'title': title,
                'author': author,
                'report': {
                    'type': report_type,
                    'content': report_content,
                    'insights': formatted_insights,
                    'generated_at': datetime.now().isoformat(),
                    'data_summary': {
                        'rows': len(df),
                        'columns': len(df.columns),
                        'filename': filename
                    },
                    'analysis_modules': list(analysis_results.keys()),
                    'ai_enhanced': use_ai_insights
                }
            })

        except ImportError:
            # AI modules unavailable: fall back to the basic report, which
            # still includes the plain statistical analyses.
            logger.warning("AI模块不可用，使用基础报告生成模式")

            try:
                report_generator = ReportGenerator()
                report_data = report_generator.generate_report(df, title=title)
            except Exception as e:
                logger.error(f"基础报告生成失败: {e}")
                # Minimal hand-built summary when even the basic generator fails.
                report_data = {
                    'summary': {
                        'total_rows': len(df),
                        'total_columns': len(df.columns),
                        'numeric_columns': len(df.select_dtypes(include=[np.number]).columns),
                        'categorical_columns': len(df.select_dtypes(include=['object', 'category']).columns)
                    },
                    'data_quality': {
                        'missing_values': int(df.isnull().sum().sum()),
                        'missing_percentage': round(float(df.isnull().sum().sum() / (df.shape[0] * df.shape[1]) * 100), 2),
                        'duplicate_rows': int(df.duplicated().sum())
                    }
                }

            # Statistics are attached even when the AI modules failed.
            _attach_basic_statistics(df, report_data)
            # No AI insights/recommendations are available on this path.
            report_content = _shape_report_content(report_type, report_data)

            return jsonify({
                'success': True,
                'title': title,
                'author': author,
                'report': {
                    'type': report_type,
                    'content': report_content,
                    'insights': [],
                    'generated_at': datetime.now().isoformat(),
                    'data_summary': {
                        'rows': len(df),
                        'columns': len(df.columns),
                        'filename': filename
                    },
                    'analysis_modules': ['basic_statistics'],
                    'ai_enhanced': False
                }
            })

    except Exception as e:
        logger.error(f"报告生成失败: {e}")
        return jsonify({'error': f'报告生成失败: {str(e)}'}), 500

@api_bp.route('/ai_chat', methods=['POST'])
def ai_chat():
    """AI chat assistant API.

    Accepts JSON ``{message, filename?, context?}``. When a filename is
    given, the file is loaded and a correlation analysis is run so the chat
    interface can answer data questions; otherwise the chat runs data-free.

    Returns JSON ``{success, response, query_type?}``; failures are reported
    via ``success: False`` rather than HTTP error codes.
    """
    try:
        data = request.get_json() or {}
        message = data.get('message', '').strip()
        filename = data.get('filename')
        context = data.get('context', {})  # reserved; currently unused

        if not message:
            return jsonify({
                'success': False,
                'response': '请输入您的问题。'
            })

        # Import the chat interface and analyzers lazily; the project root
        # must be on sys.path because these are absolute project imports.
        try:
            import sys
            import os
            project_root = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
            sys.path.insert(0, project_root)
            sys.path.insert(0, os.path.join(project_root, 'src'))
            from ai.chat_interface import ChatInterface
            from analysis.statistics import StatisticalAnalyzer
            from core.config import Config
        except ImportError as e:
            logger.error(f"模块导入失败: {e}")
            return jsonify({
                'success': False,
                'response': '抱歉，AI聊天功能暂时不可用。请稍后再试。'
            })

        # Load data (when a filename was supplied) and pre-compute statistics.
        df = None
        analysis_results = {}
        if filename:
            try:
                df = load_data(filename)

                if df is not None and not df.empty:
                    try:
                        config = Config()
                        stats_analyzer = StatisticalAnalyzer(config)
                        stats_result = stats_analyzer.analyze(df)

                        if stats_result.get('success'):
                            # Reshape the analyzer output into the structure
                            # ChatInterface expects.
                            correlation_data = stats_result.get('correlation_analysis', {})
                            if correlation_data.get('success', False):
                                analysis_results['statistical_analysis'] = {
                                    'correlation_matrix': correlation_data.get('correlation_matrix', {}),
                                    'variables': correlation_data.get('summary', {}).get('features_analyzed', []),
                                    'method': correlation_data.get('method', 'pearson'),
                                    'strong_correlations': correlation_data.get('strong_correlations', [])
                                }
                                logger.info("统计分析完成，相关性分析结果已传递给AI聊天接口")
                            else:
                                logger.warning(f"相关性分析失败: {correlation_data.get('error', '未知错误')}")
                        else:
                            logger.warning(f"统计分析失败: {stats_result.get('error', '未知错误')}")
                    except Exception as stats_error:
                        logger.warning(f"统计分析过程中出错: {stats_error}")

            except Exception as e:
                # FIX: the message previously logged the literal placeholder
                # "(unknown)" instead of the actual filename.
                logger.warning(f"无法加载数据文件 ({filename}): {e}")

        # Build the chat interface with the data and analysis context.
        chat_interface = ChatInterface(data=df, analysis_results=analysis_results)

        # Dispatch the user's query.
        result = chat_interface.query_data(message)

        if result.get('success', True):
            return jsonify({
                'success': True,
                'response': result.get('response', '我已收到您的消息，正在处理中...'),
                'query_type': result.get('query_type', 'general')
            })
        else:
            return jsonify({
                'success': False,
                'response': result.get('response', '抱歉，我无法理解您的问题。请尝试重新表述。')
            })

    except Exception as e:
        logger.error(f"AI聊天失败: {e}")
        return jsonify({
            'success': False,
            'response': '抱歉，发生了系统错误。请稍后再试。'
        })

@api_bp.route('/export/<filename>', methods=['POST'])
def export_results(filename):
    """Export analysis results (or a full report) for download.

    A payload containing both 'title' and 'sections' is treated as a report
    export and delegated to export_report(); otherwise the 'results' object
    is written to a timestamped JSON file in the upload folder.
    """
    try:
        payload = request.get_json() or {}

        # Report exports carry both a title and sections.
        if 'title' in payload and 'sections' in payload:
            return export_report(payload)

        # Guard clause: JSON is the only supported results format.
        export_format = payload.get('format', 'json')
        if export_format != 'json':
            return jsonify({'error': '不支持的导出格式'}), 400

        results = payload.get('results', {})
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        export_filename = f"analysis_results_{timestamp}.json"
        export_path = os.path.join(current_app.config['UPLOAD_FOLDER'], export_filename)

        with open(export_path, 'w', encoding='utf-8') as fh:
            json.dump(results, fh, ensure_ascii=False, indent=2)

        return jsonify({
            'message': '结果导出成功',
            'export_filename': export_filename,
            'export_path': export_path
        })

    except Exception as e:
        logger.error(f"导出失败: {e}")
        return jsonify({'error': f'导出失败: {str(e)}'}), 500

def export_report(data):
    """Export a report payload as a downloadable HTML or JSON response.

    ``data`` is the client-supplied dict with 'title', 'author', 'sections'
    and optional 'format' ('html' default; anything unrecognized also falls
    back to HTML — previously two byte-identical branches, now merged).
    """
    from flask import make_response

    title = data.get('title', '分析报告')
    author = data.get('author', 'SurveyAnalyzer')
    sections = data.get('sections', [])
    export_format = data.get('format', 'html')

    # The user-supplied title is embedded in the Content-Disposition header;
    # strip quote and CR/LF characters to prevent HTTP header injection while
    # leaving ordinary titles (including non-ASCII) unchanged.
    safe_title = title.replace('"', '').replace('\r', '').replace('\n', '')

    if export_format == 'json':
        # JSON export: the raw report structure plus an export timestamp.
        report_data = {
            'title': title,
            'author': author,
            'sections': sections,
            'exported_at': datetime.now().isoformat()
        }

        response = make_response(json.dumps(report_data, ensure_ascii=False, indent=2))
        response.headers['Content-Type'] = 'application/json; charset=utf-8'
        response.headers['Content-Disposition'] = f'attachment; filename="{safe_title}.json"'
        return response

    # HTML export — both the explicit 'html' format and the fallback for
    # any unsupported format.
    html_content = generate_html_report(title, author, sections)

    response = make_response(html_content)
    response.headers['Content-Type'] = 'text/html; charset=utf-8'
    response.headers['Content-Disposition'] = f'attachment; filename="{safe_title}.html"'
    return response

def generate_html_report(title, author, sections):
    """Render a standalone HTML report document.

    ``sections`` is a list of dicts with 'title', 'type' ('analysis',
    'summary', 'chart', 'table', or free text) and 'content'. All
    user-supplied text (title, author, section titles/contents — which
    arrive via request JSON) is HTML-escaped to prevent markup/script
    injection in the generated document.
    """
    from html import escape  # stdlib; escapes user-supplied text

    safe_title = escape(str(title))
    safe_author = escape(str(author))

    html = f'''
<!DOCTYPE html>
<html lang="zh-CN">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>{safe_title}</title>
    <style>
        body {{
            font-family: 'Microsoft YaHei', Arial, sans-serif;
            line-height: 1.6;
            margin: 0;
            padding: 20px;
            background-color: #f5f5f5;
        }}
        .container {{
            max-width: 800px;
            margin: 0 auto;
            background: white;
            padding: 40px;
            border-radius: 8px;
            box-shadow: 0 2px 10px rgba(0,0,0,0.1);
        }}
        .report-header {{
            text-align: center;
            margin-bottom: 40px;
            border-bottom: 2px solid #007bff;
            padding-bottom: 20px;
        }}
        .report-title {{
            font-size: 28px;
            font-weight: bold;
            color: #333;
            margin-bottom: 10px;
        }}
        .report-meta {{
            color: #666;
            font-size: 14px;
        }}
        .section {{
            margin-bottom: 30px;
        }}
        .section-title {{
            font-size: 20px;
            font-weight: bold;
            color: #007bff;
            margin-bottom: 15px;
            border-left: 4px solid #007bff;
            padding-left: 15px;
        }}
        .section-content {{
            color: #555;
            line-height: 1.8;
        }}
        .alert {{
            padding: 15px;
            margin-bottom: 20px;
            border-radius: 4px;
        }}
        .alert-info {{
            background-color: #d1ecf1;
            border-color: #bee5eb;
            color: #0c5460;
        }}
        .alert-success {{
            background-color: #d4edda;
            border-color: #c3e6cb;
            color: #155724;
        }}
        .chart-placeholder {{
            text-align: center;
            padding: 40px;
            background-color: #f8f9fa;
            border: 2px dashed #dee2e6;
            border-radius: 4px;
            color: #6c757d;
        }}
        @media print {{
            body {{ background-color: white; }}
            .container {{ box-shadow: none; }}
        }}
    </style>
</head>
<body>
    <div class="container">
        <div class="report-header">
            <div class="report-title">{safe_title}</div>
            <div class="report-meta">
                作者：{safe_author} | 生成时间：{datetime.now().strftime('%Y年%m月%d日 %H:%M:%S')}
            </div>
        </div>
'''

    for section in sections:
        # Escape per-section user text; the surrounding markup stays literal.
        section_title = escape(str(section.get('title', '')))
        section_type = section.get('type', 'text')
        section_content = escape(str(section.get('content', '')))

        html += f'        <div class="section">\n'
        html += f'            <div class="section-title">{section_title}</div>\n'
        html += f'            <div class="section-content">\n'

        if section_type == 'analysis':
            html += f'                <div class="alert alert-info">{section_content}</div>\n'
        elif section_type == 'summary':
            html += f'                <div class="alert alert-success"><strong>摘要：</strong>{section_content}</div>\n'
        elif section_type == 'chart':
            html += f'                <div class="chart-placeholder">📊 {section_content}<br><small>图表占位符</small></div>\n'
        elif section_type == 'table':
            html += f'                <div class="alert alert-info">📋 {section_content}</div>\n'
        else:
            html += f'                <p>{section_content}</p>\n'

        html += f'            </div>\n'
        html += f'        </div>\n'

    html += '''
    </div>
</body>
</html>
'''

    return html