"""高级分析模块

提供降维分析、异常检测、时间序列分析等高级分析功能。
"""

import logging
from typing import Dict, List, Any, Optional, Tuple
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
import warnings
warnings.filterwarnings('ignore')

try:
    from sklearn.decomposition import PCA, FastICA
    from sklearn.manifold import TSNE
    from sklearn.ensemble import IsolationForest
    from sklearn.neighbors import LocalOutlierFactor
    from sklearn.svm import OneClassSVM
    from sklearn.preprocessing import StandardScaler
    from sklearn.covariance import EllipticEnvelope
    from scipy import stats
    from scipy.signal import find_peaks
    import umap
except ImportError as e:
    logging.warning(f"某些高级分析依赖包未安装: {e}")
    logging.warning("请运行: pip install scikit-learn scipy umap-learn")

class DimensionalityAnalyzer:
    """Dimensionality-reduction analyzer (PCA, t-SNE, UMAP, ICA).

    Every method keeps only numeric columns, drops rows with missing values,
    standardizes the result, and returns a plain dict of JSON-friendly values.
    Failures are reported as ``{'error': message}`` instead of raising.
    """

    def __init__(self):
        self.logger = logging.getLogger(__name__)
        # Shared scaler; it is refit on every call, so no state leaks between analyses.
        self.scaler = StandardScaler()

    def pca_analysis(self, data: pd.DataFrame, n_components: Optional[int] = None,
                    variance_threshold: float = 0.95) -> Dict[str, Any]:
        """Principal component analysis.

        Args:
            data: Input data; only numeric columns are used, rows with NaN dropped.
            n_components: Number of components to keep. If None, the smallest
                number whose cumulative explained variance reaches
                ``variance_threshold`` is chosen automatically.
            variance_threshold: Cumulative explained-variance target in (0, 1].

        Returns:
            Dict with explained-variance statistics, component loadings,
            transformed data and Plotly-style plot payloads, or
            ``{'error': ...}`` on failure.
        """
        try:
            # Keep numeric columns only and drop incomplete rows.
            numeric_data = data.select_dtypes(include=[np.number]).dropna()
            if numeric_data.empty:
                return {'error': '没有可用的数值型数据'}

            # Standardize so every feature contributes on the same scale.
            scaled_data = self.scaler.fit_transform(numeric_data)

            # Choose the number of components from the variance threshold.
            if n_components is None:
                # Fit with all components first, then keep just enough of them.
                pca_full = PCA()
                pca_full.fit(scaled_data)

                cumsum_variance = np.cumsum(pca_full.explained_variance_ratio_)
                # int() keeps the value JSON-serializable (np.int64 is not).
                n_components = int(np.argmax(cumsum_variance >= variance_threshold)) + 1
                # Clamp to a valid range; max(1, ...) guards degenerate inputs
                # (e.g. a single row) where min(shape) - 1 == 0.
                n_components = max(1, min(n_components, min(scaled_data.shape) - 1))

            # PCA with the chosen number of components.
            pca = PCA(n_components=n_components)
            transformed_data = pca.fit_transform(scaled_data)

            # Mean absolute loading per original feature as a rough importance score.
            feature_importance = np.abs(pca.components_).mean(axis=0)
            feature_ranking = pd.Series(feature_importance, index=numeric_data.columns).sort_values(ascending=False)

            # Eigenvalues of the covariance matrix (explained variances).
            eigenvalues = pca.explained_variance_.tolist()

            # Plotly-style visualization payloads.
            plots = self._generate_pca_plots(pca, transformed_data, numeric_data.columns.tolist())

            return {
                'n_components': n_components,
                'n_features': len(numeric_data.columns),
                'n_samples': len(numeric_data),
                'explained_variance_ratio': pca.explained_variance_ratio_.tolist(),
                'cumulative_variance_ratio': np.cumsum(pca.explained_variance_ratio_).tolist(),
                # float() so the value is JSON-serializable (np.float64 is not).
                'total_variance_explained': float(np.sum(pca.explained_variance_ratio_)),
                'eigenvalues': eigenvalues,
                'components': pca.components_.tolist(),
                'transformed_data': transformed_data.tolist(),
                'feature_importance': feature_ranking.to_dict(),
                'feature_names': numeric_data.columns.tolist(),
                'original_features': numeric_data.columns.tolist(),
                'singular_values': pca.singular_values_.tolist(),
                'plots': plots
            }

        except Exception as e:
            self.logger.error(f"PCA分析失败: {e}")
            return {'error': str(e)}

    def _generate_pca_plots(self, pca, transformed_data, feature_names):
        """Build Plotly-style data/layout dicts for the PCA results.

        Returns a dict with up to three entries: ``pcaPlot`` (PC1-vs-PC2
        scatter, only when >= 2 components exist), ``variancePlot`` (per-
        component explained-variance bars) and ``cumulativePlot``.
        """
        plots = {}

        try:
            # 1. PC1 vs PC2 scatter (only when at least 2 components exist).
            if transformed_data.shape[1] >= 2:
                plots['pcaPlot'] = {
                    'data': [{
                        'x': transformed_data[:, 0].tolist(),
                        'y': transformed_data[:, 1].tolist(),
                        'mode': 'markers',
                        'type': 'scatter',
                        'name': '数据点',
                        'marker': {
                            'size': 8,
                            'color': 'rgba(31, 119, 180, 0.7)',
                            'line': {'width': 1, 'color': 'rgba(31, 119, 180, 1)'}
                        }
                    }],
                    'layout': {
                        'title': '主成分分析散点图',
                        'xaxis': {'title': f'第一主成分 (解释方差: {pca.explained_variance_ratio_[0]:.1%})'},
                        'yaxis': {'title': f'第二主成分 (解释方差: {pca.explained_variance_ratio_[1]:.1%})' if len(pca.explained_variance_ratio_) > 1 else '第二主成分'},
                        'hovermode': 'closest',
                        'showlegend': True
                    }
                }

            # 2. Per-component explained-variance bar chart.
            plots['variancePlot'] = {
                'data': [{
                    'x': [f'PC{i+1}' for i in range(len(pca.explained_variance_ratio_))],
                    'y': (pca.explained_variance_ratio_ * 100).tolist(),
                    'type': 'bar',
                    'name': '解释方差比',
                    'marker': {'color': 'rgba(55, 128, 191, 0.7)'}
                }],
                'layout': {
                    'title': '各主成分解释方差比',
                    'xaxis': {'title': '主成分'},
                    'yaxis': {'title': '解释方差比 (%)'},
                    'showlegend': False
                }
            }

            # 3. Cumulative explained-variance line chart.
            cumulative_variance = np.cumsum(pca.explained_variance_ratio_) * 100
            plots['cumulativePlot'] = {
                'data': [{
                    'x': [f'PC{i+1}' for i in range(len(cumulative_variance))],
                    'y': cumulative_variance.tolist(),
                    'type': 'scatter',
                    'mode': 'lines+markers',
                    'name': '累积解释方差',
                    'line': {'color': 'rgba(219, 64, 82, 1)', 'width': 3},
                    'marker': {'size': 8, 'color': 'rgba(219, 64, 82, 1)'}
                }],
                'layout': {
                    'title': '累积解释方差图',
                    'xaxis': {'title': '主成分'},
                    'yaxis': {'title': '累积解释方差比 (%)', 'range': [0, 100]},
                    'showlegend': False
                }
            }

        except Exception as e:
            self.logger.error(f"生成PCA可视化数据失败: {e}")

        return plots

    def tsne_analysis(self, data: pd.DataFrame, n_components: int = 2,
                     perplexity: float = 30.0, random_state: int = 42) -> Dict[str, Any]:
        """t-SNE dimensionality reduction.

        Args:
            data: Input data; only numeric columns are used, rows with NaN dropped.
            n_components: Target dimensionality of the embedding.
            perplexity: Perplexity; capped at (n_samples - 1) / 3 as required
                by scikit-learn.
            random_state: Random seed for reproducibility.

        Returns:
            Dict with the embedding and fit diagnostics, or ``{'error': ...}``.
        """
        try:
            # Keep numeric columns only and drop incomplete rows.
            numeric_data = data.select_dtypes(include=[np.number]).dropna()
            if numeric_data.empty:
                return {'error': '没有可用的数值型数据'}

            if len(numeric_data) < 4:
                return {'error': 't-SNE需要至少4个样本'}

            # scikit-learn requires perplexity < n_samples; keep a safe margin.
            max_perplexity = (len(numeric_data) - 1) / 3
            perplexity = min(perplexity, max_perplexity)

            # Standardize so every feature contributes on the same scale.
            scaled_data = self.scaler.fit_transform(numeric_data)

            # NOTE: the explicit n_iter=1000 argument was removed — it equals
            # the library default and the parameter was renamed to max_iter in
            # scikit-learn 1.5 (removed in 1.7), so passing it breaks newer versions.
            tsne = TSNE(n_components=n_components, perplexity=perplexity,
                       random_state=random_state)
            transformed_data = tsne.fit_transform(scaled_data)

            return {
                'n_components': n_components,
                'perplexity': perplexity,
                'transformed_data': transformed_data.tolist(),
                # float()/int() keep the values JSON-serializable
                # (kl_divergence_ is a numpy float32 scalar).
                'kl_divergence': float(tsne.kl_divergence_),
                'n_iter': int(tsne.n_iter_),
                'original_features': numeric_data.columns.tolist()
            }

        except Exception as e:
            self.logger.error(f"t-SNE分析失败: {e}")
            return {'error': str(e)}

    def umap_analysis(self, data: pd.DataFrame, n_components: int = 2,
                     n_neighbors: int = 15, min_dist: float = 0.1) -> Dict[str, Any]:
        """UMAP dimensionality reduction.

        Args:
            data: Input data; only numeric columns are used, rows with NaN dropped.
            n_components: Target dimensionality of the embedding.
            n_neighbors: Neighborhood size; capped at n_samples - 1.
            min_dist: Minimum distance between embedded points.

        Returns:
            Dict with the embedding and parameters, or ``{'error': ...}``.
        """
        try:
            # Keep numeric columns only and drop incomplete rows.
            numeric_data = data.select_dtypes(include=[np.number]).dropna()
            if numeric_data.empty:
                return {'error': '没有可用的数值型数据'}

            # n_neighbors must not exceed the number of other samples.
            n_neighbors = min(n_neighbors, len(numeric_data) - 1)

            # Standardize so every feature contributes on the same scale.
            scaled_data = self.scaler.fit_transform(numeric_data)

            # Fixed random_state for reproducible embeddings.
            reducer = umap.UMAP(n_components=n_components, n_neighbors=n_neighbors,
                              min_dist=min_dist, random_state=42)
            transformed_data = reducer.fit_transform(scaled_data)

            return {
                'n_components': n_components,
                'n_neighbors': n_neighbors,
                'min_dist': min_dist,
                'transformed_data': transformed_data.tolist(),
                'original_features': numeric_data.columns.tolist()
            }

        except Exception as e:
            self.logger.error(f"UMAP分析失败: {e}")
            return {'error': str(e)}

    def ica_analysis(self, data: pd.DataFrame, n_components: Optional[int] = None) -> Dict[str, Any]:
        """Independent component analysis (FastICA).

        Args:
            data: Input data; only numeric columns are used, rows with NaN dropped.
            n_components: Number of independent components. If None, defaults
                to min(n_features, n_samples - 1), clamped to at least 1.

        Returns:
            Dict with the unmixed sources, mixing matrix and fit diagnostics,
            or ``{'error': ...}``.
        """
        try:
            # Keep numeric columns only and drop incomplete rows.
            numeric_data = data.select_dtypes(include=[np.number]).dropna()
            if numeric_data.empty:
                return {'error': '没有可用的数值型数据'}

            # Default component count; max(1, ...) guards single-row input
            # where n_samples - 1 == 0.
            if n_components is None:
                n_components = max(1, min(numeric_data.shape[1], numeric_data.shape[0] - 1))

            # Standardize so every feature contributes on the same scale.
            scaled_data = self.scaler.fit_transform(numeric_data)

            # FastICA with a fixed seed and a generous iteration budget.
            ica = FastICA(n_components=n_components, random_state=42, max_iter=1000)
            transformed_data = ica.fit_transform(scaled_data)

            return {
                'n_components': n_components,
                'components': ica.components_.tolist(),
                'mixing_matrix': ica.mixing_.tolist(),
                'transformed_data': transformed_data.tolist(),
                'n_iter': ica.n_iter_,
                'original_features': numeric_data.columns.tolist()
            }

        except Exception as e:
            self.logger.error(f"ICA分析失败: {e}")
            return {'error': str(e)}

class AnomalyDetector:
    """Outlier/anomaly detector.

    Wraps several model-based detectors (isolation forest, LOF, one-class
    SVM, elliptic envelope) plus simple statistical rules behind a uniform
    dict-based API. Each method keeps only numeric columns, drops rows with
    missing values, and reports failures as ``{'error': message}``.

    NOTE: for the model-based detectors the returned indices are positional
    (0-based row positions within the NaN-dropped numeric frame), while the
    statistical detector returns original index labels.
    """

    def __init__(self):
        self.logger = logging.getLogger(__name__)
        # Shared scaler; refit on every call.
        self.scaler = StandardScaler()

    def isolation_forest(self, data: pd.DataFrame, contamination: float = 0.1,
                        random_state: int = 42) -> Dict[str, Any]:
        """Isolation-forest anomaly detection.

        Args:
            data: Input data; only numeric columns are used, rows with NaN dropped.
            contamination: Expected proportion of outliers.
            random_state: Random seed for reproducibility.

        Returns:
            Dict with outlier/normal positions, anomaly scores and summary
            statistics, or ``{'error': ...}``.
        """
        try:
            # Keep numeric columns only and drop incomplete rows.
            numeric_data = data.select_dtypes(include=[np.number]).dropna()
            if numeric_data.empty:
                return {'error': '没有可用的数值型数据'}

            # Standardize features before fitting.
            scaled_data = self.scaler.fit_transform(numeric_data)

            # Fit and label: -1 = outlier, 1 = inlier.
            iso_forest = IsolationForest(contamination=contamination, random_state=random_state)
            outlier_labels = iso_forest.fit_predict(scaled_data)
            outlier_scores = iso_forest.decision_function(scaled_data)

            outlier_indices = np.where(outlier_labels == -1)[0]
            normal_indices = np.where(outlier_labels == 1)[0]

            return {
                'method': 'isolation_forest',
                'contamination': contamination,
                'outlier_indices': outlier_indices.tolist(),
                'normal_indices': normal_indices.tolist(),
                'outlier_scores': outlier_scores.tolist(),
                'n_outliers': len(outlier_indices),
                # Percentage over the rows actually analyzed (after dropna),
                # consistent with statistical_outliers.
                'outlier_percentage': len(outlier_indices) / len(numeric_data) * 100,
                'features_used': numeric_data.columns.tolist()
            }

        except Exception as e:
            self.logger.error(f"孤立森林异常检测失败: {e}")
            return {'error': str(e)}

    def local_outlier_factor(self, data: pd.DataFrame, n_neighbors: int = 20,
                           contamination: float = 0.1) -> Dict[str, Any]:
        """Local-outlier-factor anomaly detection.

        Args:
            data: Input data; only numeric columns are used, rows with NaN dropped.
            n_neighbors: Neighborhood size; capped at n_samples - 1.
            contamination: Expected proportion of outliers.

        Returns:
            Dict with outlier/normal positions, LOF scores and summary
            statistics, or ``{'error': ...}``.
        """
        try:
            # Keep numeric columns only and drop incomplete rows.
            numeric_data = data.select_dtypes(include=[np.number]).dropna()
            if numeric_data.empty:
                return {'error': '没有可用的数值型数据'}

            # n_neighbors must not exceed the number of other samples.
            n_neighbors = min(n_neighbors, len(numeric_data) - 1)

            # Standardize features before fitting.
            scaled_data = self.scaler.fit_transform(numeric_data)

            # Fit and label: -1 = outlier, 1 = inlier.
            lof = LocalOutlierFactor(n_neighbors=n_neighbors, contamination=contamination)
            outlier_labels = lof.fit_predict(scaled_data)
            # More negative => more anomalous.
            outlier_scores = lof.negative_outlier_factor_

            outlier_indices = np.where(outlier_labels == -1)[0]
            normal_indices = np.where(outlier_labels == 1)[0]

            return {
                'method': 'local_outlier_factor',
                'n_neighbors': n_neighbors,
                'contamination': contamination,
                'outlier_indices': outlier_indices.tolist(),
                'normal_indices': normal_indices.tolist(),
                'outlier_scores': outlier_scores.tolist(),
                'n_outliers': len(outlier_indices),
                # Percentage over the rows actually analyzed (after dropna).
                'outlier_percentage': len(outlier_indices) / len(numeric_data) * 100,
                'features_used': numeric_data.columns.tolist()
            }

        except Exception as e:
            self.logger.error(f"LOF异常检测失败: {e}")
            return {'error': str(e)}

    def one_class_svm(self, data: pd.DataFrame, nu: float = 0.1,
                     kernel: str = 'rbf') -> Dict[str, Any]:
        """One-class SVM anomaly detection.

        Args:
            data: Input data; only numeric columns are used, rows with NaN dropped.
            nu: Upper bound on the fraction of outliers.
            kernel: Kernel type passed to OneClassSVM.

        Returns:
            Dict with outlier/normal positions, decision scores and summary
            statistics, or ``{'error': ...}``.
        """
        try:
            # Keep numeric columns only and drop incomplete rows.
            numeric_data = data.select_dtypes(include=[np.number]).dropna()
            if numeric_data.empty:
                return {'error': '没有可用的数值型数据'}

            # Standardize features before fitting.
            scaled_data = self.scaler.fit_transform(numeric_data)

            # Fit and label: -1 = outlier, 1 = inlier.
            svm = OneClassSVM(nu=nu, kernel=kernel)
            outlier_labels = svm.fit_predict(scaled_data)
            outlier_scores = svm.decision_function(scaled_data)

            outlier_indices = np.where(outlier_labels == -1)[0]
            normal_indices = np.where(outlier_labels == 1)[0]

            return {
                'method': 'one_class_svm',
                'nu': nu,
                'kernel': kernel,
                'outlier_indices': outlier_indices.tolist(),
                'normal_indices': normal_indices.tolist(),
                'outlier_scores': outlier_scores.tolist(),
                'n_outliers': len(outlier_indices),
                # Percentage over the rows actually analyzed (after dropna).
                'outlier_percentage': len(outlier_indices) / len(numeric_data) * 100,
                'features_used': numeric_data.columns.tolist()
            }

        except Exception as e:
            self.logger.error(f"One-Class SVM异常检测失败: {e}")
            return {'error': str(e)}

    def elliptic_envelope(self, data: pd.DataFrame, contamination: float = 0.1) -> Dict[str, Any]:
        """Elliptic-envelope (robust covariance) anomaly detection.

        Args:
            data: Input data; only numeric columns are used, rows with NaN dropped.
            contamination: Expected proportion of outliers.

        Returns:
            Dict with outlier/normal positions, decision scores and summary
            statistics, or ``{'error': ...}``.
        """
        try:
            # Keep numeric columns only and drop incomplete rows.
            numeric_data = data.select_dtypes(include=[np.number]).dropna()
            if numeric_data.empty:
                return {'error': '没有可用的数值型数据'}

            # Standardize features before fitting.
            scaled_data = self.scaler.fit_transform(numeric_data)

            # Fit and label: -1 = outlier, 1 = inlier.
            envelope = EllipticEnvelope(contamination=contamination, random_state=42)
            outlier_labels = envelope.fit_predict(scaled_data)
            outlier_scores = envelope.decision_function(scaled_data)

            outlier_indices = np.where(outlier_labels == -1)[0]
            normal_indices = np.where(outlier_labels == 1)[0]

            return {
                'method': 'elliptic_envelope',
                'contamination': contamination,
                'outlier_indices': outlier_indices.tolist(),
                'normal_indices': normal_indices.tolist(),
                'outlier_scores': outlier_scores.tolist(),
                'n_outliers': len(outlier_indices),
                # Percentage over the rows actually analyzed (after dropna).
                'outlier_percentage': len(outlier_indices) / len(numeric_data) * 100,
                'features_used': numeric_data.columns.tolist()
            }

        except Exception as e:
            self.logger.error(f"椭圆包络异常检测失败: {e}")
            return {'error': str(e)}

    def statistical_outliers(self, data: pd.DataFrame, method: str = 'iqr',
                           threshold: float = 1.5) -> Dict[str, Any]:
        """Rule-based statistical outlier detection, column by column.

        Args:
            data: Input data; only numeric columns are used, rows with NaN dropped.
            method: 'iqr' (Tukey fences), 'zscore' or 'modified_zscore'.
            threshold: Fence multiplier for 'iqr', cut-off for the (modified)
                z-score methods. The 1.5 default is the conventional IQR
                fence; for z-score methods a value around 3 is more typical.

        Returns:
            Dict with outlier index labels (original DataFrame index), a
            per-column breakdown and summary statistics, or ``{'error': ...}``.
        """
        try:
            # Keep numeric columns only and drop incomplete rows.
            numeric_data = data.select_dtypes(include=[np.number]).dropna()
            if numeric_data.empty:
                return {'error': '没有可用的数值型数据'}

            outlier_indices = set()
            outlier_details = {}

            # A row is flagged if ANY column marks it as an outlier.
            for column in numeric_data.columns:
                series = numeric_data[column]

                if method == 'iqr':
                    # Tukey fences at Q1/Q3 +/- threshold * IQR.
                    Q1 = series.quantile(0.25)
                    Q3 = series.quantile(0.75)
                    IQR = Q3 - Q1
                    lower_bound = Q1 - threshold * IQR
                    upper_bound = Q3 + threshold * IQR
                    column_outliers = series[(series < lower_bound) | (series > upper_bound)].index

                elif method == 'zscore':
                    z_scores = np.abs(stats.zscore(series))
                    column_outliers = series[z_scores > threshold].index

                elif method == 'modified_zscore':
                    # Iglewicz-Hoaglin modified z-score based on the MAD.
                    # NOTE(review): when mad == 0 (e.g. a mostly-constant
                    # column) this divides by zero and yields inf/NaN scores;
                    # consider a fallback scale — confirm intended behavior.
                    median = series.median()
                    mad = np.median(np.abs(series - median))
                    modified_z_scores = 0.6745 * (series - median) / mad
                    column_outliers = series[np.abs(modified_z_scores) > threshold].index

                else:
                    raise ValueError(f"不支持的方法: {method}")

                outlier_indices.update(column_outliers)
                outlier_details[column] = {
                    'outlier_indices': column_outliers.tolist(),
                    'n_outliers': len(column_outliers)
                }

            outlier_indices = list(outlier_indices)
            normal_indices = [i for i in numeric_data.index if i not in outlier_indices]

            return {
                'method': f'statistical_{method}',
                'threshold': threshold,
                'outlier_indices': outlier_indices,
                'normal_indices': normal_indices,
                'n_outliers': len(outlier_indices),
                'outlier_percentage': len(outlier_indices) / len(numeric_data) * 100,
                'outlier_details': outlier_details,
                'features_used': numeric_data.columns.tolist()
            }

        except Exception as e:
            self.logger.error(f"统计异常检测失败: {e}")
            return {'error': str(e)}

class TimeSeriesAnalyzer:
    """Time-series analyzer: trend, seasonality, forecasting, change points.

    All methods accept a pandas Series, treat it positionally (the index is
    ignored), and report failures as ``{'error': message}`` instead of raising.
    """

    def __init__(self):
        self.logger = logging.getLogger(__name__)

    def trend_analysis(self, data: pd.Series, time_column: Optional[pd.Series] = None) -> Dict[str, Any]:
        """Linear trend analysis.

        Args:
            data: Series of observed values.
            time_column: Optional explicit time axis; defaults to 0..n-1.

        Returns:
            Dict with OLS regression statistics, a trend classification and a
            moving average (first window-1 entries are NaN), or ``{'error': ...}``.
        """
        try:
            if time_column is None:
                time_column = pd.Series(range(len(data)))

            # Ordinary least squares fit of value against time.
            slope, intercept, r_value, p_value, std_err = stats.linregress(time_column, data)

            # Classify: significant (p < 0.05) and signed, otherwise stable.
            if p_value < 0.05:
                if slope > 0:
                    trend_direction = 'increasing'
                    trend_description = '显著上升趋势'
                else:
                    trend_direction = 'decreasing'
                    trend_description = '显著下降趋势'
            else:
                trend_direction = 'stable'
                trend_description = '无显著趋势'

            # |r| as a simple 0-1 strength measure.
            trend_strength = abs(r_value)

            # Moving average over up to a quarter of the series, capped at 10.
            window_size = min(len(data) // 4, 10)
            if window_size > 1:
                moving_avg = data.rolling(window=window_size).mean()
            else:
                moving_avg = data

            return {
                'slope': slope,
                'intercept': intercept,
                'r_squared': r_value ** 2,
                'p_value': p_value,
                'std_error': std_err,
                'trend_direction': trend_direction,
                'trend_description': trend_description,
                'trend_strength': trend_strength,
                'moving_average': moving_avg.tolist(),
                'window_size': window_size
            }

        except Exception as e:
            self.logger.error(f"趋势分析失败: {e}")
            return {'error': str(e)}

    def seasonality_detection(self, data: pd.Series, period: Optional[int] = None) -> Dict[str, Any]:
        """Seasonality detection via FFT period estimation and autocorrelation.

        Args:
            data: Series of observed values.
            period: Seasonal period; if None, estimated from the dominant
                FFT frequency and clamped to [2, n // 2].

        Returns:
            Dict with the (estimated) period, autocorrelation, detected
            seasonal peaks, a simple per-phase seasonal component and the
            de-seasonalized series, or ``{'error': ...}``.
        """
        try:
            # Work on a plain ndarray so all indexing is positional and
            # independent of the Series index.
            values = np.asarray(data, dtype=float)

            # Auto-detect the period from the dominant FFT frequency.
            if period is None:
                fft = np.fft.fft(values - values.mean())
                freqs = np.fft.fftfreq(len(values))

                # Strongest frequency, excluding the DC component.
                power = np.abs(fft[1:len(fft) // 2])
                if len(power) > 0:
                    dominant_freq_idx = np.argmax(power) + 1
                    period = int(1 / abs(freqs[dominant_freq_idx])) if freqs[dominant_freq_idx] != 0 else len(values) // 4
                else:
                    period = len(values) // 4

            period = max(2, min(period, len(values) // 2))

            # Autocorrelation at lags 1 .. min(2*period, n/2) - 1.
            autocorr = []
            for lag in range(1, min(period * 2, len(values) // 2)):
                if lag < len(values):
                    corr = np.corrcoef(values[:-lag], values[lag:])[0, 1]
                    autocorr.append(corr if not np.isnan(corr) else 0)
                else:
                    autocorr.append(0)

            # Peaks in the autocorrelation indicate periodicity. `peaks` is
            # normalized to a plain list in both branches; the original code
            # later called .tolist() on a list and crashed when autocorr was empty.
            if len(autocorr) > 0:
                peaks, _ = find_peaks(autocorr, height=0.3, distance=period // 2)
                peaks = peaks.tolist()

                if len(peaks) > 0:
                    seasonality_strength = max(autocorr[p] for p in peaks)
                    is_seasonal = seasonality_strength > 0.5
                else:
                    seasonality_strength = 0
                    is_seasonal = False
            else:
                seasonality_strength = 0
                is_seasonal = False
                peaks = []

            # Simple decomposition: mean per phase, tiled over the series,
            # then subtracted. Positional ndarray arithmetic avoids the index-
            # alignment bugs of `data[j]` / `data - pd.Series(...)` on Series
            # with non-default indexes.
            if period > 1 and len(values) >= period * 2:
                seasonal_pattern = [float(np.mean(values[i::period])) for i in range(period)]
                seasonal_component = [seasonal_pattern[i % period] for i in range(len(values))]
                detrended = values - np.asarray(seasonal_component)
            else:
                seasonal_component = [0] * len(values)
                detrended = values

            return {
                'period': period,
                'is_seasonal': is_seasonal,
                'seasonality_strength': seasonality_strength,
                'autocorrelation': autocorr,
                'seasonal_peaks': peaks,
                'seasonal_component': seasonal_component,
                'detrended_data': detrended.tolist()
            }

        except Exception as e:
            self.logger.error(f"季节性检测失败: {e}")
            return {'error': str(e)}

    def forecasting(self, data: pd.Series, forecast_periods: int = 5,
                   method: str = 'linear') -> Dict[str, Any]:
        """Forecast future values of a series.

        Args:
            data: Series of observed values (at least 3 points).
            forecast_periods: Number of future points to predict.
            method: 'linear' (OLS extrapolation), 'exponential' (simple
                exponential smoothing, flat forecast) or 'moving_average'
                (flat forecast at the last window mean).

        Returns:
            Dict with point forecasts, a 95% confidence band, model details
            and the original data, or ``{'error': ...}``.
        """
        try:
            if len(data) < 3:
                return {'error': '数据点太少，无法进行预测'}

            # Positional ndarray view of the series.
            values = np.asarray(data, dtype=float)
            time_index = np.arange(len(values))
            future_time = np.arange(len(values), len(values) + forecast_periods)

            if method == 'linear':
                # OLS trend extrapolated over the forecast horizon.
                slope, intercept, r_value, p_value, std_err = stats.linregress(time_index, values)
                forecast = slope * future_time + intercept

                # Constant 95% band from the in-sample residual RMSE.
                residuals = values - (slope * time_index + intercept)
                mse = np.mean(residuals ** 2)
                confidence_interval = 1.96 * np.sqrt(mse)

                forecast_upper = forecast + confidence_interval
                forecast_lower = forecast - confidence_interval

                model_info = {
                    'slope': slope,
                    'intercept': intercept,
                    'r_squared': r_value ** 2,
                    'p_value': p_value
                }

            elif method == 'exponential':
                # Simple exponential smoothing; the forecast is flat at the
                # last smoothed value.
                alpha = 0.3  # smoothing factor
                smoothed = [values[0]]

                for i in range(1, len(values)):
                    smoothed.append(alpha * values[i] + (1 - alpha) * smoothed[-1])

                last_value = smoothed[-1]
                forecast = np.full(forecast_periods, last_value)

                # Position-wise residuals; ndarrays avoid the pandas index-
                # alignment hazard of `data - pd.Series(smoothed)`.
                residuals = values - np.asarray(smoothed)
                std_residual = np.std(residuals)
                confidence_interval = 1.96 * std_residual

                forecast_upper = forecast + confidence_interval
                forecast_lower = forecast - confidence_interval

                model_info = {
                    'alpha': alpha,
                    'last_smoothed_value': last_value
                }

            elif method == 'moving_average':
                # Flat forecast at the mean of the last window.
                window_size = min(len(values) // 3, 5)
                window_size = max(window_size, 1)

                last_avg = np.mean(values[-window_size:])
                forecast = np.full(forecast_periods, last_avg)

                # Band from the spread of the last window.
                recent_data = values[-window_size:]
                std_recent = np.std(recent_data)
                confidence_interval = 1.96 * std_recent

                forecast_upper = forecast + confidence_interval
                forecast_lower = forecast - confidence_interval

                model_info = {
                    'window_size': window_size,
                    'last_average': last_avg
                }

            else:
                raise ValueError(f"不支持的预测方法: {method}")

            return {
                'method': method,
                'forecast_periods': forecast_periods,
                'forecast': forecast.tolist(),
                'forecast_upper': forecast_upper.tolist(),
                'forecast_lower': forecast_lower.tolist(),
                'model_info': model_info,
                'original_data': data.tolist()
            }

        except Exception as e:
            self.logger.error(f"时间序列预测失败: {e}")
            return {'error': str(e)}

    def change_point_detection(self, data: pd.Series, min_size: int = 5) -> Dict[str, Any]:
        """Detect change points from mean/variance shifts between the two
        segments produced by each candidate split.

        Args:
            data: Series of observed values.
            min_size: Minimum segment length on either side of a split, and
                the minimum gap between reported change points.

        Returns:
            Dict with change-point positions, one score per candidate split
            (candidates are positions min_size .. n - min_size - 1), and
            per-segment summary statistics, or ``{'error': ...}``.
        """
        try:
            if len(data) < min_size * 2:
                return {'error': '数据长度不足以进行变点检测'}

            candidate_positions = range(min_size, len(data) - min_size)
            change_scores = []

            # Score every candidate split first. Thresholding only after all
            # scores exist fixes the original bias of comparing early
            # candidates against a percentile of an incomplete score list.
            for i in candidate_positions:
                left_segment = data.iloc[:i]
                right_segment = data.iloc[i:]

                left_var = np.var(left_segment)
                right_var = np.var(right_segment)
                left_mean = np.mean(left_segment)
                right_mean = np.mean(right_segment)

                # Relative variance and mean shifts; epsilon avoids 0/0.
                var_change = abs(left_var - right_var) / (left_var + right_var + 1e-8)
                mean_change = abs(left_mean - right_mean) / (abs(left_mean) + abs(right_mean) + 1e-8)

                change_scores.append(var_change + mean_change)

            # A split is a change point when its score is in the top decile.
            threshold = np.percentile(change_scores, 90)
            change_points = [i for i, score in zip(candidate_positions, change_scores)
                             if score > threshold]

            # Drop change points closer than min_size to the previous one.
            filtered_change_points = []
            for cp in change_points:
                if not filtered_change_points or cp - filtered_change_points[-1] >= min_size:
                    filtered_change_points.append(cp)

            # Summarize each resulting segment.
            segments = []
            start_idx = 0

            for cp in filtered_change_points + [len(data)]:
                segment_data = data.iloc[start_idx:cp]
                if len(segment_data) > 0:
                    segments.append({
                        'start': start_idx,
                        'end': cp,
                        'length': len(segment_data),
                        'mean': np.mean(segment_data),
                        'std': np.std(segment_data),
                        'trend': 'increasing' if segment_data.iloc[-1] > segment_data.iloc[0] else 'decreasing'
                    })
                start_idx = cp

            return {
                'change_points': filtered_change_points,
                'n_change_points': len(filtered_change_points),
                'change_scores': change_scores,
                'segments': segments,
                'n_segments': len(segments)
            }

        except Exception as e:
            self.logger.error(f"变点检测失败: {e}")
            return {'error': str(e)}


class AssociationRuleAnalyzer:
    """Association rule analyzer (simplified Apriori: 1- and 2-itemsets only)."""

    def __init__(self):
        self.logger = logging.getLogger(__name__)

    def apriori_analysis(self, data: pd.DataFrame, min_support: float = 0.1,
                        min_confidence: float = 0.5, min_lift: float = 1.0) -> Dict[str, Any]:
        """
        Apriori association rule mining.

        Each row of ``data`` is treated as one transaction and each
        (column, value) pair as one item. Only itemsets of size 1 and 2
        are mined.

        Args:
            data: input data (transaction-style DataFrame)
            min_support: minimum support threshold
            min_confidence: minimum confidence threshold
            min_lift: minimum lift threshold

        Returns:
            Dict with frequent itemsets, association rules (sorted by
            confidence, descending) and the run parameters, or
            {'error': ...} on failure.
        """
        try:
            if data.empty:
                return {'error': '数据为空'}

            # One-hot encode each (column, value) pair as an item.
            # Mask missing cells back to NaN *after* the string cast so that
            # get_dummies skips them — otherwise NaN/None would be minted as
            # literal 'nan'/'None' items and pollute the rules.
            binary_data = pd.get_dummies(data.astype(str).where(data.notna()))

            # 1-itemset supports (column mean of the indicator == support).
            item_support = binary_data.mean()
            frequent_items = item_support[item_support >= min_support]

            if len(frequent_items) < 2:
                return {'error': f'满足最小支持度({min_support})的项目少于2个'}

            # Frequent 1-itemsets.
            frequent_itemsets = [
                {'itemset': [item], 'support': frequent_items[item]}
                for item in frequent_items.index
            ]
            rules = []

            # Frequent 2-itemsets and the rules they generate (both directions).
            for i, item1 in enumerate(frequent_items.index):
                for item2 in frequent_items.index[i + 1:]:
                    joint_support = (binary_data[item1] & binary_data[item2]).mean()

                    if joint_support < min_support:
                        continue

                    frequent_itemsets.append({
                        'itemset': [item1, item2],
                        'support': joint_support
                    })

                    # Candidate rule antecedent -> consequent, both ways.
                    for antecedent, consequent in ((item1, item2), (item2, item1)):
                        confidence = joint_support / frequent_items[antecedent]
                        lift = confidence / frequent_items[consequent]

                        if confidence >= min_confidence and lift >= min_lift:
                            rules.append({
                                'antecedent': [antecedent],
                                'consequent': [consequent],
                                'support': joint_support,
                                'confidence': confidence,
                                'lift': lift
                            })

            # Strongest (most confident) rules first; sort is stable so ties
            # keep their generation order.
            rules.sort(key=lambda x: x['confidence'], reverse=True)

            return {
                'frequent_itemsets': frequent_itemsets,
                'association_rules': rules,
                'n_frequent_itemsets': len(frequent_itemsets),
                'n_rules': len(rules),
                'min_support': min_support,
                'min_confidence': min_confidence,
                'min_lift': min_lift,
                'total_transactions': len(data)
            }

        except Exception as e:
            self.logger.error(f"关联规则分析失败: {e}")
            return {'error': str(e)}


class RegressionAnalyzer:
    """Regression analyzer providing linear and polynomial regression."""

    def __init__(self):
        self.logger = logging.getLogger(__name__)

    def linear_regression(self, data: pd.DataFrame, target_column: str,
                         feature_columns: Optional[List[str]] = None) -> Dict[str, Any]:
        """
        Ordinary least squares linear regression.

        Uses scipy.stats.linregress for a single feature (exact p-value and
        standard error) and a numpy least-squares fit for multiple features
        (replaces the former runtime sklearn dependency), with an overall
        F-test p-value instead of the previous hard-coded heuristic.

        Args:
            data: input data
            target_column: name of the target variable column
            feature_columns: names of feature columns; defaults to all
                numeric columns except the target

        Returns:
            Dict with coefficients, fit statistics, predictions and
            residuals, or {'error': ...} on failure.
        """
        try:
            if target_column not in data.columns:
                return {'error': f'目标变量 {target_column} 不存在'}

            # Default feature set: every numeric column except the target.
            if feature_columns is None:
                numeric_columns = data.select_dtypes(include=[np.number]).columns.tolist()
                feature_columns = [col for col in numeric_columns if col != target_column]

            if not feature_columns:
                return {'error': '没有可用的特征变量'}

            analysis_data = data[feature_columns + [target_column]].dropna()

            if len(analysis_data) < 3:
                return {'error': '数据点太少，无法进行回归分析'}

            X = analysis_data[feature_columns]
            y = analysis_data[target_column]
            n_obs = len(analysis_data)
            n_features = len(feature_columns)

            if n_features == 1:
                # Simple linear regression: scipy supplies slope, intercept,
                # correlation, p-value and standard error directly.
                slope, intercept, r_value, p_value, std_err = stats.linregress(X.iloc[:, 0], y)
                coefficients = [slope]
                r_squared = r_value ** 2
                y_pred = slope * X.iloc[:, 0] + intercept
            else:
                # Multiple regression via least squares with an explicit
                # intercept column in the design matrix.
                design = np.column_stack([np.ones(n_obs), X.to_numpy(dtype=float)])
                beta, _, _, _ = np.linalg.lstsq(design, y.to_numpy(dtype=float), rcond=None)
                intercept = float(beta[0])
                coefficients = beta[1:].tolist()
                y_pred = design @ beta

                ss_res = float(np.sum((y.to_numpy(dtype=float) - y_pred) ** 2))
                ss_tot = float(np.sum((y - y.mean()) ** 2))
                # Constant target: define R² as 0 instead of dividing by zero.
                r_squared = 1 - ss_res / ss_tot if ss_tot > 0 else 0.0
                std_err = float(np.sqrt(ss_res / n_obs))

                # Overall-model significance from the regression F-test
                # (replaces the previous hard-coded 0.05/0.5 pseudo p-value).
                dof_resid = n_obs - n_features - 1
                if dof_resid > 0 and r_squared < 1.0:
                    f_stat = (r_squared / n_features) / ((1 - r_squared) / dof_resid)
                    p_value = float(stats.f.sf(f_stat, n_features, dof_resid))
                else:
                    # Saturated or perfectly fitting model: F degenerates.
                    p_value = 0.0

            # Residual analysis and error metrics.
            residuals = y - y_pred
            mae = np.mean(np.abs(residuals))
            mse = np.mean(residuals ** 2)
            rmse = np.sqrt(mse)

            # Feature importance ranked by absolute coefficient magnitude.
            feature_importance = pd.Series(np.abs(coefficients), index=feature_columns).sort_values(ascending=False)

            # Guard against zero residual degrees of freedom (n == k + 1),
            # which previously raised ZeroDivisionError.
            dof = n_obs - n_features - 1
            adjusted_r_squared = (1 - (1 - r_squared) * (n_obs - 1) / dof) if dof > 0 else None

            return {
                'model_type': 'linear_regression',
                'target_variable': target_column,
                'feature_variables': feature_columns,
                'coefficients': dict(zip(feature_columns, coefficients)),
                'intercept': intercept,
                'r_squared': r_squared,
                'adjusted_r_squared': adjusted_r_squared,
                'p_value': p_value,
                'standard_error': std_err,
                'mae': mae,
                'mse': mse,
                'rmse': rmse,
                'feature_importance': feature_importance.to_dict(),
                'predictions': y_pred.tolist(),
                'residuals': residuals.tolist(),
                'n_observations': n_obs
            }

        except Exception as e:
            self.logger.error(f"线性回归分析失败: {e}")
            return {'error': str(e)}

    def polynomial_regression(self, data: pd.DataFrame, target_column: str,
                            feature_column: str, degree: int = 2) -> Dict[str, Any]:
        """
        Polynomial regression of a single feature.

        Args:
            data: input data
            target_column: name of the target variable column
            feature_column: name of the feature column
            degree: polynomial degree

        Returns:
            Dict with coefficients (highest order first, as returned by
            np.polyfit), fit statistics, predictions and residuals, or
            {'error': ...} on failure.
        """
        try:
            if target_column not in data.columns or feature_column not in data.columns:
                return {'error': '指定的列不存在'}

            analysis_data = data[[feature_column, target_column]].dropna()

            # A degree-d fit needs at least d + 2 points for any residual dof.
            if len(analysis_data) < degree + 2:
                return {'error': f'数据点太少，无法进行{degree}次多项式回归'}

            X = analysis_data[feature_column]
            y = analysis_data[target_column]

            # Least-squares polynomial fit; coefficients are highest order first.
            coefficients = np.polyfit(X, y, degree)
            poly_func = np.poly1d(coefficients)
            y_pred = poly_func(X)

            # Guard against a constant target (zero total variance), which
            # previously produced a division by zero / NaN R².
            ss_tot = np.sum((y - np.mean(y)) ** 2)
            ss_res = np.sum((y - y_pred) ** 2)
            r_squared = 1 - ss_res / ss_tot if ss_tot > 0 else 0.0

            residuals = y - y_pred
            mae = np.mean(np.abs(residuals))
            mse = np.mean(residuals ** 2)
            rmse = np.sqrt(mse)

            return {
                'model_type': 'polynomial_regression',
                'target_variable': target_column,
                'feature_variable': feature_column,
                'degree': degree,
                'coefficients': coefficients.tolist(),
                'polynomial_equation': str(poly_func),
                'r_squared': r_squared,
                'mae': mae,
                'mse': mse,
                'rmse': rmse,
                'predictions': y_pred.tolist(),
                'residuals': residuals.tolist(),
                'n_observations': len(analysis_data)
            }

        except Exception as e:
            self.logger.error(f"多项式回归分析失败: {e}")
            return {'error': str(e)}