#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
时间序列智能分析系统(TimeSeriesIQ)
版本: 1.0.0
作者: TimeSeriesIQ 开发团队
日期: 2023-11-10
描述: 提供全面的时间序列数据分析、预测与异常检测功能
"""

import os
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from tensorflow.keras.models import Sequential, load_model, Model
from tensorflow.keras.layers import Dense, LSTM, Dropout, Input, Conv1D, GRU
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.optimizers import Adam
import warnings

warnings.filterwarnings('ignore')

class TimeSeriesDataProcessor:
    """Time-series preprocessing: loading, cleaning, outlier handling,
    scaling, seasonal decomposition and feature engineering."""

    def __init__(self):
        """Initialize an empty processor; state is populated by load_data()."""
        self.data = None                 # working DataFrame, datetime-indexed after load_data()
        self.original_data = None        # untouched copy of the raw data
        self.time_column = None          # name of the timestamp column
        self.target_column = None        # name of the target-variable column
        self.scaler = None               # fitted scaler, set by normalize_data()
        self.data_before_scaling = None  # snapshot taken before scaling (was previously undeclared)

    def load_data(self, file_path, time_col=0, target_col=1, sep=',', date_format=None):
        """Load a CSV/Excel file and index it by its timestamp column.

        Args:
            file_path (str): path to the data file (.csv, .xls or .xlsx).
            time_col (int|str): index or name of the timestamp column.
            target_col (int|str): index or name of the target column.
            sep (str): field separator for CSV files.
            date_format (str): explicit datetime format, e.g. '%Y-%m-%d'.

        Returns:
            pandas.DataFrame | None: the loaded data, or None on failure.
        """
        try:
            if file_path.endswith('.csv'):
                self.data = pd.read_csv(file_path, sep=sep)
            elif file_path.endswith(('.xls', '.xlsx')):
                self.data = pd.read_excel(file_path)
            else:
                raise ValueError("不支持的文件格式，请使用CSV或Excel文件")

            # Keep an untouched copy for later reference.
            self.original_data = self.data.copy()

            # Resolve positional column references to names.
            if isinstance(time_col, int):
                self.time_column = self.data.columns[time_col]
            else:
                self.time_column = time_col

            if isinstance(target_col, int):
                self.target_column = self.data.columns[target_col]
            else:
                self.target_column = target_col

            # Parse timestamps (with the explicit format when given).
            if date_format:
                self.data[self.time_column] = pd.to_datetime(self.data[self.time_column], format=date_format)
            else:
                self.data[self.time_column] = pd.to_datetime(self.data[self.time_column])

            # Use the timestamp column as the index.
            self.data.set_index(self.time_column, inplace=True)

            print(f"成功加载数据，共{len(self.data)}行")
            return self.data

        except Exception as e:
            print(f"加载数据时出错: {str(e)}")
            return None

    def handle_missing_values(self, method='interpolate', max_gap=5):
        """Fill or drop missing values in the target column.

        Args:
            method (str): one of 'interpolate', 'ffill', 'bfill', 'drop', 'mean'.
            max_gap (int): maximum run of consecutive NaNs to interpolate.

        Returns:
            pandas.DataFrame | None: the cleaned data, or None on error.
        """
        if self.data is None:
            print("请先加载数据")
            return None

        missing_before = self.data[self.target_column].isna().sum()

        if method == 'interpolate':
            # time-weighted interpolation, limited to max_gap consecutive NaNs
            self.data[self.target_column] = self.data[self.target_column].interpolate(method='time', limit=max_gap)
        elif method == 'ffill':
            self.data[self.target_column] = self.data[self.target_column].ffill()
        elif method == 'bfill':
            self.data[self.target_column] = self.data[self.target_column].bfill()
        elif method == 'drop':
            self.data = self.data.dropna(subset=[self.target_column])
        elif method == 'mean':
            self.data[self.target_column] = self.data[self.target_column].fillna(
                self.data[self.target_column].mean()
            )
        else:
            # bug fix: unknown methods used to be silently ignored
            print(f"不支持的方法: {method}")
            return None

        missing_after = self.data[self.target_column].isna().sum()
        print(f"缺失值处理: {missing_before} -> {missing_after}")

        return self.data

    def detect_outliers(self, method='iqr', threshold=1.5, window_size=10):
        """Detect outliers in the target column.

        Args:
            method (str): 'iqr', 'zscore' or 'rolling'.
            threshold (float): IQR multiplier or z-score cutoff.
            window_size (int): window length for the 'rolling' method.

        Returns:
            pandas.Series | None: outlier values indexed by timestamp,
            or None on error.
        """
        if self.data is None:
            print("请先加载数据")
            return None

        series = self.data[self.target_column]

        if method == 'iqr':
            # Tukey fences: threshold * IQR beyond the quartiles.
            q1 = series.quantile(0.25)
            q3 = series.quantile(0.75)
            iqr = q3 - q1
            lower_bound = q1 - threshold * iqr
            upper_bound = q3 + threshold * iqr
            outliers = series[(series < lower_bound) | (series > upper_bound)]

        elif method == 'zscore':
            # Global z-score against the full-series mean/std.
            mean = series.mean()
            std = series.std()
            z_scores = np.abs((series - mean) / std)
            outliers = series[z_scores > threshold]

        elif method == 'rolling':
            # Local z-score against a rolling mean/std.
            rolling_mean = series.rolling(window=window_size).mean()
            rolling_std = series.rolling(window=window_size).std()
            z_scores = np.abs((series - rolling_mean) / rolling_std)
            outliers = series[z_scores > threshold].dropna()

        else:
            # bug fix: an unknown method previously raised NameError below
            print(f"不支持的方法: {method}")
            return None

        print(f"检测到{len(outliers)}个异常值")
        return outliers

    def replace_outliers(self, outliers_idx, method='mean', window_size=10):
        """Replace detected outliers in the target column.

        Args:
            outliers_idx (pandas.Series): outlier values indexed by timestamp
                (as returned by detect_outliers).
            method (str): 'mean', 'median', 'interpolate' or 'rolling_mean'.
            window_size (int): window used by 'rolling_mean'.

        Returns:
            pandas.DataFrame | None: data with outliers replaced, or None on error.
        """
        if self.data is None or outliers_idx is None:
            print("请先加载数据并检测异常值")
            return None

        idx = outliers_idx.index
        target = self.data[self.target_column]

        if method == 'mean':
            # bug fix: the statistic is computed once up front; it was previously
            # recomputed per outlier on partially-modified data
            self.data.loc[idx, self.target_column] = target.mean()

        elif method == 'median':
            self.data.loc[idx, self.target_column] = target.median()

        elif method == 'interpolate':
            # Mask all outliers first, then interpolate in a single pass
            # (previously interpolated once per outlier inside a loop).
            self.data.loc[idx, self.target_column] = np.nan
            self.data[self.target_column] = self.data[self.target_column].interpolate()

        elif method == 'rolling_mean':
            # bug fix: mask the outliers before averaging so an outlier cannot
            # contaminate its own replacement value; min_periods=1 avoids
            # writing NaN near the series edges.
            masked = target.mask(target.index.isin(idx))
            window = masked.rolling(window=window_size, center=True, min_periods=1).mean()
            self.data.loc[idx, self.target_column] = window.loc[idx]

        else:
            print(f"不支持的方法: {method}")
            return None

        print(f"已替换{len(outliers_idx)}个异常值")
        return self.data

    def normalize_data(self, method='standard'):
        """Scale the target column in place.

        Args:
            method (str): 'standard' (z-score) or 'minmax' ([0, 1]).

        Returns:
            pandas.DataFrame | None: scaled data, or None on error.
        """
        if self.data is None:
            print("请先加载数据")
            return None

        if method == 'standard':
            self.scaler = StandardScaler()
        elif method == 'minmax':
            self.scaler = MinMaxScaler()
        else:
            # bug fix: an unknown method previously crashed on a None scaler
            print(f"不支持的方法: {method}")
            return None

        # Snapshot the unscaled data so it can be restored if needed.
        self.data_before_scaling = self.data.copy()

        # Fit on a single-column frame (sklearn expects 2-D input).
        self.data[self.target_column] = self.scaler.fit_transform(
            self.data[[self.target_column]])

        print(f"已使用{method}方法标准化数据")
        return self.data

    def inverse_transform(self, data):
        """Map scaled values back to the original scale.

        Args:
            data (array-like): values on the scaled scale.

        Returns:
            numpy.ndarray: values on the original scale (the input is returned
            unchanged when no scaler has been fitted).
        """
        if self.scaler is None:
            print("未找到标准化器，请先标准化数据")
            return data

        # Accept plain lists as well as arrays; sklearn needs 2-D input.
        data = np.asarray(data)
        if data.ndim == 1:
            data = data.reshape(-1, 1)

        return self.scaler.inverse_transform(data)

    def decompose_time_series(self, period=None, model='additive'):
        """Decompose the target series into trend, seasonal and residual parts.

        Args:
            period (int): seasonal period; None triggers ACF-based auto-detection.
            model (str): 'additive' or 'multiplicative'.

        Returns:
            statsmodels DecomposeResult | None: decomposition result, or None on error.
        """
        if self.data is None:
            print("请先加载数据")
            return None

        if period is None:
            # bug fix: previous code referenced `sm.tsa.acf` but `sm` was never
            # imported anywhere in this module
            from statsmodels.tsa.stattools import acf
            # Look for the first strong autocorrelation peak; clamp nlags so
            # short series do not error out.
            nlags = min(365, len(self.data) - 1)
            acf_values = pd.Series(
                acf(self.data[self.target_column], nlags=nlags)
            )
            # First lag whose autocorrelation exceeds 0.5 (tunable threshold).
            potential_periods = acf_values[acf_values > 0.5].index.tolist()
            if len(potential_periods) > 1:
                period = potential_periods[1]  # index 0 is always lag 0 (ACF == 1)
                print(f"自动检测到的季节周期: {period}")
            else:
                period = 1  # fall back to "no seasonality"
                print("未检测到明显的季节性，使用默认周期1")

        try:
            result = seasonal_decompose(
                self.data[self.target_column],
                model=model,
                period=period
            )
            return result
        except Exception as e:
            print(f"时间序列分解出错: {str(e)}")
            return None

    def create_features(self, lag_features=True, rolling_features=True, datetime_features=True):
        """Build a supervised-learning feature frame from the target series.

        Args:
            lag_features (bool): add lag_{1,7,14,30} columns (when shorter than the data).
            rolling_features (bool): add rolling mean/std for windows 7/14/30.
            datetime_features (bool): add calendar features derived from the index.

        Returns:
            pandas.DataFrame | None: feature frame with NaN rows dropped.
        """
        if self.data is None:
            print("请先加载数据")
            return None

        # Work on a copy so the stored data is left untouched.
        df = self.data.copy()

        if lag_features:
            for lag in [1, 7, 14, 30]:  # common lag horizons
                if lag < len(df):
                    df[f'lag_{lag}'] = df[self.target_column].shift(lag)

        if rolling_features:
            for window in [7, 14, 30]:
                if window < len(df):
                    df[f'rolling_mean_{window}'] = df[self.target_column].rolling(window=window).mean()
                    df[f'rolling_std_{window}'] = df[self.target_column].rolling(window=window).std()

        if datetime_features:
            # Calendar features require a DatetimeIndex.
            if not isinstance(df.index, pd.DatetimeIndex):
                print("索引不是日期类型，无法创建日期特征")
            else:
                df['hour'] = df.index.hour
                df['day'] = df.index.day
                df['month'] = df.index.month
                df['year'] = df.index.year
                df['dayofweek'] = df.index.dayofweek
                df['quarter'] = df.index.quarter
                df['is_weekend'] = df.index.dayofweek.isin([5, 6]).astype(int)

        # Rows containing NaN (introduced by lags/rolling windows) are unusable.
        df = df.dropna()
        print(f"创建了{df.shape[1] - 1}个特征，有效行数: {len(df)}")

        return df


class TimeSeriesAnalyzer:
    """时间序列分析类"""
    
    def __init__(self, data_processor=None):
        """
        初始化分析器
        
        参数:
            data_processor (TimeSeriesDataProcessor): 数据预处理器实例
        """
        self.dp = data_processor
        if data_processor is None:
            self.dp = TimeSeriesDataProcessor()
        
        self.data = None
        self.target_column = None
        self.models = {}
        self.forecast_results = {}
    
    def set_data(self, data, target_column):
        """
        设置分析数据
        
        参数:
            data (pandas.DataFrame): 时间序列数据
            target_column (str): 目标变量列名
        """
        self.data = data
        self.target_column = target_column
    
    def check_stationarity(self, window=10):
        """
        检查时间序列是否平稳
        
        参数:
            window (int): 滚动窗口大小
            
        返回:
            dict: 包含ADF测试结果和滚动统计信息
        """
        if self.data is None or self.target_column is None:
            print("请先设置数据")
            return None
        
        series = self.data[self.target_column]
        
        # ADF测试
        result = adfuller(series.dropna())
        
        adf_result = {
            'test_statistic': result[0],
            'p_value': result[1],
            'critical_values': result[4],
            'is_stationary': result[1] < 0.05
        }
        
        # 计算滚动统计量
        rolling_mean = series.rolling(window=window).mean()
        rolling_std = series.rolling(window=window).std()
        
        return {
            'adf_test': adf_result,
            'rolling_mean': rolling_mean,
            'rolling_std': rolling_std
        }
    
    def make_stationary(self, method='diff', order=1):
        """
        将时间序列转换为平稳序列
        
        参数:
            method (str): 转换方法 ('diff', 'log', 'pct_change')
            order (int): 差分阶数
            
        返回:
            pandas.Series: 平稳序列
        """
        if self.data is None or self.target_column is None:
            print("请先设置数据")
            return None
        
        series = self.data[self.target_column].copy()
        original_series = series.copy()
        
        if method == 'diff':
            stationary_series = series.diff(order).dropna()
        elif method == 'log':
            if (series <= 0).any():
                print("数据包含非正值，无法进行对数转换")
                return None
            stationary_series = np.log(series)
        elif method == 'pct_change':
            stationary_series = series.pct_change().dropna()
        else:
            print(f"不支持的方法: {method}")
            return None
        
        # 检查转换后序列的平稳性
        adf_result = adfuller(stationary_series.dropna())
        is_stationary = adf_result[1] < 0.05
        
        print(f"转换方法: {method}")
        print(f"ADF测试p值: {adf_result[1]:.4f}")
        print(f"序列{'是' if is_stationary else '不是'}平稳的")
        
        # 保存转换信息，用于后续逆转换
        self.stationarity_transform = {
            'method': method,
            'order': order,
            'original_series': original_series,
            'is_stationary': is_stationary
        }
        
        return stationary_series
    
    def inverse_stationarity_transform(self, transformed_series, start_idx=None):
        """
        逆转平稳性转换，恢复原始尺度
        
        参数:
            transformed_series (pandas.Series): 转换后的序列
            start_idx: 起始索引(用于预测值)
            
        返回:
            pandas.Series: 原始尺度的序列
        """
        if not hasattr(self, 'stationarity_transform'):
            print("未找到转换信息")
            return transformed_series
        
        method = self.stationarity_transform['method']
        original = self.stationarity_transform['original_series']
        
        if method == 'diff':
            order = self.stationarity_transform['order']
            last_values = original.iloc[-order:].values
            
            # 针对多阶差分
            if order == 1:
                cumsum = np.concatenate([last_values, transformed_series])
                for i in range(1, len(cumsum)):
                    cumsum[i] = cumsum[i] + cumsum[i-1]
                return pd.Series(cumsum[1:], index=transformed_series.index)
            else:
                # 高阶差分逆转换更复杂，这里简化处理
                print("高阶差分逆转换未完全实现")
                return transformed_series
        
        elif method == 'log':
            return np.exp(transformed_series)
        
        elif method == 'pct_change':
            if start_idx is None:
                start_value = original.iloc[-1]
            else:
                start_value = original.loc[start_idx]
                
            result = [start_value]
            for change in transformed_series:
                next_value = result[-1] * (1 + change)
                result.append(next_value)
            
            return pd.Series(result[1:], index=transformed_series.index)
        
        return transformed_series
    
    def fit_arima(self, p=1, d=0, q=1, seasonal_order=(0,0,0,0), train_size=0.8):
        """
        拟合ARIMA/SARIMA模型
        
        参数:
            p (int): AR阶数
            d (int): 差分阶数
            q (int): MA阶数
            seasonal_order (tuple): 季节参数 (P,D,Q,s)
            train_size (float): 训练集比例
            
        返回:
            模型和预测结果
        """
        if self.data is None or self.target_column is None:
            print("请先设置数据")
            return None
        
        series = self.data[self.target_column]
        n = len(series)
        train_idx = int(n * train_size)
        
        train = series[:train_idx]
        test = series[train_idx:]
        
        # 使用SARIMA模型
        if any(seasonal_order):
            model = SARIMAX(
                train,
                order=(p, d, q),
                seasonal_order=seasonal_order,
                enforce_stationarity=False
            )
            model_type = 'SARIMA'
        else:
            model = ARIMA(train, order=(p, d, q))
            model_type = 'ARIMA'
        
        try:
            fitted_model = model.fit()
            
            # 预测测试集
            prediction = fitted_model.forecast(len(test))
            
            # 计算性能指标
            mae = mean_absolute_error(test, prediction)
            rmse = np.sqrt(mean_squared_error(test, prediction))
            r2 = r2_score(test, prediction)

            # 使用统一的指标格式
            metrics = {
                'train_mae': mae,
                'train_rmse': rmse,
                'test_mae': mae,
                'test_rmse': rmse,
                'r2': r2
            }

            # 保存模型和结果
            self.models[model_type.lower()] = fitted_model
            self.forecast_results[model_type.lower()] = {
                'train': train,
                'test': test,
                'prediction': prediction,
                'train_predict': prediction,
                'test_predict': prediction,
                'metrics': {
                    'train_mae': mae,
                    'train_rmse': rmse,
                    'test_mae': mae,
                    'test_rmse': rmse,
                    'r2': r2
                }
            }

            # 打印性能指标
            print(f"{model_type}模型拟合完成")
            print(f"MAE: {mae:.4f}")
            print(f"RMSE: {rmse:.4f}")
            print(f"R²: {r2:.4f}")
            
            print(f"{model_type}模型拟合完成")
            if model_type.lower() in ['arima', 'sarima']:
                mae = metrics['train_mae']  # 使用训练集指标作为整体指标
                rmse = metrics['train_rmse']
                print(f"MAE: {mae:.4f}")
                print(f"RMSE: {rmse:.4f}")
                print(f"R²: {metrics['r2']:.4f}")
            else:
                print(f"训练集 MAE: {metrics['train_mae']:.4f}")
                print(f"训练集 RMSE: {metrics['train_rmse']:.4f}")
                print(f"测试集 MAE: {metrics['test_mae']:.4f}")
                print(f"测试集 RMSE: {metrics['test_rmse']:.4f}")
            
            return fitted_model, prediction
        
        except Exception as e:
            print(f"模型拟合出错: {str(e)}")
            return None, None
    
    def prepare_lstm_data(self, series, n_steps=10, train_size=0.8):
        """
        准备LSTM模型的序列数据
        
        参数:
            series (pandas.Series): 时间序列
            n_steps (int): 输入序列长度
            train_size (float): 训练集比例
            
        返回:
            训练和测试数据
        """
        # 创建输入序列和目标值
        X, y = [], []
        for i in range(len(series) - n_steps):
            X.append(series[i:i+n_steps])
            y.append(series[i+n_steps])
        
        X = np.array(X)
        y = np.array(y)
        
        # 调整维度以符合LSTM要求 [samples, time_steps, features]
        X = X.reshape(X.shape[0], X.shape[1], 1)
        
        # 分割训练集和测试集
        train_size = int(len(X) * train_size)
        X_train, X_test = X[:train_size], X[train_size:]
        y_train, y_test = y[:train_size], y[train_size:]
        
        return X_train, X_test, y_train, y_test
    
    def fit_lstm(self, n_steps=10, epochs=50, batch_size=32, train_size=0.8, neurons=50):
        """
        拟合LSTM模型
        
        参数:
            n_steps (int): 输入序列长度
            epochs (int): 训练轮数
            batch_size (int): 批处理大小
            train_size (float): 训练集比例
            neurons (int): LSTM单元数量
            
        返回:
            模型和预测结果
        """
        if self.data is None or self.target_column is None:
            print("请先设置数据")
            return None, None, None
        
        series = self.data[self.target_column]
        
        # 准备LSTM数据
        X_train, X_test, y_train, y_test = self.prepare_lstm_data(
            series, n_steps=n_steps, train_size=train_size
        )
        
        # 构建LSTM模型
        model = Sequential()
        model.add(LSTM(neurons, activation='relu', input_shape=(n_steps, 1)))
        model.add(Dense(1))
        model.compile(optimizer='adam', loss='mse')
        
        # 添加早停和模型检查点
        callbacks = [
            EarlyStopping(monitor='val_loss', patience=10),
            ModelCheckpoint(
                filepath='best_lstm_model.h5',
                monitor='val_loss',
                save_best_only=True
            )
        ]
        
        # 训练模型
        history = model.fit(
            X_train, y_train,
            epochs=epochs,
            batch_size=batch_size,
            validation_split=0.2,
            callbacks=callbacks,
            verbose=1
        )
        
        # 预测
        train_predict = model.predict(X_train)
        test_predict = model.predict(X_test)
        
        # 计算性能指标
        metrics = {
            'train_mae': mean_absolute_error(y_train, train_predict),
            'train_rmse': np.sqrt(mean_squared_error(y_train, train_predict)),
            'test_mae': mean_absolute_error(y_test, test_predict),
            'test_rmse': np.sqrt(mean_squared_error(y_test, test_predict)),
            'r2': r2_score(y_test, test_predict)
        }
        
        # 保存模型和结果
        # 统一使用小写的模型名称
        model_name = 'lstm'
        self.models[model_name] = model
        self.forecast_results[model_name] = {
            'train': y_train,
            'test': y_test,
            'train_predict': train_predict,
            'test_predict': test_predict,
            'prediction': test_predict,  # 兼容ARIMA格式
            'metrics': metrics,
            'history': history.history,
            'X_train': X_train,
            'X_test': X_test,
            'y_train': y_train,
            'y_test': y_test
        }
        
        print("LSTM模型训练完成")
        print(f"训练集 MAE: {metrics['train_mae']:.4f}, RMSE: {metrics['train_rmse']:.4f}")
        print(f"测试集 MAE: {metrics['test_mae']:.4f}, RMSE: {metrics['test_rmse']:.4f}")
        
        return model, train_predict, test_predict
    
    def fit_gru(self, n_steps=10, epochs=50, batch_size=32, train_size=0.8, neurons=50):
        """
        拟合GRU模型
        
        参数:
            n_steps (int): 输入序列长度
            epochs (int): 训练轮数
            batch_size (int): 批处理大小
            train_size (float): 训练集比例
            neurons (int): GRU单元数量
            
        返回:
            模型和预测结果
        """
        if self.data is None or self.target_column is None:
            print("请先设置数据")
            return None, None, None
        
        series = self.data[self.target_column]
        
        # 准备序列数据
        X_train, X_test, y_train, y_test = self.prepare_lstm_data(
            series, n_steps=n_steps, train_size=train_size
        )
        
        # 构建GRU模型
        model = Sequential()
        model.add(GRU(neurons, activation='relu', input_shape=(n_steps, 1)))
        model.add(Dense(1))
        model.compile(optimizer='adam', loss='mse')
        
        # 添加回调函数
        callbacks = [
            EarlyStopping(monitor='val_loss', patience=10),
            ModelCheckpoint(
                filepath='best_gru_model.h5',
                monitor='val_loss',
                save_best_only=True
            )
        ]
        
        # 训练模型
        history = model.fit(
            X_train, y_train,
            epochs=epochs,
            batch_size=batch_size,
            validation_split=0.2,
            callbacks=callbacks,
            verbose=1
        )
        
        # 预测
        train_predict = model.predict(X_train)
        test_predict = model.predict(X_test)
        
        # 计算性能指标
        metrics = {
            'train_mae': mean_absolute_error(y_train, train_predict),
            'train_rmse': np.sqrt(mean_squared_error(y_train, train_predict)),
            'test_mae': mean_absolute_error(y_test, test_predict),
            'test_rmse': np.sqrt(mean_squared_error(y_test, test_predict)),
            'r2': r2_score(y_test, test_predict)
        }
        
        # 保存模型和结果
        # 统一使用小写的模型名称
        model_name = 'gru'
        self.models[model_name] = model
        self.forecast_results[model_name] = {
            'train': y_train,
            'test': y_test,
            'train_predict': train_predict,
            'test_predict': test_predict,
            'prediction': test_predict,  # 兼容ARIMA格式
            'metrics': metrics,
            'history': history.history,
            'X_train': X_train,
            'X_test': X_test,
            'y_train': y_train,
            'y_test': y_test
        }
        
        print("GRU模型训练完成")
        print(f"训练集 MAE: {metrics['train_mae']:.4f}, RMSE: {metrics['train_rmse']:.4f}")
        print(f"测试集 MAE: {metrics['test_mae']:.4f}, RMSE: {metrics['test_rmse']:.4f}")
        
        return model, train_predict, test_predict
    
    def fit_tcn(self, n_steps=10, train_size=0.8, epochs=50, batch_size=32):
        """
        拟合时间卷积网络(TCN)
        
        参数:
            n_steps (int): 输入序列长度
            train_size (float): 训练集比例
            epochs (int): 训练轮数
            batch_size (int): 批处理大小
            
        返回:
            模型和预测结果
        """
        try:
            from tensorflow.keras.layers import Conv1D, MaxPooling1D, Flatten
        except ImportError:
            print("需要安装tensorflow")
            return None, None, None
            
        if self.data is None or self.target_column is None:
            print("请先设置数据")
            return None, None, None
        
        series = self.data[self.target_column]
        
        # 准备序列数据
        X_train, X_test, y_train, y_test = self.prepare_lstm_data(
            series, n_steps=n_steps, train_size=train_size
        )
        
        # 构建TCN模型
        model = Sequential()
        model.add(Conv1D(filters=64, kernel_size=3, activation='relu', input_shape=(n_steps, 1)))
        model.add(MaxPooling1D(pool_size=2))
        model.add(Conv1D(filters=32, kernel_size=3, activation='relu'))
        model.add(Flatten())
        model.add(Dense(50, activation='relu'))
        model.add(Dense(1))
        model.compile(optimizer='adam', loss='mse')
        
        # 添加回调函数
        callbacks = [
            EarlyStopping(monitor='val_loss', patience=10),
            ModelCheckpoint(
                filepath='best_tcn_model.h5',
                monitor='val_loss',
                save_best_only=True
            )
        ]
        
        # 训练模型
        history = model.fit(
            X_train, y_train,
            epochs=epochs,
            batch_size=batch_size,
            validation_split=0.2,
            callbacks=callbacks,
            verbose=1
        )
        
        # 预测
        train_predict = model.predict(X_train)
        test_predict = model.predict(X_test)
        
        # 计算性能指标
        metrics = {
            'train_mae': mean_absolute_error(y_train, train_predict),
            'train_rmse': np.sqrt(mean_squared_error(y_train, train_predict)),
            'test_mae': mean_absolute_error(y_test, test_predict),
            'test_rmse': np.sqrt(mean_squared_error(y_test, test_predict)),
            'r2': r2_score(y_test, test_predict)
        }
        
        # 保存模型和结果
        # 统一使用小写的模型名称
        model_name = 'tcn'
        self.models[model_name] = model
        self.forecast_results[model_name] = {
            'train': y_train,
            'test': y_test,
            'train_predict': train_predict,
            'test_predict': test_predict,
            'prediction': test_predict,  # 兼容ARIMA格式
            'metrics': metrics,
            'history': history.history,
            'X_train': X_train,
            'X_test': X_test,
            'y_train': y_train,
            'y_test': y_test
        }
        
        print("TCN模型训练完成")
        print(f"训练集 MAE: {metrics['train_mae']:.4f}, RMSE: {metrics['train_rmse']:.4f}")
        print(f"测试集 MAE: {metrics['test_mae']:.4f}, RMSE: {metrics['test_rmse']:.4f}")
        
        return model, train_predict, test_predict

    def detect_anomalies(self, method='iqr', threshold=1.5, window_size=None):
        """Detect anomalous points in the target series.

        Args:
            method (str): detection method ('iqr', 'zscore',
                'isolation_forest', 'auto_encoder').
            threshold (float): decision threshold; its meaning depends on the
                method (IQR multiplier, z-score cutoff, or number of standard
                deviations of reconstruction error for the autoencoder).
                Not used by 'isolation_forest'.
            window_size (int): sliding-window size for local detection
                (only used by 'zscore'; None means global statistics).

        Returns:
            pandas.Series | None: anomaly scores indexed by the anomalous
            timestamps, or None when data is missing, a dependency is
            unavailable, or the method is unknown.
        """
        if self.data is None or self.target_column is None:
            print("请先设置数据")
            return None
            
        series = self.data[self.target_column]
        
        # IQR-based detection (Tukey fences)
        if method == 'iqr':
            q1 = series.quantile(0.25)
            q3 = series.quantile(0.75)
            iqr = q3 - q1
            
            lower_bound = q1 - threshold * iqr
            upper_bound = q3 + threshold * iqr
            
            # anomaly score = distance beyond the fence, in IQR units
            scores = pd.Series(0, index=series.index)
            # points below the lower fence
            lower_mask = series < lower_bound
            if lower_mask.any():
                scores[lower_mask] = (lower_bound - series[lower_mask]) / iqr
            # points above the upper fence
            upper_mask = series > upper_bound
            if upper_mask.any():
                scores[upper_mask] = (series[upper_mask] - upper_bound) / iqr
                
            # any positive score marks an anomaly
            anomalies = scores[scores > 0]
            
        # z-score-based detection
        elif method == 'zscore':
            if window_size:
                # local mean/std over a sliding window
                rolling_mean = series.rolling(window=window_size).mean()
                rolling_std = series.rolling(window=window_size).std()
                z_scores = np.abs((series - rolling_mean) / rolling_std)
            else:
                # global mean/std over the whole series
                mean = series.mean()
                std = series.std()
                z_scores = np.abs((series - mean) / std)
                
            # the absolute z-score itself is the anomaly score
            scores = z_scores
            # points exceeding the cutoff are anomalies
            anomalies = scores[scores > threshold]
            
        # isolation-forest-based detection
        elif method == 'isolation_forest':
            try:
                from sklearn.ensemble import IsolationForest
            except ImportError:
                print("需要安装scikit-learn库")
                return None
                
            # reshape to the 2-D layout sklearn expects
            X = series.values.reshape(-1, 1)
            
            # fit the isolation forest (fixed 5% contamination)
            model = IsolationForest(contamination=0.05, random_state=42)
            model.fit(X)
            
            # decision_function is lower for more anomalous points;
            # negate it so larger scores read as "more anomalous"
            raw_scores = -model.decision_function(X)
            scores = pd.Series(raw_scores, index=series.index)
            
            # predict() returns -1 for anomalies and 1 for normal points
            anomaly_pred = model.predict(X)
            anomalies = scores[anomaly_pred == -1]
            
        # autoencoder-based detection (reconstruction error)
        elif method == 'auto_encoder':
            try:
                from tensorflow.keras.layers import Input, Dense
                from tensorflow.keras.models import Model as KerasModel
            except ImportError:
                print("需要安装tensorflow库")
                return None
                
            # reshape to a 2-D feature matrix
            X = series.values.reshape(-1, 1)
            
            # standardize before training
            scaler = StandardScaler()
            X_scaled = scaler.fit_transform(X)
            
            # build a minimal 1-in / 1-latent / 1-out autoencoder
            input_dim = 1
            encoding_dim = 1
            
            input_layer = Input(shape=(input_dim,))
            encoding_layer = Dense(encoding_dim, activation='relu')(input_layer)
            decoding_layer = Dense(input_dim, activation='linear')(encoding_layer)
            
            autoencoder = KerasModel(inputs=input_layer, outputs=decoding_layer)
            autoencoder.compile(optimizer='adam', loss='mse')
            
            # train the autoencoder to reconstruct its own input
            autoencoder.fit(X_scaled, X_scaled, 
                           epochs=100, batch_size=32, 
                           shuffle=True, verbose=0)
            
            # per-sample reconstruction error
            X_pred = autoencoder.predict(X_scaled)
            mse = np.mean(np.power(X_scaled - X_pred, 2), axis=1)
            
            # the reconstruction MSE is the anomaly score
            scores = pd.Series(mse, index=series.index)
            
            # cutoff derived from the error distribution: mean + threshold * std
            threshold_value = scores.mean() + threshold * scores.std()
            
            # points with errors above the cutoff are anomalies
            anomalies = scores[scores > threshold_value]
            
        else:
            print(f"不支持的检测方法: {method}")
            return None
            
        print(f"检测到{len(anomalies)}个异常点")
        return anomalies
        
    def plot_forecast_results(self, model_type, figsize=(15, 8)):
        """
        Visualize the forecast results of a trained model.

        Parameters:
            model_type (str): model key, e.g. 'arima', 'sarima', 'lstm', 'gru', 'tcn'
            figsize (tuple): figure size passed to matplotlib

        Notes:
            Bug fix: the optional training-loss curve is now drawn on its own
            figure *after* the main forecast figure has been finalized.
            Previously, when a training history was present, a second figure
            was opened mid-function and the closing title/label/legend calls
            decorated the loss figure instead of the forecast figure.
        """
        if model_type not in self.forecast_results:
            print(f"未找到{model_type}模型的预测结果")
            return

        result = self.forecast_results[model_type]

        plt.figure(figsize=figsize)

        if model_type in ['arima', 'sarima']:
            # Statistical models: plot train/test series plus the forecast.
            train = result['train']
            test = result['test']
            prediction = result['prediction']

            plt.plot(train.index, train, label='训练集数据')
            plt.plot(test.index, test, label='测试集数据')
            plt.plot(test.index, prediction, label='预测值', color='red')

            # Shade the gap between actual and predicted values.
            plt.fill_between(
                test.index,
                test,
                prediction,
                color='lightcoral',
                alpha=0.3,
                label='预测误差'
            )

            # Annotate the axes with the stored performance metrics.
            metrics = result['metrics']
            metrics_text = (
                f"Train MAE: {metrics['train_mae']:.4f}\n"
                f"Train RMSE: {metrics['train_rmse']:.4f}\n"
                f"Test MAE: {metrics['test_mae']:.4f}\n"
                f"Test RMSE: {metrics['test_rmse']:.4f}\n"
                f"R²: {metrics['r2']:.4f}"
            )
            plt.text(
                0.02, 0.95, metrics_text,
                transform=plt.gca().transAxes,
                bbox=dict(facecolor='white', alpha=0.8),
                verticalalignment='top'
            )

        elif model_type in ['lstm', 'gru', 'tcn']:
            # Deep-learning models: reconstruct sample indices from the
            # lookback length (n_steps) so predictions align with the series.
            y_train = result['y_train']
            y_test = result['y_test']
            train_predict = result['train_predict']
            test_predict = result['test_predict']

            n_steps = result['X_train'].shape[1]
            train_idx = np.arange(n_steps, n_steps + len(y_train))
            test_idx = np.arange(n_steps + len(y_train), n_steps + len(y_train) + len(y_test))

            # Original data (train followed by test).
            full_series = np.concatenate([y_train, y_test])
            plt.plot(np.arange(len(full_series)), full_series, label='原始数据', color='blue')

            # Fitted and forecast values.
            plt.plot(train_idx, train_predict, label='训练集预测', color='green')
            plt.plot(test_idx, test_predict, label='测试集预测', color='red')

            # Highlight the test region.
            plt.axvspan(test_idx[0], test_idx[-1], alpha=0.2, color='gray')
            plt.text(test_idx[0] + len(test_idx)/2, plt.ylim()[0] + 0.05, '测试区间',
                    horizontalalignment='center')

            # Annotate the axes with the stored performance metrics.
            metrics = result['metrics']
            metrics_text = (
                f"训练集 MAE: {metrics['train_mae']:.4f}, RMSE: {metrics['train_rmse']:.4f}\n"
                f"测试集 MAE: {metrics['test_mae']:.4f}, RMSE: {metrics['test_rmse']:.4f}"
            )
            plt.text(
                0.02, 0.95, metrics_text,
                transform=plt.gca().transAxes,
                bbox=dict(facecolor='white', alpha=0.8),
                verticalalignment='top'
            )

        # Finalize the forecast figure BEFORE opening any secondary figure,
        # so these decorations land on the correct axes.
        plt.title(f'{model_type.upper()} 模型预测结果')
        plt.xlabel('时间/样本索引')
        plt.ylabel(f'{self.target_column} 值')
        plt.legend()
        plt.grid(True)
        plt.tight_layout()
        plt.show()

        # Training-loss curve, when a fit history was recorded (DL models).
        if model_type in ['lstm', 'gru', 'tcn'] and 'history' in result:
            history = result['history']
            plt.figure(figsize=figsize)
            plt.plot(history['loss'], label='训练损失')
            if 'val_loss' in history:
                plt.plot(history['val_loss'], label='验证损失')
            plt.title(f'{model_type.upper()} 模型训练损失变化')
            plt.xlabel('Epochs')
            plt.ylabel('Loss')
            plt.legend()
            plt.grid(True)
            plt.tight_layout()
            plt.show()
        
    def plot_anomalies(self, anomalies, figsize=(15, 8), window=None):
        """
        Render the anomaly-detection outcome on top of the raw series.

        Parameters:
            anomalies (pandas.Series): anomaly scores, indexed like the data
            figsize (tuple): figure dimensions
            window (int): when given, restrict the plot to the last `window` points
        """
        if self.data is None or self.target_column is None or anomalies is None:
            print("数据或异常检测结果无效")
            return

        ts = self.data[self.target_column]

        # Optionally narrow the view to the trailing window of observations,
        # keeping only the anomalies that fall inside that window.
        if window and window < len(ts):
            visible = ts.iloc[max(0, len(ts) - window):]
            visible_anoms = anomalies[anomalies.index.isin(visible.index)]
        else:
            visible, visible_anoms = ts, anomalies

        plt.figure(figsize=figsize)

        # Base series.
        plt.plot(visible.index, visible, label='时间序列数据')

        # Overlay anomalies as red crosses, annotated with their scores.
        if not visible_anoms.empty:
            plt.scatter(
                visible_anoms.index,
                visible[visible_anoms.index],
                color='red',
                s=100,  # marker size
                marker='x',
                label=f'异常点 (共{len(visible_anoms)}个)'
            )

            for point_idx, anomaly_score in visible_anoms.items():
                plt.annotate(
                    f'{anomaly_score:.2f}',
                    xy=(point_idx, visible[point_idx]),
                    xytext=(5, 10),
                    textcoords='offset points',
                    fontsize=8,
                    bbox=dict(boxstyle="round", fc="yellow", alpha=0.6)
                )

        plt.title('时间序列异常检测结果')
        plt.xlabel('时间')
        plt.ylabel(self.target_column)
        plt.legend()
        plt.grid(True)
        plt.tight_layout()
        plt.show()

        # With more than a handful of anomalies, also show the score distribution.
        if len(anomalies) > 5:
            plt.figure(figsize=(10, 6))
            sns.histplot(anomalies, bins=30, kde=True)
            plt.title('异常分数分布')
            plt.xlabel('异常分数')
            plt.ylabel('频数')
            plt.tight_layout()
            plt.show()
    
    def evaluate_models(self):
        """
        Compare performance metrics across every trained model.

        Returns:
            pandas.DataFrame: one row per model with MAE/RMSE (plus R² when
            available), or None when no model has been trained yet.
        """
        if not self.forecast_results:
            print("没有训练过模型")
            return None

        # Collect a uniform metrics row for each trained model.
        rows = []
        for name, res in self.forecast_results.items():
            m = res['metrics']
            row = {
                'Model': name.upper(),
                'Train MAE': m['train_mae'],
                'Train RMSE': m['train_rmse'],
                'Test MAE': m['test_mae'],
                'Test RMSE': m['test_rmse'],
            }
            # R² is only produced by some models; include it when present.
            if 'r2' in m:
                row['R²'] = m['r2']
            rows.append(row)

        df_metrics = pd.DataFrame(rows)

        # Side-by-side bar charts: MAE on the left, RMSE on the right.
        plt.figure(figsize=(12, 6))

        ax_mae = plt.subplot(121)
        df_metrics[['Model', 'Train MAE', 'Test MAE']].set_index('Model').plot(kind='bar', ax=ax_mae)
        plt.title('MAE对比')
        plt.ylabel('MAE值')

        ax_rmse = plt.subplot(122)
        df_metrics[['Model', 'Train RMSE', 'Test RMSE']].set_index('Model').plot(kind='bar', ax=ax_rmse)
        plt.title('RMSE对比')
        plt.ylabel('RMSE值')

        # Annotate models that carry an R² score.
        if 'R²' in df_metrics.columns:
            for pos, model_name in enumerate(df_metrics['Model']):
                r2_val = df_metrics.loc[df_metrics['Model'] == model_name, 'R²'].values[0]
                if not np.isnan(r2_val):
                    plt.text(pos, plt.ylim()[0], f'R²: {r2_val:.4f}',
                            ha='center', va='top', rotation=45)

        plt.tight_layout()
        plt.show()

        return df_metrics

# Usage example
if __name__ == "__main__":
    # Build the preprocessing helper.
    dp = TimeSeriesDataProcessor()

    # Try to pull real market data; fall back to synthetic data on failure.
    try:
        import pandas_datareader as pdr
        data = pdr.get_data_yahoo('AAPL', start='2019-01-01', end='2022-12-31')
    # Fix: was a bare `except:`, which also swallows SystemExit and
    # KeyboardInterrupt; `Exception` still covers ImportError and network errors.
    except Exception:
        print("无法加载在线数据，将生成模拟数据...")
        np.random.seed(42)
        dates = pd.date_range(start='2019-01-01', end='2022-12-31', freq='D')
        n = len(dates)

        # Synthetic series = linear trend + yearly & weekly seasonality + noise.
        trend = np.linspace(0, 20, n)
        seasonal = 5 * np.sin(2 * np.pi * np.arange(n) / 365) + 2 * np.sin(2 * np.pi * np.arange(n) / 7)
        noise = np.random.normal(0, 1, n)

        value = 100 + trend + seasonal + noise

        data = pd.DataFrame({
            'Close': value
        }, index=dates)

    # Peek at the data.
    print("数据前5行:")
    print(data.head())

    # Use the closing price as the target column.
    dp.data = data
    dp.target_column = 'Close'
    dp.time_column = data.index.name  # the date index serves as the time column

    # Fill gaps in the series.
    dp.handle_missing_values()

    # Detect and replace outliers before modeling.
    outliers = dp.detect_outliers()
    if len(outliers) > 0:
        print(f"检测到 {len(outliers)} 个异常值，将进行替换...")
        dp.replace_outliers(outliers)

    # Build the analyzer on top of the cleaned data.
    analyzer = TimeSeriesAnalyzer(dp)
    analyzer.set_data(dp.data, dp.target_column)

    # Stationarity check (ADF test).
    stationarity_results = analyzer.check_stationarity()
    print("\n平稳性检验结果:")
    print(f"ADF测试p值: {stationarity_results['adf_test']['p_value']:.6f}")
    print(f"序列{'是' if stationarity_results['adf_test']['is_stationary'] else '不是'}平稳的")

    # Fit an ARIMA model.
    print("\n拟合ARIMA模型...")
    arima_model, arima_pred = analyzer.fit_arima(p=2, d=1, q=2)

    # Fit an LSTM model (may take a while).
    print("\n拟合LSTM模型...")
    lstm_model, lstm_train_pred, lstm_test_pred = analyzer.fit_lstm(n_steps=14, epochs=20)

    # Anomaly detection on the series.
    print("\n进行异常检测...")
    anomalies = analyzer.detect_anomalies(method='zscore', threshold=3)
    print(f"检测到 {len(anomalies)} 个异常点")

    # Visualize the forecasts.
    print("\n可视化ARIMA预测结果...")
    analyzer.plot_forecast_results('arima')

    print("\n可视化LSTM预测结果...")
    analyzer.plot_forecast_results('lstm')

    # Visualize detected anomalies, if any.
    if len(anomalies) > 0:
        print("\n可视化异常检测结果...")
        analyzer.plot_anomalies(anomalies)

    # Compare the trained models.
    print("\n比较模型性能...")
    metrics_df = analyzer.evaluate_models()
    print("\n模型性能指标:")
    print(metrics_df)