#coding:utf-8

import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
import pickle
import os
from sklearn.preprocessing import StandardScaler
from typing import List, Union, Optional

class TransformerPredictor(nn.Module):
    """Transformer encoder-decoder for time-series forecasting.

    Projects `input_dim` features into a `d_model`-dimensional space, adds
    sinusoidal positional encodings, and runs a Transformer encoder (plus a
    decoder during teacher-forced training). A final linear layer maps each
    time step back to a single scalar.
    """
    def __init__(self, input_dim, d_model=64, nhead=4, num_encoder_layers=2, 
                 num_decoder_layers=2, dim_feedforward=256, dropout=0.1):
        super(TransformerPredictor, self).__init__()
        
        self.d_model = d_model
        self.input_projection = nn.Linear(input_dim, d_model)
        
        # Encoder stack (batch_first: tensors are [batch, seq, feature]).
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=d_model, 
            nhead=nhead, 
            dim_feedforward=dim_feedforward,
            dropout=dropout,
            batch_first=True
        )
        self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_encoder_layers)
        
        # Decoder stack, only used on the teacher-forced training path.
        decoder_layer = nn.TransformerDecoderLayer(
            d_model=d_model,
            nhead=nhead,
            dim_feedforward=dim_feedforward,
            dropout=dropout,
            batch_first=True
        )
        self.transformer_decoder = nn.TransformerDecoder(decoder_layer, num_decoder_layers)
        
        # Per-time-step scalar output head.
        self.output_projection = nn.Linear(d_model, 1)
        
        # FIX: register the positional-encoding table as a NON-persistent
        # buffer so it follows the module across .to(device)/.cuda() calls
        # (the original kept a plain attribute stuck on CPU). persistent=False
        # keeps it out of state_dict, so checkpoints saved by the previous
        # version of this class still load cleanly.
        self.register_buffer(
            'positional_encoding',
            self._generate_positional_encoding(5000, d_model),
            persistent=False,
        )
        
    def _generate_positional_encoding(self, max_len, d_model):
        """Build the standard sinusoidal positional-encoding table.

        Returns a [1, max_len, d_model] tensor (leading batch dimension for
        broadcasting against [batch, seq, d_model] inputs).
        """
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * 
                           (-torch.log(torch.tensor(10000.0)) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        return pe.unsqueeze(0)
    
    def forward(self, src, tgt=None, src_mask=None, tgt_mask=None):
        """Run the model.

        Args:
            src: source sequence, [batch_size, src_len, input_dim].
            tgt: target sequence, [batch_size, tgt_len, input_dim]; only
                consumed when the module is in training mode (teacher forcing).
            src_mask: forwarded as `src_key_padding_mask` to the encoder, so
                it must be a [batch, src_len] padding mask — NOTE(review): the
                parameter name suggests an attention mask; confirm callers.
            tgt_mask: causal mask for the decoder; auto-generated when None.

        Returns:
            [batch, len, 1] tensor: decoder output per target step when
            training with `tgt`, otherwise the projected encoder memory
            (one scalar per source position).
        """
        # Project and scale by sqrt(d_model), as in "Attention Is All You Need".
        src = self.input_projection(src) * (self.d_model ** 0.5)
        batch_size, src_len, _ = src.shape
        
        # Add positional information (slice the table to the actual length).
        src = src + self.positional_encoding[:, :src_len, :].to(src.device)
        
        # Encode the source sequence.
        memory = self.transformer_encoder(src, src_key_padding_mask=src_mask)
        
        if self.training and tgt is not None:
            # Training path: teacher-forced decoding.
            tgt = self.input_projection(tgt) * (self.d_model ** 0.5)
            tgt_len = tgt.shape[1]
            tgt = tgt + self.positional_encoding[:, :tgt_len, :].to(tgt.device)
            
            # Causal mask: each position may only attend to earlier positions.
            if tgt_mask is None:
                tgt_mask = self._generate_square_subsequent_mask(tgt_len).to(tgt.device)
            
            output = self.transformer_decoder(tgt, memory, tgt_mask=tgt_mask)
            output = self.output_projection(output)
        else:
            # Inference path: project the encoder memory directly.
            output = self.output_projection(memory)
        
        return output
    
    def _generate_square_subsequent_mask(self, sz):
        """Additive causal mask: 0.0 on/below the diagonal, -inf above."""
        mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
        mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
        return mask


class TransformerTrainer:
    """Prepares sliding-window data, trains the Transformer model, and
    saves/restores the model together with its input/output scalers."""

    # BUG FIX: snapshot of the model class, taken while this class body
    # executes — i.e. BEFORE the inference wrapper further down this module
    # rebinds the module-level name `TransformerPredictor`. The original code
    # looked the name up at call time inside the methods, which resolved to
    # the wrapper class and crashed with a TypeError. Using `globals().get`
    # (rather than a bare name) also lets this class be imported in isolation.
    _MODEL_CLS = globals().get('TransformerPredictor')

    def __init__(self, seq_length=10, prediction_length=1):
        self.seq_length = seq_length                 # input window length
        self.prediction_length = prediction_length   # forecast horizon
        self.scaler_X = StandardScaler()             # fit in prepare_sequences
        self.scaler_y = StandardScaler()             # fit in prepare_sequences
        self.model = None                            # set by train/load

    def prepare_sequences(self, df_X: pd.DataFrame, df_y: pd.Series) -> tuple:
        """Standardize the data and cut it into sliding windows.

        Args:
            df_X: feature rows, one per time step.
            df_y: target series aligned with df_X.

        Returns:
            (sequences, targets): sequences has shape
            [num_windows, seq_length, num_features]; targets holds, for each
            window, the scaled target value at the end of the
            `prediction_length`-step horizon following the window.
        """
        X_scaled = self.scaler_X.fit_transform(df_X.values)
        y_scaled = self.scaler_y.fit_transform(df_y.values.reshape(-1, 1)).flatten()

        sequences = []
        targets = []
        for i in range(len(X_scaled) - self.seq_length - self.prediction_length + 1):
            sequences.append(X_scaled[i:(i + self.seq_length)])
            # Horizon after the window; keep only its last step as the target.
            horizon = y_scaled[(i + self.seq_length):(i + self.seq_length + self.prediction_length)]
            targets.append(horizon[-1])

        return np.array(sequences), np.array(targets)

    def train_transformer_model(self, df_X: pd.DataFrame, df_y: pd.DataFrame,
                              epochs: int = 100, batch_size: int = 32,
                              learning_rate: float = 0.001, save_path: str = 'transformer_model.pkl') -> "TransformerPredictor":
        """Train the Transformer and pickle the model plus its scalers.

        Args:
            df_X: feature DataFrame.
            df_y: target DataFrame (only the first column is used).
            epochs: number of training epochs.
            batch_size: mini-batch size.
            learning_rate: Adam learning rate.
            save_path: destination of the pickled bundle.

        Returns:
            The trained model (nn.Module).
        """
        X, y = self.prepare_sequences(df_X, df_y.iloc[:, 0])

        X_tensor = torch.FloatTensor(X)
        y_tensor = torch.FloatTensor(y).unsqueeze(1)  # [num_windows, 1]

        dataloader = DataLoader(TensorDataset(X_tensor, y_tensor),
                                batch_size=batch_size, shuffle=True)

        input_dim = X.shape[2]  # number of features per time step
        # Use the captured class (see _MODEL_CLS), not the shadowed global.
        self.model = self._MODEL_CLS(input_dim=input_dim)

        criterion = nn.MSELoss()
        optimizer = optim.Adam(self.model.parameters(), lr=learning_rate)

        self.model.train()
        for epoch in range(epochs):
            total_loss = 0.0
            for batch_X, batch_y in dataloader:
                optimizer.zero_grad()

                outputs = self.model(batch_X)  # [batch, seq_length, 1]
                # BUG FIX: compare only the LAST time step with the target.
                # The original squeezed to [batch, seq_length] against a
                # [batch] target, which mis-broadcasts (or raises).
                loss = criterion(outputs[:, -1, :], batch_y)

                loss.backward()
                optimizer.step()

                total_loss += loss.item()

            if (epoch + 1) % 10 == 0:
                print(f'Epoch [{epoch+1}/{epochs}], Loss: {total_loss/len(dataloader):.4f}')

        # Bundle everything needed for standalone inference into one pickle.
        model_data = {
            'model_state_dict': self.model.state_dict(),
            'scaler_X': self.scaler_X,
            'scaler_y': self.scaler_y,
            'seq_length': self.seq_length,
            'prediction_length': self.prediction_length,
            'input_dim': input_dim
        }

        with open(save_path, 'wb') as f:
            pickle.dump(model_data, f)

        print(f"模型已保存到 {save_path}")
        return self.model

    def load_model(self, model_path: str) -> "TransformerPredictor":
        """Rebuild the model and scalers from a pickle written by
        train_transformer_model.

        WARNING: pickle.load can execute arbitrary code — only load files
        from trusted sources.
        """
        with open(model_path, 'rb') as f:
            model_data = pickle.load(f)

        # Rebuild the network and restore weights (captured class, see above).
        self.model = self._MODEL_CLS(input_dim=model_data['input_dim'])
        self.model.load_state_dict(model_data['model_state_dict'])
        self.scaler_X = model_data['scaler_X']
        self.scaler_y = model_data['scaler_y']
        self.seq_length = model_data['seq_length']
        self.prediction_length = model_data['prediction_length']

        return self.model


class TransformerPredictor:
    """Inference-side wrapper that drives a TransformerTrainer's model.

    NOTE(review): this class shadows the nn.Module of the same name defined
    earlier in this module. The name is kept for backward compatibility with
    existing callers, but renaming one of the two classes is recommended.
    """

    def __init__(self, trainer: "TransformerTrainer"):
        # Annotation kept as a string so this class does not require
        # TransformerTrainer to be resolvable at class-creation time.
        self.trainer = trainer

    def predict_with_transformer(self, df_X: pd.DataFrame, model_path: Optional[str] = None) -> List[float]:
        """Predict one value per input row using a sliding window.

        Args:
            df_X: input feature DataFrame.
            model_path: pickle file to load first; when None the trainer's
                already-loaded model is used.

        Returns:
            A list of predictions whose length equals len(df_X). Trailing
            rows (for which no full window exists) repeat the last
            prediction; an empty list is returned when df_X has fewer rows
            than the window length.

        Raises:
            ValueError: if no model has been trained or loaded.
        """
        if model_path:
            self.trainer.load_model(model_path)

        if self.trainer.model is None:
            raise ValueError("请先训练模型或加载已保存的模型")

        X_scaled = self.trainer.scaler_X.transform(df_X.values)
        seq_length = self.trainer.seq_length

        predictions: List[float] = []

        # eval() and no_grad are hoisted out of the loop (the original
        # re-entered them for every window).
        self.trainer.model.eval()
        with torch.no_grad():
            for i in range(len(X_scaled) - seq_length + 1):
                seq_tensor = torch.FloatTensor(X_scaled[i:(i + seq_length)]).unsqueeze(0)
                output = self.trainer.model(seq_tensor)  # [1, seq_length, 1]

                # BUG FIX: take the LAST time step of the window. The original
                # squeezed the whole sequence and indexed [0][0], i.e. it
                # returned the prediction for the FIRST position, and also fed
                # a (1, seq_length) array to a scaler fitted on one feature.
                pred_scaled = output[0, -1, 0].item()
                pred_original = self.trainer.scaler_y.inverse_transform([[pred_scaled]])[0][0]
                predictions.append(float(pred_original))

        if len(predictions) == 0:
            print("警告：输入数据行数少于序列长度，无法生成预测")
            return []

        # Pad so the output length matches the number of input rows.
        while len(predictions) < len(df_X):
            predictions.append(predictions[-1])

        return predictions[:len(df_X)]


# 使用示例和辅助函数
def train_transformer_model(df_X: pd.DataFrame, df_y: pd.DataFrame, 
                           seq_length: int = 10, epochs: int = 100, 
                           save_path: str = 'transformer_model.pkl') -> TransformerPredictor:
    """Convenience entry point: build a trainer and run a full training pass.

    Args:
        df_X: feature DataFrame.
        df_y: target DataFrame.
        seq_length: sliding-window length fed to the model.
        epochs: number of training epochs.
        save_path: where the pickled model bundle is written.

    Returns:
        The trained Transformer model.
    """
    return TransformerTrainer(seq_length=seq_length).train_transformer_model(
        df_X, df_y, epochs=epochs, save_path=save_path)


def predict_with_transformer_model(df_X: pd.DataFrame, model_path: str) -> List[float]:
    """Convenience entry point: load a saved model bundle and predict over df_X.

    Args:
        df_X: input feature DataFrame.
        model_path: path to the pickled model bundle.

    Returns:
        One prediction per input row (same length as df_X).
    """
    predictor = TransformerPredictor(TransformerTrainer())
    return predictor.predict_with_transformer(df_X, model_path)


# 测试函数
def test_transformer_model():
    """End-to-end smoke test: synthesize data, train, save, reload, predict."""
    np.random.seed(42)
    n_samples = 200
    
    # Features: a random walk, a noisy constant level and a noisy sine wave.
    df_X = pd.DataFrame({
        'feature1': np.random.randn(n_samples).cumsum(),
        'feature2': np.random.randn(n_samples) * 0.5 + 2,
        'feature3': np.sin(np.arange(n_samples) * 0.1) + np.random.randn(n_samples) * 0.1
    })
    
    # Target: noisy linear combination of the first two features.
    noise = np.random.randn(n_samples) * 0.2
    df_y = pd.DataFrame({
        'target': df_X['feature1'] * 0.5 + df_X['feature2'] * 0.3 + noise
    })
    
    print("测试数据生成完成:")
    print(f"特征数据形状: {df_X.shape}")
    print(f"目标数据形状: {df_y.shape}")
    
    # Train (return value unused by this smoke test).
    print("\n开始训练Transformer模型...")
    train_transformer_model(df_X, df_y, seq_length=20, epochs=50,
                            save_path='test_transformer_model.pkl')
    
    # Predict from the saved bundle.
    print("\n开始预测...")
    predictions = predict_with_transformer_model(df_X, 'test_transformer_model.pkl')
    
    print(f"预测结果数量: {len(predictions)}")
    print(f"输入数据行数: {len(df_X)}")
    print(f"预测结果前10个: {predictions[:10]}")
    print("测试完成！")


if __name__ == "__main__":
    # Run the end-to-end smoke test when executed as a script
    test_transformer_model()