import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import pandas as pd
import numpy as np

class StockDataset(Dataset):
    """Sliding-window dataset over a CSV of stock features.

    Each sample is a (seq_length, num_features) window of standardized
    features paired with the standardized target at the time step
    immediately following the window.
    """

    def __init__(self, csv_path, seq_length=30):
        """Load the CSV and standardize features and target.

        Args:
            csv_path: Path (or file-like object) to a CSV whose last
                column is the regression target; all preceding columns
                are features.
            seq_length: Number of consecutive rows per input window.
        """
        self.data = pd.read_csv(csv_path)
        self.features = self.data.iloc[:, :-1].values.astype(np.float32)
        self.targets = self.data.iloc[:, -1].values.astype(np.float32)
        self.seq_length = seq_length

        # Z-score normalization, column-wise. Guard against zero-variance
        # columns, which would otherwise yield NaN/inf from division by 0.
        self.feature_mean = self.features.mean(axis=0)
        self.feature_std = self.features.std(axis=0)
        safe_feature_std = np.where(self.feature_std == 0, 1.0, self.feature_std)
        self.features = (self.features - self.feature_mean) / safe_feature_std

        # Same guard for the target: a constant target becomes all zeros
        # instead of NaN.
        self.target_mean = self.targets.mean()
        self.target_std = self.targets.std()
        safe_target_std = self.target_std if self.target_std != 0 else 1.0
        self.targets = (self.targets - self.target_mean) / safe_target_std

    def __len__(self):
        # One sample per window start; the last seq_length rows cannot
        # start a full window that still has a following target row.
        return len(self.data) - self.seq_length

    def __getitem__(self, idx):
        # Feature window [idx, idx+seq_length) and the next-step target.
        features = self.features[idx:idx + self.seq_length]
        target = self.targets[idx + self.seq_length]
        return torch.tensor(features), torch.tensor(target)

class StockTransformer(nn.Module):
    """Transformer encoder for sequence-to-one stock regression.

    Expects input of shape (seq_len, batch_size, feature_dim) and
    returns predictions of shape (batch_size, 1).
    """

    def __init__(self, feature_dim=40, d_model=128, nhead=8, num_layers=4,
                 max_seq_len=500):
        """
        Args:
            feature_dim: Number of input features per time step.
            d_model: Transformer embedding dimension.
            nhead: Number of attention heads (must divide d_model).
            num_layers: Number of stacked encoder layers.
            max_seq_len: Maximum sequence length supported by the learned
                positional embeddings.
        """
        super().__init__()
        self.input_proj = nn.Linear(feature_dim, d_model)
        # Learned positional embedding, one d_model vector per position.
        # The original shape (1, d_model) broadcast the SAME vector to
        # every time step, so the model carried no positional information
        # (attention + mean-pooling is otherwise permutation-invariant).
        # Shape (max_seq_len, 1, d_model) gives each position its own
        # embedding and broadcasts across the batch dimension.
        self.pos_encoder = nn.Parameter(torch.randn(max_seq_len, 1, d_model))

        encoder_layer = nn.TransformerEncoderLayer(
            d_model=d_model,
            nhead=nhead,
            dim_feedforward=512,
            dropout=0.1
        )
        self.transformer = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)
        self.regressor = nn.Sequential(
            nn.Linear(d_model, 64),
            nn.ReLU(),
            nn.Linear(64, 1)
        )

    def forward(self, x):
        """Encode the sequence and regress one value per batch element.

        Args:
            x: Tensor of shape (seq_len, batch_size, feature_dim).

        Returns:
            Tensor of shape (batch_size, 1).
        """
        x = self.input_proj(x)
        # Add the per-position embeddings (broadcast over batch).
        x = x + self.pos_encoder[:x.size(0)]
        x = self.transformer(x)
        # Mean-pool over time to aggregate the sequence representation.
        x = x.mean(dim=0)
        return self.regressor(x)

def _evaluate(model, loader, criterion, device):
    """Return the mean per-batch loss of `model` over `loader` (no grad)."""
    model.eval()
    total = 0.0
    with torch.no_grad():
        for inputs, targets in loader:
            # DataLoader yields (batch, seq, features); model expects
            # (seq, batch, features).
            inputs = inputs.permute(1, 0, 2).to(device)
            outputs = model(inputs)
            # squeeze(-1), not squeeze(): a final batch of size 1 would
            # otherwise collapse to a 0-d tensor and silently broadcast
            # against the 1-D targets inside MSELoss.
            total += criterion(outputs.squeeze(-1), targets.to(device)).item()
    return total / len(loader)


def train_model():
    """Train StockTransformer on the CSV dataset and save the best model.

    Side effects: reads 'dataset/training_dataset.csv', writes
    'best_model.pth' (best-validation weights) and
    'training_results.pth' (losses, weights, normalization stats).
    """
    # Hyperparameters.
    config = {
        'batch_size': 64,
        'epochs': 100,
        'learning_rate': 1e-4,
        'seq_length': 30
    }

    # Build dataset and an 80/10/10 train/val/test split.
    # NOTE(review): random_split shuffles time-series windows, so
    # overlapping windows leak across splits, and StockDataset computes
    # normalization stats over the whole file (including val/test rows).
    # A chronological split would be sounder — confirm with data owner.
    dataset = StockDataset('dataset/training_dataset.csv', config['seq_length'])
    train_size = int(0.8 * len(dataset))
    val_size = int(0.1 * len(dataset))
    test_size = len(dataset) - train_size - val_size
    train_set, val_set, test_set = torch.utils.data.random_split(
        dataset, [train_size, val_size, test_size])

    train_loader = DataLoader(train_set, batch_size=config['batch_size'], shuffle=True)
    val_loader = DataLoader(val_set, batch_size=config['batch_size'])

    # Model, loss, and optimizer.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = StockTransformer().to(device)
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=config['learning_rate'])

    # Training loop with best-on-validation checkpointing.
    best_val_loss = float('inf')
    for epoch in range(config['epochs']):
        model.train()
        for inputs, targets in train_loader:
            inputs = inputs.permute(1, 0, 2).to(device)  # (seq_len, batch, features)
            outputs = model(inputs)
            # squeeze(-1) for the same size-1-batch reason as in _evaluate.
            loss = criterion(outputs.squeeze(-1), targets.to(device))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        # Validation.
        val_loss = _evaluate(model, val_loader, criterion, device)
        print(f'Epoch {epoch+1}, Val Loss: {val_loss:.4f}')

        # Keep the weights with the lowest validation loss.
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            torch.save(model.state_dict(), 'best_model.pth')

    # Evaluate the best checkpoint on the held-out test split.
    # map_location keeps the reload working when the checkpoint was
    # written on a different device (e.g. trained on GPU, loaded on CPU).
    test_loader = DataLoader(test_set, batch_size=config['batch_size'])
    model.load_state_dict(torch.load('best_model.pth', map_location=device))
    test_loss = _evaluate(model, test_loader, criterion, device)
    print(f'\nFinal Test Loss: {test_loss:.4f}')

    # Persist results plus the normalization stats needed at inference time.
    results = {
        'best_val_loss': best_val_loss,
        'test_loss': test_loss,
        'model_params': model.state_dict(),
        'feature_mean': dataset.feature_mean,
        'feature_std': dataset.feature_std,
        'target_mean': dataset.target_mean,
        'target_std': dataset.target_std
    }
    torch.save(results, 'training_results.pth')

# Run training only when executed as a script, not on import.
if __name__ == '__main__':
    train_model()