#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
排列5序列LSTM快速训练脚本
为了演示目的，使用较少的epochs进行快速训练
"""

import os
import sys
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
import numpy as np
from sklearn.model_selection import train_test_split
from datetime import datetime

# Ensure the project root is on sys.path so the `algorithms` package
# resolves no matter which working directory this script is run from.
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(current_dir)

from algorithms.plw_sequence_lstm import PLWSequenceLSTM, PLWDataProcessor

def quick_train_plw_sequence_lstm():
    """Quick-train the PLW (Pailie-5) sequence LSTM model (demo version).

    Loads the historical draw data, trains a deliberately small LSTM for a
    handful of epochs, saves a checkpoint, and smoke-tests one prediction.

    Returns:
        bool: True on success; False when the data file is missing, the
        dataset is too small, or any training step raises.
    """
    print("🚀 快速训练排列5序列LSTM模型")
    print("=" * 60)

    # Small hyperparameters on purpose: this is a fast demo run, not a
    # production training configuration.
    CONFIG = {
        'data_file': os.path.join('scripts', 'plw', 'plw_history.csv'),
        'model_save_path': os.path.join('scripts', 'plw', 'plw_sequence_lstm_model.pth'),
        'window_size': 5,       # small lookback window
        'hidden_dim': 64,       # small hidden size
        'num_layers': 2,        # shallow stack
        'dropout': 0.2,
        'batch_size': 16,       # small batches
        'epochs': 20,           # few epochs
        'learning_rate': 0.01,  # higher LR compensates for the short run
        'device': 'cuda' if torch.cuda.is_available() else 'cpu'
    }

    # Bail out early if the history file is absent.
    if not os.path.exists(CONFIG['data_file']):
        print(f" 数据文件不存在: {CONFIG['data_file']}")
        return False

    device = torch.device(CONFIG['device'])
    print(f"📱 使用设备: {device}")

    try:
        # 1. Load and window the historical draw data.
        print("加载数据...")
        processor = PLWDataProcessor(CONFIG['data_file'], CONFIG['window_size'])
        X, y = processor.load_and_process_data()

        print(f"特征形状: {X.shape}")
        print(f"标签形状: {y.shape}")

        if len(X) < 10:
            print(" 数据量太少，无法训练")
            return False

        # Train/validation split. The validation set is only used for the
        # final smoke test below, not for per-epoch evaluation.
        X_train, X_val, y_train, y_val = train_test_split(
            X, y, test_size=0.2, random_state=42, shuffle=True
        )

        # Build tensors directly on the target device. This avoids the
        # legacy torch.FloatTensor/LongTensor constructors and the extra
        # host tensor + .to(device) copy they implied.
        X_train = torch.tensor(X_train, dtype=torch.float32, device=device)
        y_train = torch.tensor(y_train, dtype=torch.long, device=device)
        X_val = torch.tensor(X_val, dtype=torch.float32, device=device)
        y_val = torch.tensor(y_val, dtype=torch.long, device=device)

        train_dataset = TensorDataset(X_train, y_train)
        train_loader = DataLoader(train_dataset, batch_size=CONFIG['batch_size'], shuffle=True)

        # 2. Build the model: 5 digit positions, 10 classes per position.
        print(" 创建序列LSTM模型...")
        model = PLWSequenceLSTM(
            input_dim=5,
            hidden_dim=CONFIG['hidden_dim'],
            num_layers=CONFIG['num_layers'],
            dropout=CONFIG['dropout']
        ).to(device)

        print(f"模型参数数量: {sum(p.numel() for p in model.parameters()):,}")

        # 3. Loss and optimizer.
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.Adam(model.parameters(), lr=CONFIG['learning_rate'])

        # 4. Quick training loop.
        print("🏃 开始快速训练...")
        model.train()

        avg_loss = float('nan')  # defined even if the epoch loop never runs
        for epoch in range(CONFIG['epochs']):
            total_loss = 0.0
            num_batches = 0

            for batch_X, batch_y in train_loader:
                optimizer.zero_grad()

                predictions = model(batch_X)  # [batch_size, 5, 10]

                # Flattening the position axis into the batch axis averages
                # the cross-entropy over all 5 positions in one call. Because
                # every position shares the same batch size, this equals the
                # mean of the 5 per-position losses the original loop computed.
                loss = criterion(
                    predictions.reshape(-1, predictions.size(-1)),
                    batch_y.reshape(-1)
                )

                loss.backward()
                optimizer.step()

                total_loss += loss.item()
                num_batches += 1

            avg_loss = total_loss / num_batches

            if (epoch + 1) % 5 == 0:
                print(f"Epoch {epoch+1:2d}/{CONFIG['epochs']}: Loss = {avg_loss:.4f}")

        # 5. Save a checkpoint; make sure the target directory exists first.
        print("💾 保存模型...")
        os.makedirs(os.path.dirname(CONFIG['model_save_path']) or '.', exist_ok=True)
        torch.save({
            'model_state_dict': model.state_dict(),
            'config': CONFIG,
            'epoch': CONFIG['epochs'],
            'loss': avg_loss,
            'timestamp': datetime.now().isoformat()
        }, CONFIG['model_save_path'])

        print(f"✅ 模型已保存到: {CONFIG['model_save_path']}")

        # 6. Smoke test: run one prediction on a single validation sample.
        print("🧪 测试模型...")
        model.eval()
        with torch.no_grad():
            if len(X_val) > 0:
                test_input = X_val[:1]  # single sample
                predictions, probabilities = model.predict(test_input)
                predicted_numbers = predictions[0].cpu().numpy().tolist()
                print(f"测试预测: {predicted_numbers}")
                print(" 模型测试成功")

        print("🎉 快速训练完成！")
        return True

    except Exception as e:
        # Top-level boundary for a script step: report the failure (with
        # traceback) and signal it via the return value instead of crashing.
        print(f" 训练失败: {e}")
        import traceback
        print(traceback.format_exc())
        return False

if __name__ == "__main__":
    # Branch directly on the training result instead of storing a flag.
    if quick_train_plw_sequence_lstm():
        print("\n🎯 PLW序列LSTM模型快速训练成功!")
        print("现在可以在主程序中使用真实训练的模型进行预测了。")
    else:
        print("\n❌ 训练失败，请检查数据和配置。")