#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
排列5序列LSTM模型训练脚本
专门为排列5设计的训练流程
"""

import os
import sys
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
import numpy as np
from sklearn.model_selection import train_test_split
from datetime import datetime
import joblib

# Add the project root to sys.path so project-local packages can be imported
current_dir = os.path.dirname(os.path.abspath(__file__))
project_root = os.path.abspath(os.path.join(current_dir, '..', '..'))
sys.path.append(project_root)

from algorithms.plw_sequence_lstm import PLWSequenceLSTM, PLWDataProcessor

# Training configuration: file locations, model hyper-parameters and
# optimisation settings.
CONFIG = {
    'data_file': os.path.join(project_root, 'scripts', 'plw', 'plw_history.csv'),  # historical draw data (CSV)
    'model_save_path': os.path.join(project_root, 'scripts', 'plw', 'plw_sequence_lstm_model.pth'),  # best-checkpoint output
    'scaler_save_path': os.path.join(project_root, 'scripts', 'plw', 'plw_sequence_scaler.pkl'),  # reserved for the (currently unused) scaler dump
    'window_size': 10,        # number of past draws per input sequence window
    'hidden_dim': 128,        # LSTM hidden-state size
    'num_layers': 3,          # stacked LSTM layers
    'dropout': 0.3,           # dropout rate passed to the model
    'batch_size': 32,
    'epochs': 200,            # upper bound; early stopping usually ends sooner
    'learning_rate': 0.001,   # initial AdamW learning rate
    'patience': 20,           # early-stopping patience (epochs without val-loss improvement)
    'device': 'cuda' if torch.cuda.is_available() else 'cpu'
}

class PLWPositionLoss(nn.Module):
    """Position-aware cross-entropy loss for PL5 digit predictions.

    Each of the 5 digit positions gets its own cross-entropy term; the
    terms are scaled by per-position weights (leading positions weighted
    higher) and averaged over positions.
    """

    def __init__(self, position_weights=None):
        """
        Args:
            position_weights: optional sequence of 5 floats, one weight per
                digit position. Defaults to [1.2, 1.1, 1.0, 1.0, 0.9]
                (ten-thousands position highest, units position lowest).
        """
        super(PLWPositionLoss, self).__init__()
        # reduction='none' keeps per-sample losses so we can weight by position.
        self.ce_loss = nn.CrossEntropyLoss(reduction='none')

        # Default weighting: leading (ten-thousands) digit matters most.
        if position_weights is None:
            position_weights = [1.2, 1.1, 1.0, 1.0, 0.9]
        # Coerce to float32 so integer weight lists still scale float losses
        # (plain torch.tensor([1, 1, ...]) would produce an int64 tensor).
        self.position_weights = torch.as_tensor(position_weights, dtype=torch.float32)

    def forward(self, predictions, targets):
        """Compute the position-weighted cross-entropy loss.

        Args:
            predictions: [batch_size, 5, 10] — per-position prediction logits.
            targets: [batch_size, 5] — true digit labels (0-9).

        Returns:
            Scalar tensor: mean over positions of the weighted batch-mean
            cross-entropy (identical to looping positions one at a time).
        """
        batch_size, num_positions, num_classes = predictions.shape
        weights = self.position_weights.to(predictions.device)

        # Single vectorised CE call: fold positions into the batch dimension,
        # then restore the [batch, positions] layout.
        per_sample = self.ce_loss(
            predictions.reshape(-1, num_classes), targets.reshape(-1).long()
        ).view(batch_size, num_positions)

        # Batch-mean per position, weighted, then averaged over positions:
        # sum_pos(w_pos * mean_batch(loss_pos)) / num_positions.
        return (per_sample.mean(dim=0) * weights).sum() / num_positions


def train_plw_sequence_lstm():
    """Train the PL5 sequence LSTM model end-to-end.

    Loads historical draw data, trains PLWSequenceLSTM with a
    position-weighted loss, LR scheduling and early stopping, saves the
    best checkpoint, then evaluates that checkpoint on the validation split.

    Returns:
        bool: True on success; False if the data file is missing or any
        exception was raised during training.
    """
    print("[START] 开始训练排列5序列LSTM模型")
    print("=" * 60)
    
    # Bail out early if the history CSV has not been generated yet.
    if not os.path.exists(CONFIG['data_file']):
        print(f" 数据文件不存在: {CONFIG['data_file']}")
        print("请先运行主程序获取排列5历史数据")
        return False
    
    # Select the compute device (CUDA if available, per CONFIG).
    device = torch.device(CONFIG['device'])
    print(f"[DEVICE] 使用设备: {device}")
    
    try:
        # 1. Load and preprocess data into sliding-window samples.
        print("[DATA] 加载和预处理数据...")
        processor = PLWDataProcessor(CONFIG['data_file'], CONFIG['window_size'])
        X, y = processor.load_and_process_data()
        
        print(f"特征形状: {X.shape}")
        print(f"标签形状: {y.shape}")
        
        # Train/validation split.
        # NOTE(review): shuffle=True on windowed time-series data can place
        # overlapping windows in both splits (look-ahead leakage) — confirm
        # this is intended rather than a chronological split.
        X_train, X_val, y_train, y_val = train_test_split(
            X, y, test_size=0.2, random_state=42, shuffle=True
        )
        
        print(f"训练集: {X_train.shape}, 验证集: {X_val.shape}")
        
        # Convert to PyTorch tensors; the full dataset is moved to the
        # device up front, so per-batch transfers are not needed later.
        X_train = torch.FloatTensor(X_train).to(device)
        y_train = torch.LongTensor(y_train).to(device)
        X_val = torch.FloatTensor(X_val).to(device)
        y_val = torch.LongTensor(y_val).to(device)
        
        # Build DataLoaders over the in-memory tensors.
        train_dataset = TensorDataset(X_train, y_train)
        val_dataset = TensorDataset(X_val, y_val)
        
        train_loader = DataLoader(train_dataset, batch_size=CONFIG['batch_size'], shuffle=True)
        val_loader = DataLoader(val_dataset, batch_size=CONFIG['batch_size'], shuffle=False)
        
        # 2. Create the sequence LSTM model.
        print("[MODEL] 创建序列LSTM模型...")
        model = PLWSequenceLSTM(
            input_dim=10,  # 10-dim input: 5 raw digits + 5 zone-transformed features
            hidden_dim=CONFIG['hidden_dim'],
            num_layers=CONFIG['num_layers'],
            dropout=CONFIG['dropout']
        ).to(device)
        
        print(f"模型参数数量: {sum(p.numel() for p in model.parameters()):,}")
        
        # 3. Loss function and optimizer.
        criterion = PLWPositionLoss()
        optimizer = optim.AdamW(
            model.parameters(), 
            lr=CONFIG['learning_rate'],
            weight_decay=1e-4
        )
        
        # Halve the LR after 10 epochs without validation-loss improvement.
        # NOTE(review): the `verbose` argument of ReduceLROnPlateau is
        # deprecated in recent PyTorch releases — confirm the target version.
        scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            optimizer, mode='min', factor=0.5, patience=10, verbose=True
        )
        
        # 4. Training loop with early stopping on validation loss.
        print("[RUN] 开始训练...")
        best_val_loss = float('inf')
        patience_counter = 0
        train_losses = []
        val_losses = []
        
        for epoch in range(CONFIG['epochs']):
            # --- Training phase ---
            model.train()
            total_train_loss = 0
            num_batches = 0
            
            for batch_X, batch_y in train_loader:
                optimizer.zero_grad()
                
                # Forward pass.
                predictions = model(batch_X)
                loss = criterion(predictions, batch_y)
                
                # Backward pass with gradient clipping for stability.
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
                optimizer.step()
                
                total_train_loss += loss.item()
                num_batches += 1
            
            avg_train_loss = total_train_loss / num_batches
            train_losses.append(avg_train_loss)
            
            # --- Validation phase ---
            model.eval()
            total_val_loss = 0
            total_correct = 0
            total_positions = 0
            
            with torch.no_grad():
                for batch_X, batch_y in val_loader:
                    predictions = model(batch_X)
                    loss = criterion(predictions, batch_y)
                    
                    total_val_loss += loss.item()
                    
                    # Per-position accuracy: argmax over the class dimension,
                    # counting each of the 5 positions independently.
                    pred_numbers = torch.argmax(predictions, dim=-1)
                    correct = (pred_numbers == batch_y).float()
                    total_correct += correct.sum().item()
                    total_positions += batch_y.numel()
            
            avg_val_loss = total_val_loss / len(val_loader)
            val_accuracy = total_correct / total_positions
            val_losses.append(avg_val_loss)
            
            # Step the LR scheduler on the validation loss.
            scheduler.step(avg_val_loss)
            
            # Progress report every 10 epochs.
            if (epoch + 1) % 10 == 0:
                print(f"Epoch {epoch+1:3d}/{CONFIG['epochs']}: "
                      f"Train Loss: {avg_train_loss:.4f}, "
                      f"Val Loss: {avg_val_loss:.4f}, "
                      f"Val Acc: {val_accuracy:.4f}, "
                      f"LR: {optimizer.param_groups[0]['lr']:.6f}")
            
            # Early-stopping bookkeeping: save on improvement, otherwise
            # count toward the patience limit.
            if avg_val_loss < best_val_loss:
                best_val_loss = avg_val_loss
                patience_counter = 0
                
                # Checkpoint the best model (weights + optimizer + metrics).
                torch.save({
                    'model_state_dict': model.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'epoch': epoch,
                    'train_loss': avg_train_loss,
                    'val_loss': avg_val_loss,
                    'val_accuracy': val_accuracy,
                    'config': CONFIG
                }, CONFIG['model_save_path'])
                
                print(f"[SAVE] 保存最佳模型 (Epoch {epoch+1}, Val Loss: {avg_val_loss:.4f})")
                
            else:
                patience_counter += 1
                if patience_counter >= CONFIG['patience']:
                    print(f"[STOP] 早停触发 (Patience: {CONFIG['patience']})")
                    break
        
        # 5. Final evaluation with the best checkpoint.
        print("\n[TEST] 测试最终模型...")
        
        # Reload the best weights saved during training.
        # NOTE(review): torch.load without weights_only=True unpickles
        # arbitrary objects; fine for a self-produced file, but newer
        # PyTorch defaults to weights_only=True — confirm compatibility.
        checkpoint = torch.load(CONFIG['model_save_path'], map_location=device)
        model.load_state_dict(checkpoint['model_state_dict'])
        model.eval()
        
        # Final pass over the validation set (same data used for early
        # stopping — this is a validation score, not a held-out test score).
        total_correct = 0
        total_exact_match = 0  # full matches: all 5 positions correct
        total_samples = 0
        position_accuracies = [0] * 5
        
        with torch.no_grad():
            for batch_X, batch_y in val_loader:
                predictions = model(batch_X)
                pred_numbers = torch.argmax(predictions, dim=-1)
                
                # Accumulate correct counts per digit position.
                for pos in range(5):
                    position_correct = (pred_numbers[:, pos] == batch_y[:, pos]).float().sum().item()
                    position_accuracies[pos] += position_correct
                
                # Full-ticket matches (every position correct).
                exact_matches = torch.all(pred_numbers == batch_y, dim=1).float().sum().item()
                total_exact_match += exact_matches
                
                total_correct += (pred_numbers == batch_y).float().sum().item()
                total_samples += batch_y.shape[0]
        
        # Final metrics: per-digit accuracy across all 5 positions, and
        # the full-match rate per ticket.
        overall_accuracy = total_correct / (total_samples * 5)
        exact_match_accuracy = total_exact_match / total_samples
        
        print(f"\n[RESULT] 最终测试结果:")
        print(f"整体准确率: {overall_accuracy:.4f}")
        print(f"完全匹配率: {exact_match_accuracy:.4f}")
        
        print(f"\n各位置准确率:")
        position_names = ['万位', '千位', '百位', '十位', '个位']
        for pos in range(5):
            pos_acc = position_accuracies[pos] / total_samples
            print(f"  {position_names[pos]}: {pos_acc:.4f}")
        
        # 6. Optionally persist the data processor's scaler (currently unused).
        # joblib.dump(processor.scaler, CONFIG['scaler_save_path'])
        
        print(f"\n[SUCCESS] 训练完成！")
        print(f"模型保存到: {CONFIG['model_save_path']}")
        print(f"最佳验证损失: {best_val_loss:.4f}")
        
        return True
        
    except Exception as e:
        # Broad catch is deliberate: this is the script's top-level boundary;
        # report the full traceback and signal failure to the caller.
        print(f"[ERROR] 训练过程出错: {e}")
        import traceback
        print(traceback.format_exc())
        return False


if __name__ == "__main__":
    success = train_plw_sequence_lstm()
    if success:
        print("[OK] 排列5序列LSTM模型训练成功！")
    else:
        print("[ERROR] 排列5序列LSTM模型训练失败！")