import os
import sys
import subprocess
import torch
import numpy as np
import pandas as pd
from torch.utils.data import Dataset, DataLoader
from sklearn.preprocessing import MinMaxScaler
import joblib
from loguru import logger
from sklearn.metrics import accuracy_score
import copy
import torch.nn as nn
from sklearn.model_selection import KFold
from torch.nn import TransformerEncoder, TransformerEncoderLayer

# Configure a crash-safe loguru logger; fall back to a plain stdout sink.
try:
    from safe_logger_config import configure_safe_logger
    logger = configure_safe_logger()
except ImportError:
    # Fallback configuration - unsupported parameters removed.
    from loguru import logger
    logger.remove()
    logger.add(sys.stdout, 
              format="{time:YYYY-MM-DD HH:mm:ss.SSS} | {level:<8} | {name}:{function}:{line} - {message}", 
              level="INFO")

# ---------------- Configuration ----------------
current_dir = os.path.dirname(os.path.abspath(__file__))
DATA_FILE = os.path.join(current_dir, "ssq_history.csv")  # draw-history CSV
MODEL_PATH = os.path.join(current_dir, "ssq_model.pth")  # combined ensemble checkpoint
SCALER_PATH = os.path.join(current_dir, "scaler_X.pkl")  # fitted MinMaxScaler
BATCH_SIZE = 32
EPOCHS = 1000  # maximum training epochs (early stopping usually ends sooner)
LEARNING_RATE = 0.001
WINDOW_SIZE = 10  # number of past draws per input sample
RED_BALLS = 6  # red numbers per SSQ (double-color-ball) draw
BLUE_BALLS = 1  # blue numbers per SSQ draw
RED_CLASSES = 33  # red numbers range 1-33
BLUE_CLASSES = 16  # blue numbers range 1-16
OUTPUT_SEQ_LENGTH_RED = RED_BALLS  # red output sequence length
OUTPUT_SEQ_LENGTH_BLUE = BLUE_BALLS  # blue output sequence length
HIDDEN_DIM = 128  # hidden layer width
PATIENCE = 10  # early-stopping patience (epochs)

# Make the project root importable so `model` resolves below.
project_root = os.path.abspath(os.path.join(current_dir, '..', '..'))
sys.path.append(project_root)

try:
    from model import LstmCRFModel  # absolute import of the project model class
except ImportError as e:
    logger.error(f"导入模型类失败: {e}")
    sys.exit(1)

from sklearn.model_selection import train_test_split  # NOTE(review): appears unused — K-fold CV is used instead

# 添加Transformer模型类
class TransformerModel(nn.Module):
    """Transformer encoder head with the same calling convention as LstmCRFModel.

    ``forward(src, target, mask)`` returns a scalar training loss;
    ``forward(src)`` returns a list of ``output_seq_length`` numpy arrays
    of predicted class indices (one per output position).
    """

    def __init__(self, input_dim, hidden_dim, output_dim, output_seq_length, num_layers=2, nhead=4, dropout=0.1):
        super(TransformerModel, self).__init__()
        self.model_type = 'Transformer'
        # NOTE(review): despite the name this is a plain linear projection,
        # not a positional encoding, so the encoder sees the window as an
        # unordered set of draws — confirm this is intentional.
        self.pos_encoder = nn.Linear(input_dim, hidden_dim)
        encoder_layers = TransformerEncoderLayer(d_model=hidden_dim, nhead=nhead, dim_feedforward=hidden_dim*4, dropout=dropout)
        self.transformer_encoder = TransformerEncoder(encoder_layers, num_layers)
        self.decoder = nn.Linear(hidden_dim, output_dim)
        self.output_dim = output_dim
        self.output_seq_length = output_seq_length
        self.hidden_dim = hidden_dim
        self.dropout = nn.Dropout(dropout)
        self.init_weights()

    def init_weights(self):
        """Initialise the decoder with small uniform weights and a zero bias."""
        initrange = 0.1
        self.decoder.bias.data.zero_()
        self.decoder.weight.data.uniform_(-initrange, initrange)

    def forward(self, src, target=None, mask=None):
        """Encode ``src``; compute a loss when ``target``/``mask`` are given, else predict.

        Args:
            src: float tensor of shape [batch_size, seq_len, input_dim].
            target: optional long tensor [batch_size, output_seq_length].
            mask: optional bool tensor [batch_size, output_seq_length];
                positions that are False are excluded from the loss.

        Returns:
            Scalar loss tensor in training mode, otherwise a list of
            ``output_seq_length`` numpy arrays, each of shape [batch_size].
        """
        src = src.permute(1, 0, 2)  # -> [seq_len, batch_size, input_dim]
        src = self.pos_encoder(src)
        output = self.transformer_encoder(src)
        output = self.dropout(output)
        output = self.decoder(output)
        # Mean-pool over time: a single [batch_size, output_dim] logit
        # vector shared by every output position.
        output = output.mean(dim=0)

        if target is not None and mask is not None:
            loss_fct = nn.CrossEntropyLoss(reduction='none')
            # BUGFIX: accumulate into a graph-connected zero tensor (was the
            # Python int 0), so the returned value always supports
            # .backward() even when every position is masked out.
            loss = output.sum() * 0.0
            for i in range(self.output_seq_length):
                active_loss = mask[:, i].float()
                if active_loss.sum() > 0:
                    active_logits = output.view(-1, self.output_dim)
                    active_labels = target[:, i].view(-1)
                    loss = loss + (loss_fct(active_logits, active_labels) * active_loss).sum() / active_loss.sum()
            return loss / self.output_seq_length

        # Prediction mode: the pooled logits are position-independent, so
        # every position gets the same argmax (computed once instead of
        # redundantly inside the loop).
        pred = torch.argmax(output, dim=1).cpu().numpy()
        return [pred for _ in range(self.output_seq_length)]

# 集成模型类
class EnsembleModel:
    """Combine an LSTM-CRF model and a Transformer model by random weighted voting.

    For each predicted position, the LSTM's prediction is kept with
    probability ``weights[0]``; otherwise the Transformer's prediction is
    used. Note this is a per-position random selection, not logit
    averaging, so repeated calls can differ when the sub-models disagree.
    """

    def __init__(self, lstm_model, transformer_model, weights=(0.6, 0.4)):
        # BUGFIX: default changed from a mutable list to a tuple so the
        # shared default object cannot be mutated across instances.
        self.lstm_model = lstm_model
        self.transformer_model = transformer_model
        self.weights = weights

    def predict(self, features):
        """Return ensembled per-position predictions for ``features``.

        Both sub-models are put into eval mode and run under no_grad; the
        results are merged position-by-position via a weighted coin flip.
        """
        self.lstm_model.eval()
        self.transformer_model.eval()

        with torch.no_grad():
            lstm_preds = self.lstm_model(features)
            transformer_preds = self.transformer_model(features)

            # Weighted random vote between the two models, per position.
            ensemble_preds = []
            for i in range(len(lstm_preds)):
                lstm_pred = lstm_preds[i]
                transformer_pred = transformer_preds[i]

                if np.random.rand() < self.weights[0]:
                    ensemble_preds.append(lstm_pred)
                else:
                    ensemble_preds.append(transformer_pred)

            return ensemble_preds

class LotteryDataset(Dataset):
    """Sliding-window PyTorch dataset over the SSQ draw-history CSV.

    Features: windows of ``window_size`` consecutive draws (red + blue
    numbers) scaled to [0, 1] with a MinMaxScaler.
    Labels: the draw immediately following each window, shifted to
    0-based class indices.
    """

    def __init__(self, csv_file, window_size, red_balls, blue_balls):
        # NOTE(review): assumes CSV column 0 is a draw identifier and
        # columns 1..red_balls+blue_balls hold the drawn numbers — confirm
        # against the data-fetch script.
        self.data = pd.read_csv(csv_file)
        self.scaler_X = MinMaxScaler()
        self.features, self.labels = self.preprocess(self.data, window_size, red_balls, blue_balls)
        
        # Data augmentation (noise copy + red-label permutations).
        self.augment_data()

    def preprocess(self, data, window_size, red_balls, blue_balls):
        """Build (features, labels) tensors from the raw draw table.

        Returns:
            features: float32 tensor (num_samples, window_size, red+blue).
            labels:   long tensor (num_samples, red_balls + blue_balls).

        Raises:
            ValueError: if the CSV has fewer columns than expected.
        """
        features, labels = [], []
        expected_columns = 1 + red_balls + blue_balls
        if len(data.columns) < expected_columns:
            raise ValueError(f"数据列数不足，当前列数: {len(data.columns)}，期望至少 {expected_columns} 列。")

        for i in range(len(data) - window_size):
            # Feature: the red and blue numbers of the draws in the window.
            feature_window = data.iloc[i:i + window_size, 1:1 + red_balls + blue_balls].values
            features.append(feature_window)

            # Label: the next draw's red and blue numbers.
            red_labels_seq = data.iloc[i + window_size, 1:1 + red_balls].values - 1  # -1 to make labels 0-based
            blue_label = data.iloc[i + window_size, 1 + red_balls:1 + red_balls + blue_balls].values - 1
            combined_labels = np.concatenate((red_labels_seq, blue_label))
            labels.append(combined_labels)

        # Convert to NumPy arrays and scale the features.
        # NOTE(review): the scaler is fitted on the entire history, so any
        # later train/validation split shares scaling statistics (mild
        # leakage) — confirm this is acceptable.
        features_np = np.array(features)  # shape: (num_samples, window_size, feature_dim)
        features_scaled = self.scaler_X.fit_transform(features_np.reshape(-1, features_np.shape[-1])).reshape(features_np.shape)

        labels_np = np.array(labels)  # shape: (num_samples, total_labels)

        return (
            torch.tensor(features_scaled, dtype=torch.float32),
            torch.tensor(labels_np, dtype=torch.long)
        )
        
    def augment_data(self):
        """Augment the dataset in place with noisy copies and permuted red labels.

        Two augmentations, both keeping the original samples:
        1. One full copy of the features with small Gaussian noise added
           (labels unchanged).
        2. Per sample, a copy whose red-ball labels are randomly permuted.
           NOTE(review): only the labels are shuffled while the paired
           features are left unchanged — presumably to teach
           order-invariance of the red sequence; confirm this matches the
           CRF training objective.
        Roughly triples the number of samples.
        """
        logger.info("执行数据增强...")
        
        # Keep copies of the original tensors.
        original_features = self.features.clone()
        original_labels = self.labels.clone()
        
        # Collect augmented samples here.
        augmented_features = []
        augmented_labels = []
        
        # 1. Add small random noise to the features.
        noise_features = original_features + torch.randn_like(original_features) * 0.05
        augmented_features.append(noise_features)
        augmented_labels.append(original_labels)
        
        # 2. Randomly permute the red-ball order in the labels.
        for i in range(len(original_labels)):
            # Only shuffle the red part (the first 6 numbers).
            red_part = original_labels[i, :RED_BALLS].clone()
            # Random permutation of the order.
            idx = torch.randperm(RED_BALLS)
            shuffled_red = red_part[idx]
            # Reassemble the full label.
            new_label = original_labels[i].clone()
            new_label[:RED_BALLS] = shuffled_red
            augmented_labels.append(new_label.unsqueeze(0))
            augmented_features.append(original_features[i].unsqueeze(0))
        
        # Concatenate original and augmented data.
        all_features = [original_features] + augmented_features
        all_labels = [original_labels] + augmented_labels
        
        self.features = torch.cat(all_features, dim=0)
        self.labels = torch.cat(all_labels, dim=0)
        
        logger.info(f"数据增强完成，样本数从 {len(original_features)} 增加到 {len(self.features)}")

    def __len__(self):
        """Return the number of samples in the dataset."""
        return self.features.shape[0]
        
    def __getitem__(self, idx):
        """Return the (features, labels) pair at index ``idx``."""
        return self.features[idx], self.labels[idx]

def fetch_data_if_not_exists():
    """Ensure the history CSV exists; if missing, run fetch_ssq_data.py to create it.

    Exits the process when the fetch script is missing or fails.
    """
    if os.path.exists(DATA_FILE):
        logger.info(f"数据文件 {DATA_FILE} 已存在。")
        return

    logger.info(f"数据文件 {DATA_FILE} 不存在，开始获取数据...")
    fetch_script = os.path.join(current_dir, 'fetch_ssq_data.py')
    if not os.path.exists(fetch_script):
        logger.error(f"数据获取脚本不存在: {fetch_script}")
        sys.exit(1)

    # Run the fetch script with the same interpreter this process uses.
    python_executable = sys.executable
    logger.info(f"运行数据获取脚本: {fetch_script} 使用解释器: {python_executable}")
    try:
        subprocess.run([python_executable, fetch_script], check=True,
                       encoding='utf-8', errors='replace')
    except subprocess.CalledProcessError as e:
        logger.error(f"运行数据获取脚本失败: {e}")
        sys.exit(1)
    logger.info("数据获取完成。")

def train_model():
    """Train red/blue LSTM-CRF and Transformer models with 5-fold CV and save them.

    Side effects: fetches the data file if missing, writes the LSTM,
    Transformer and combined ensemble checkpoints plus the fitted scaler
    to disk, then runs a prediction-diversity sanity check on the final
    models.
    """
    fetch_data_if_not_exists()

    if not os.path.exists(DATA_FILE):
        logger.error(f"数据文件不存在: {DATA_FILE}")
        sys.exit(1)

    # Load the data (includes preprocessing and augmentation).
    logger.info("加载数据...")
    dataset = LotteryDataset(DATA_FILE, WINDOW_SIZE, RED_BALLS, BLUE_BALLS)
    
    # K-fold cross-validation setup.
    k_folds = 5
    kfold = KFold(n_splits=k_folds, shuffle=True, random_state=42)
    
    # Index array used for the KFold split.
    indices = np.arange(len(dataset))
    
    input_dim = dataset.features.shape[-1]
    
    # Use the GPU when available.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logger.info(f"使用设备: {device}")
    
    # Best model state-dicts, one entry per fold.
    best_models = []
    
    # Cross-validated training.
    logger.info(f"开始{k_folds}折交叉验证训练...")
    
    for fold, (train_ids, val_ids) in enumerate(kfold.split(indices)):
        logger.info(f"开始第 {fold + 1} 折训练")
        
        # Data loaders for this fold.
        train_subsampler = torch.utils.data.SubsetRandomSampler(train_ids)
        val_subsampler = torch.utils.data.SubsetRandomSampler(val_ids)
        
        train_loader = DataLoader(dataset, batch_size=BATCH_SIZE, sampler=train_subsampler)
        val_loader = DataLoader(dataset, batch_size=BATCH_SIZE, sampler=val_subsampler)
        
        # Initialise the models.
        # LSTM-CRF models, with dropout for regularisation.
        red_lstm_model = LstmCRFModel(input_dim, hidden_dim=HIDDEN_DIM, output_dim=RED_CLASSES, 
                                      output_seq_length=OUTPUT_SEQ_LENGTH_RED, num_layers=2, dropout=0.3).to(device)
        blue_lstm_model = LstmCRFModel(input_dim, hidden_dim=HIDDEN_DIM, output_dim=BLUE_CLASSES, 
                                       output_seq_length=OUTPUT_SEQ_LENGTH_BLUE, num_layers=2, dropout=0.3).to(device)
        
        # Transformer models, with dropout.
        red_transformer_model = TransformerModel(input_dim, hidden_dim=HIDDEN_DIM, output_dim=RED_CLASSES, 
                                                output_seq_length=OUTPUT_SEQ_LENGTH_RED, dropout=0.3).to(device)
        blue_transformer_model = TransformerModel(input_dim, hidden_dim=HIDDEN_DIM, output_dim=BLUE_CLASSES, 
                                                 output_seq_length=OUTPUT_SEQ_LENGTH_BLUE, dropout=0.3).to(device)
        
        # Optimisers - weight decay added for stronger regularisation.
        red_lstm_optimizer = torch.optim.Adam(red_lstm_model.parameters(), lr=LEARNING_RATE, weight_decay=1e-4)  # stronger weight decay
        blue_lstm_optimizer = torch.optim.Adam(blue_lstm_model.parameters(), lr=LEARNING_RATE, weight_decay=1e-4)
        red_transformer_optimizer = torch.optim.Adam(red_transformer_model.parameters(), lr=LEARNING_RATE, weight_decay=1e-4)
        blue_transformer_optimizer = torch.optim.Adam(blue_transformer_model.parameters(), lr=LEARNING_RATE, weight_decay=1e-4)
        
        # LR schedulers - ReduceLROnPlateau instead of StepLR, so the
        # learning rate adapts to the validation loss.
        red_lstm_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(red_lstm_optimizer, mode='min', factor=0.5, patience=5)
        blue_lstm_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(blue_lstm_optimizer, mode='min', factor=0.5, patience=5)
        red_transformer_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(red_transformer_optimizer, mode='min', factor=0.5, patience=5)
        blue_transformer_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(blue_transformer_optimizer, mode='min', factor=0.5, patience=5)
        
        # Training state for this fold.
        best_val_loss = float('inf')
        best_fold_models = {
            'red_lstm': copy.deepcopy(red_lstm_model.state_dict()),
            'blue_lstm': copy.deepcopy(blue_lstm_model.state_dict()),
            'red_transformer': copy.deepcopy(red_transformer_model.state_dict()),
            'blue_transformer': copy.deepcopy(blue_transformer_model.state_dict())
        }
        trigger_times = 0
        
        for epoch in range(EPOCHS):
            # Training mode.
            red_lstm_model.train()
            blue_lstm_model.train()
            red_transformer_model.train()
            blue_transformer_model.train()
            
            total_red_lstm_loss = 0
            total_blue_lstm_loss = 0
            total_red_transformer_loss = 0
            total_blue_transformer_loss = 0
            
            for features, labels in train_loader:
                features = features.to(device)
                labels = labels.to(device)
                
                # Red balls - LSTM training step.
                red_labels = labels[:, :RED_BALLS]
                red_mask = (red_labels >= 0)
                red_lstm_loss = red_lstm_model(features, red_labels, red_mask)
                red_lstm_optimizer.zero_grad()
                red_lstm_loss.backward()
                torch.nn.utils.clip_grad_norm_(red_lstm_model.parameters(), 1.0)  # gradient clipping
                red_lstm_optimizer.step()
                total_red_lstm_loss += red_lstm_loss.item()
                
                # Blue balls - LSTM training step.
                blue_labels = labels[:, RED_BALLS:]
                blue_mask = (blue_labels >= 0)
                blue_lstm_loss = blue_lstm_model(features, blue_labels, blue_mask)
                blue_lstm_optimizer.zero_grad()
                blue_lstm_loss.backward()
                torch.nn.utils.clip_grad_norm_(blue_lstm_model.parameters(), 1.0)
                blue_lstm_optimizer.step()
                total_blue_lstm_loss += blue_lstm_loss.item()
                
                # Red balls - Transformer training step.
                red_transformer_loss = red_transformer_model(features, red_labels, red_mask)
                red_transformer_optimizer.zero_grad()
                red_transformer_loss.backward()
                torch.nn.utils.clip_grad_norm_(red_transformer_model.parameters(), 1.0)
                red_transformer_optimizer.step()
                total_red_transformer_loss += red_transformer_loss.item()
                
                # Blue balls - Transformer training step.
                blue_transformer_loss = blue_transformer_model(features, blue_labels, blue_mask)
                blue_transformer_optimizer.zero_grad()
                blue_transformer_loss.backward()
                torch.nn.utils.clip_grad_norm_(blue_transformer_model.parameters(), 1.0)
                blue_transformer_optimizer.step()
                total_blue_transformer_loss += blue_transformer_loss.item()
            
            # Average training losses over the epoch.
            avg_red_lstm_loss = total_red_lstm_loss / len(train_loader)
            avg_blue_lstm_loss = total_blue_lstm_loss / len(train_loader)
            avg_red_transformer_loss = total_red_transformer_loss / len(train_loader)
            avg_blue_transformer_loss = total_blue_transformer_loss / len(train_loader)
            
            # Validation pass.
            red_lstm_model.eval()
            blue_lstm_model.eval()
            red_transformer_model.eval()
            blue_transformer_model.eval()
            
            val_red_lstm_loss = 0
            val_blue_lstm_loss = 0
            val_red_transformer_loss = 0
            val_blue_transformer_loss = 0
            
            all_red_lstm_preds, all_red_transformer_preds = [], []
            all_blue_lstm_preds, all_blue_transformer_preds = [], []
            all_red_labels, all_blue_labels = [], []
            
            with torch.no_grad():
                for features, labels in val_loader:
                    features = features.to(device)
                    labels = labels.to(device)
                    
                    # Red balls - LSTM validation.
                    red_labels = labels[:, :RED_BALLS]
                    red_mask = (red_labels >= 0)
                    red_lstm_loss = red_lstm_model(features, red_labels, red_mask)
                    val_red_lstm_loss += red_lstm_loss.item()
                    red_lstm_preds = red_lstm_model(features)
                    all_red_lstm_preds.extend([pred for sequence in red_lstm_preds for pred in sequence])
                    
                    # Red balls - Transformer validation.
                    red_transformer_loss = red_transformer_model(features, red_labels, red_mask)
                    val_red_transformer_loss += red_transformer_loss.item()
                    red_transformer_preds = red_transformer_model(features)
                    all_red_transformer_preds.extend([pred for sequence in red_transformer_preds for pred in sequence])
                    
                    all_red_labels.extend(red_labels.cpu().numpy().flatten())
                    
                    # Blue balls - LSTM validation.
                    blue_labels = labels[:, RED_BALLS:]
                    blue_mask = (blue_labels >= 0)
                    blue_lstm_loss = blue_lstm_model(features, blue_labels, blue_mask)
                    val_blue_lstm_loss += blue_lstm_loss.item()
                    blue_lstm_preds = blue_lstm_model(features)
                    all_blue_lstm_preds.extend([pred for sequence in blue_lstm_preds for pred in sequence])
                    
                    # Blue balls - Transformer validation.
                    blue_transformer_loss = blue_transformer_model(features, blue_labels, blue_mask)
                    val_blue_transformer_loss += blue_transformer_loss.item()
                    blue_transformer_preds = blue_transformer_model(features)
                    all_blue_transformer_preds.extend([pred for sequence in blue_transformer_preds for pred in sequence])
                    
                    all_blue_labels.extend(blue_labels.cpu().numpy().flatten())
            
            # Average validation losses.
            avg_val_red_lstm_loss = val_red_lstm_loss / len(val_loader)
            avg_val_blue_lstm_loss = val_blue_lstm_loss / len(val_loader)
            avg_val_red_transformer_loss = val_red_transformer_loss / len(val_loader)
            avg_val_blue_transformer_loss = val_blue_transformer_loss / len(val_loader)
            
            # Per-position accuracy over the flattened predictions.
            # NOTE(review): assumes the models' prediction mode yields one
            # value per output position per sample, so lengths line up with
            # the flattened labels — confirm against LstmCRFModel's decode.
            red_lstm_accuracy = accuracy_score(all_red_labels, all_red_lstm_preds)
            blue_lstm_accuracy = accuracy_score(all_blue_labels, all_blue_lstm_preds)
            red_transformer_accuracy = accuracy_score(all_red_labels, all_red_transformer_preds)
            blue_transformer_accuracy = accuracy_score(all_blue_labels, all_blue_transformer_preds)
            
            # Ensemble predictions via random per-position voting.
            all_red_ensemble_preds = []
            all_blue_ensemble_preds = []
            
            for i in range(len(all_red_lstm_preds)):
                # Simple random vote.
                if np.random.rand() < 0.6:  # 60% of the weight goes to the LSTM
                    all_red_ensemble_preds.append(all_red_lstm_preds[i])
                else:
                    all_red_ensemble_preds.append(all_red_transformer_preds[i])
                    
            for i in range(len(all_blue_lstm_preds)):
                if np.random.rand() < 0.6:
                    all_blue_ensemble_preds.append(all_blue_lstm_preds[i])
                else:
                    all_blue_ensemble_preds.append(all_blue_transformer_preds[i])
            
            # Ensemble accuracy.
            red_ensemble_accuracy = accuracy_score(all_red_labels, all_red_ensemble_preds)
            blue_ensemble_accuracy = accuracy_score(all_blue_labels, all_blue_ensemble_preds)
            
            logger.info(f"Fold {fold+1}, Epoch {epoch+1}: "
                        f"LSTM - 红球 Loss = {avg_val_red_lstm_loss:.4f}, 蓝球 Loss = {avg_val_blue_lstm_loss:.4f}, "
                        f"红球 Acc = {red_lstm_accuracy:.4f}, 蓝球 Acc = {blue_lstm_accuracy:.4f} | "
                        f"Transformer - 红球 Loss = {avg_val_red_transformer_loss:.4f}, 蓝球 Loss = {avg_val_blue_transformer_loss:.4f}, "
                        f"红球 Acc = {red_transformer_accuracy:.4f}, 蓝球 Acc = {blue_transformer_accuracy:.4f} | "
                        f"集成 - 红球 Acc = {red_ensemble_accuracy:.4f}, 蓝球 Acc = {blue_ensemble_accuracy:.4f}")
            
            # Step the LR schedulers on the respective validation losses.
            total_val_lstm_loss = avg_val_red_lstm_loss + avg_val_blue_lstm_loss
            total_val_transformer_loss = avg_val_red_transformer_loss + avg_val_blue_transformer_loss
            
            red_lstm_scheduler.step(avg_val_red_lstm_loss)
            blue_lstm_scheduler.step(avg_val_blue_lstm_loss)
            red_transformer_scheduler.step(avg_val_red_transformer_loss)
            blue_transformer_scheduler.step(avg_val_blue_transformer_loss)
            
            # Early stopping on the combined (LSTM + Transformer) validation loss.
            total_val_loss = (total_val_lstm_loss + total_val_transformer_loss) / 2
            
            if total_val_loss < best_val_loss:
                best_val_loss = total_val_loss
                best_fold_models = {
                    'red_lstm': copy.deepcopy(red_lstm_model.state_dict()),
                    'blue_lstm': copy.deepcopy(blue_lstm_model.state_dict()),
                    'red_transformer': copy.deepcopy(red_transformer_model.state_dict()),
                    'blue_transformer': copy.deepcopy(blue_transformer_model.state_dict())
                }
                trigger_times = 0
            else:
                trigger_times += 1
                if trigger_times >= PATIENCE:
                    logger.info(f"早停触发，第 {fold+1} 折训练停止在第 {epoch+1} 轮")
                    break
        
        # Keep this fold's best model snapshots.
        best_models.append(best_fold_models)
        logger.info(f"第 {fold+1} 折训练完成，最佳验证损失: {best_val_loss:.4f}")
    
    # Combine the best models from all folds.
    logger.info("集成所有折叠的最佳模型...")
    
    # Instantiate the final models.
    final_red_lstm_model = LstmCRFModel(input_dim, hidden_dim=HIDDEN_DIM, output_dim=RED_CLASSES, 
                                        output_seq_length=OUTPUT_SEQ_LENGTH_RED, num_layers=2).to(device)
    final_blue_lstm_model = LstmCRFModel(input_dim, hidden_dim=HIDDEN_DIM, output_dim=BLUE_CLASSES, 
                                         output_seq_length=OUTPUT_SEQ_LENGTH_BLUE, num_layers=2).to(device)
    final_red_transformer_model = TransformerModel(input_dim, hidden_dim=HIDDEN_DIM, output_dim=RED_CLASSES, 
                                                  output_seq_length=OUTPUT_SEQ_LENGTH_RED).to(device)
    final_blue_transformer_model = TransformerModel(input_dim, hidden_dim=HIDDEN_DIM, output_dim=BLUE_CLASSES, 
                                                   output_seq_length=OUTPUT_SEQ_LENGTH_BLUE).to(device)
    
    # Use the last fold's weights as the base.
    # NOTE(review): despite the "combine all folds" log above, only the
    # final fold's state-dicts are loaded — earlier folds are discarded.
    final_red_lstm_model.load_state_dict(best_models[-1]['red_lstm'])
    final_blue_lstm_model.load_state_dict(best_models[-1]['blue_lstm'])
    final_red_transformer_model.load_state_dict(best_models[-1]['red_transformer'])
    final_blue_transformer_model.load_state_dict(best_models[-1]['blue_transformer'])
    
    # Build the ensemble wrappers.
    # NOTE(review): these objects are not used or persisted below — only
    # their config dict is saved.
    red_ensemble = EnsembleModel(final_red_lstm_model, final_red_transformer_model)
    blue_ensemble = EnsembleModel(final_blue_lstm_model, final_blue_transformer_model)
    
    # Persist the models and the scaler.
    os.makedirs(os.path.dirname(MODEL_PATH), exist_ok=True)
    
    # Save the LSTM models.
    lstm_model_path = os.path.join(os.path.dirname(MODEL_PATH), "ssq_lstm_model.pth")
    torch.save({
        "red_model": final_red_lstm_model.state_dict(),
        "blue_model": final_blue_lstm_model.state_dict()
    }, lstm_model_path)
    
    # Save the Transformer models.
    transformer_model_path = os.path.join(os.path.dirname(MODEL_PATH), "ssq_transformer_model.pth")
    torch.save({
        "red_model": final_red_transformer_model.state_dict(),
        "blue_model": final_blue_transformer_model.state_dict()
    }, transformer_model_path)
    
    # Ensemble configuration.
    ensemble_config = {
        "lstm_model_path": lstm_model_path,
        "transformer_model_path": transformer_model_path,
        "weights": [0.6, 0.4]  # LSTM weight and Transformer weight
    }
    
    # Save the combined checkpoint as the main model.
    torch.save({
        "ensemble_config": ensemble_config,
        "red_lstm_model": final_red_lstm_model.state_dict(),
        "blue_lstm_model": final_blue_lstm_model.state_dict(),
        "red_transformer_model": final_red_transformer_model.state_dict(),
        "blue_transformer_model": final_blue_transformer_model.state_dict()
    }, MODEL_PATH)
    
    # Save the fitted feature scaler.
    joblib.dump(dataset.scaler_X, SCALER_PATH)
    
    logger.info(f"LSTM模型已保存到 {lstm_model_path}")
    logger.info(f"Transformer模型已保存到 {transformer_model_path}")
    logger.info(f"集成模型已保存到 {MODEL_PATH}")
    logger.info(f"缩放器已保存到 {SCALER_PATH}")
    
    # Sanity-check the diversity of the trained models' predictions.
    test_model_randomness(final_red_lstm_model, final_blue_lstm_model, 
                          final_red_transformer_model, final_blue_transformer_model,
                          dataset, device)

def test_model_randomness(red_lstm, blue_lstm, red_transformer, blue_transformer, dataset, device):
    """Probe the diversity of the trained models' predictions.

    Runs 10 noise-perturbed forward passes through the LSTM-CRF models and
    logs how many distinct red-ball combinations appear; warns when
    diversity is low (a possible overfitting signal). The Transformer
    models are put into eval mode but are never actually queried here.
    """
    logger.info("测试模型预测的随机性...")
    
    # Switch all models to evaluation mode.
    red_lstm.eval()
    blue_lstm.eval()
    red_transformer.eval()
    blue_transformer.eval()
    
    # Use the most recent samples as test input.
    # NOTE(review): features[-5:] already has shape (5, window, feat);
    # unsqueeze(0) yields a 4-D tensor, which nn.LSTM would reject —
    # confirm the intended batch shape.
    test_features = dataset.features[-5:].unsqueeze(0).to(device)
    
    # Generate 10 predictions and examine their diversity.
    red_predictions_all = []
    blue_predictions_all = []
    
    with torch.no_grad():
        for i in range(10):
            # Perturb the input with small random noise.
            noisy_features = test_features + torch.randn_like(test_features) * 0.02
            
            # LSTM prediction — reaches into LstmCRFModel internals
            # (.lstm/.fc/.crf); assumes that exact module layout — confirm
            # against model.py.
            red_lstm_logits = red_lstm.lstm(noisy_features)
            red_lstm_logits = red_lstm.fc(red_lstm_logits[0])
            red_lstm_logits = red_lstm_logits.view(-1, red_lstm.output_seq_length, red_lstm.output_dim)
            red_lstm_preds = red_lstm.crf.decode(red_lstm_logits)
            
            blue_lstm_logits = blue_lstm.lstm(noisy_features)
            blue_lstm_logits = blue_lstm.fc(blue_lstm_logits[0])
            blue_lstm_logits = blue_lstm_logits.view(-1, blue_lstm.output_seq_length, blue_lstm.output_dim)
            blue_lstm_preds = blue_lstm.crf.decode(blue_lstm_logits)
            
            # Convert predictions back to 1-based lottery numbers.
            red_nums = [num + 1 for num in red_lstm_preds[0]]  # +1 back to original labels
            blue_nums = [num + 1 for num in blue_lstm_preds[0]]
            
            # Ensure the red numbers are unique and sorted; top up with
            # random numbers when deduplication left fewer than RED_BALLS.
            red_nums = sorted(list(set(red_nums)))
            while len(red_nums) < RED_BALLS:
                new_num = np.random.randint(1, RED_CLASSES + 1)
                if new_num not in red_nums:
                    red_nums.append(new_num)
            red_nums = sorted(red_nums[:RED_BALLS])
            
            red_predictions_all.append(red_nums)
            blue_predictions_all.append(blue_nums)
            
            logger.info(f"测试预测 {i+1}: 红球 {red_nums}, 蓝球 {blue_nums}")
    
    # Analyse the diversity of the predictions.
    unique_predictions = set(tuple(pred) for pred in red_predictions_all)
    logger.info(f"10次预测中，红球唯一组合数: {len(unique_predictions)}")
    
    # Too few unique combinations suggests the model may have overfitted.
    if len(unique_predictions) < 5:
        logger.warning("警告：模型预测多样性不足，可能存在过拟合问题")
        logger.info("建议：1. 增加数据增强 2. 提高dropout率 3. 增加正则化强度")


# Script entry point: run the full training pipeline.
if __name__ == "__main__":
    logger.info("开始训练模型...")
    train_model()
    logger.info("模型训练完成。")
