import os
import sys
import subprocess
import torch
import numpy as np
import pandas as pd
from torch.utils.data import Dataset, DataLoader
from sklearn.preprocessing import MinMaxScaler
import joblib
from loguru import logger
from sklearn.metrics import accuracy_score
import copy

# Configure a "safe" loguru logger; fall back to a plain stdout sink when the
# project-specific helper module is not importable.
try:
    from safe_logger_config import configure_safe_logger
    logger = configure_safe_logger()
except ImportError:
    # Fallback configuration — unsupported parameters removed.
    from loguru import logger
    logger.remove()
    logger.add(sys.stdout, 
              format="{time:YYYY-MM-DD HH:mm:ss.SSS} | {level:<8} | {name}:{function}:{line} - {message}", 
              level="INFO")

# ---------------- Configuration ----------------
current_dir = os.path.dirname(os.path.abspath(__file__))
DATA_FILE = os.path.join(current_dir, "dlt_history.csv")   # historical draw data
MODEL_PATH = os.path.join(current_dir, "dlt_model.pth")    # main (ensemble) checkpoint
SCALER_PATH = os.path.join(current_dir, "scaler_X.pkl")    # fitted feature scaler
BATCH_SIZE = 32
EPOCHS = 1000  # increased number of training epochs (early stopping usually ends sooner)
LEARNING_RATE = 0.001
WINDOW_SIZE = 10  # number of past draws per input sample
PATIENCE = 10  # early-stopping patience (epochs without improvement)

# Project root, appended to sys.path so sibling packages resolve.
project_root = os.path.abspath(os.path.join(current_dir, '..', '..'))
sys.path.append(project_root)

# Import the LSTM-CRF model class; abort early with a logged error if missing.
try:
    from model import LstmCRFModel  # absolute import
except ImportError as e:
    logger.error(f"导入模型类失败: {e}")
    sys.exit(1)

from sklearn.model_selection import train_test_split  # NOTE(review): unused in this file

class LotteryDataset(Dataset):
    """Sliding-window dataset over historical lottery draws.

    Each sample pairs a window of `window_size` consecutive draws (red and
    blue ball columns, min-max scaled) with the draw that follows the window,
    shifted to zero-based class indices.

    NOTE(review): the scaler is fitted on the whole file, including rows that
    later serve as validation windows — verify whether that leakage matters.
    """

    def __init__(self, csv_file, window_size, red_balls=5, blue_balls=2):
        self.data = pd.read_csv(csv_file)

        # Map the Chinese column headers onto the names the code expects.
        self.data.rename(columns={
            '红球_1': 'Red_1',
            '红球_2': 'Red_2',
            '红球_3': 'Red_3',
            '红球_4': 'Red_4',
            '红球_5': 'Red_5',
            '蓝球_1': 'Blue_1',
            '蓝球_2': 'Blue_2'
        }, inplace=True)

        self.scaler_X = MinMaxScaler()
        self.features, self.labels = self.preprocess(self.data, window_size, red_balls, blue_balls)

    def preprocess(self, data, window_size, red_balls, blue_balls):
        """Build (features, labels) tensors from the raw draw table.

        Raises ValueError when the table has fewer columns than one index
        column plus the expected ball columns.
        """
        expected_columns = 1 + red_balls + blue_balls
        if len(data.columns) < expected_columns:
            raise ValueError(f"数据列数不足，当前列数: {len(data.columns)}，期望至少 {expected_columns} 列。")

        n_balls = red_balls + blue_balls
        windows, targets = [], []
        for start in range(len(data) - window_size):
            end = start + window_size
            # Feature: the ball columns of `window_size` consecutive draws.
            windows.append(data.iloc[start:end, 1:1 + n_balls].values)

            # Label: the draw right after the window, minus 1 for 0-based classes.
            next_red = data.iloc[end, 1:1 + red_balls].values - 1
            next_blue = data.iloc[end, 1 + red_balls:1 + n_balls].values - 1
            targets.append(np.concatenate((next_red, next_blue)))

        # Scale feature values column-wise across all windows, then restore
        # the (num_samples, window_size, feature_dim) shape.
        feats = np.array(windows)
        flat = feats.reshape(-1, feats.shape[-1])
        scaled = self.scaler_X.fit_transform(flat).reshape(feats.shape)

        return (
            torch.tensor(scaled, dtype=torch.float32),
            torch.tensor(np.array(targets), dtype=torch.long),
        )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, idx):
        return self.features[idx], self.labels[idx]
def fetch_data_if_not_exists():
    """Ensure the history CSV exists; run fetch_dlt_data.py to create it if not.

    Exits the process when the fetch script is missing or fails.
    """
    if os.path.exists(DATA_FILE):
        logger.info(f"数据文件 {DATA_FILE} 已存在。")
        return

    logger.info(f"数据文件 {DATA_FILE} 不存在，开始获取数据...")
    fetch_script = os.path.join(current_dir, 'fetch_dlt_data.py')
    if not os.path.exists(fetch_script):
        logger.error(f"数据获取脚本不存在: {fetch_script}")
        sys.exit(1)

    # Reuse the interpreter running this script so the child sees the same env.
    python_executable = sys.executable
    logger.info(f"运行数据获取脚本: {fetch_script} 使用解释器: {python_executable}")
    try:
        subprocess.run([python_executable, fetch_script], check=True,
                       encoding='utf-8', errors='replace')
        logger.info("数据获取完成。")
    except subprocess.CalledProcessError as e:
        logger.error(f"运行数据获取脚本失败: {e}")
        sys.exit(1)

# 在import部分添加
import torch.nn as nn
from sklearn.model_selection import KFold
from torch.nn import TransformerEncoder, TransformerEncoderLayer

# 添加Transformer模型类
class TransformerModel(nn.Module):
    """Transformer encoder that predicts one class per output position.

    Fix: the original decoder emitted a single `output_dim`-wide logit vector
    that was reused for every position of the output sequence, so the masked
    loss scored identical logits against each target position and prediction
    mode returned the same argmax `output_seq_length` times. The decoder now
    emits `output_dim * output_seq_length` logits, reshaped into one
    independent head per output position.
    """

    def __init__(self, input_dim, hidden_dim, output_dim, output_seq_length, num_layers=2, nhead=4, dropout=0.1):
        super(TransformerModel, self).__init__()
        self.model_type = 'Transformer'
        # NOTE(review): despite the name, this is a learned input projection,
        # not a positional encoding — the encoder sees no position information.
        self.pos_encoder = nn.Linear(input_dim, hidden_dim)
        encoder_layers = TransformerEncoderLayer(d_model=hidden_dim, nhead=nhead, dim_feedforward=hidden_dim*4, dropout=dropout)
        self.transformer_encoder = TransformerEncoder(encoder_layers, num_layers)
        # One logit head per output position, flattened into a single Linear.
        self.decoder = nn.Linear(hidden_dim, output_dim * output_seq_length)
        self.output_dim = output_dim
        self.output_seq_length = output_seq_length
        self.hidden_dim = hidden_dim
        self.dropout = nn.Dropout(dropout)
        self.init_weights()

    def init_weights(self):
        """Small uniform initialization for the decoder head."""
        initrange = 0.1
        self.decoder.bias.data.zero_()
        self.decoder.weight.data.uniform_(-initrange, initrange)

    def forward(self, src, target=None, mask=None):
        """Run the encoder and either score targets or predict.

        Args:
            src: [batch_size, seq_len, input_dim] input windows.
            target: optional [batch_size, output_seq_length] class indices.
            mask: optional [batch_size, output_seq_length] bool/0-1 validity mask.

        Returns:
            Scalar mean masked cross-entropy loss when target and mask are
            given; otherwise a list of `output_seq_length` numpy arrays, each
            holding the argmax class per batch element.
        """
        src = src.permute(1, 0, 2)  # [seq_len, batch_size, input_dim]
        src = self.pos_encoder(src)
        encoded = self.transformer_encoder(src)
        encoded = self.dropout(encoded)
        pooled = encoded.mean(dim=0)  # mean-pool over time -> [batch, hidden_dim]
        # Per-position logits: [batch, output_seq_length, output_dim].
        logits = self.decoder(pooled).view(-1, self.output_seq_length, self.output_dim)

        if target is not None and mask is not None:
            # Masked cross-entropy averaged over positions; a position with no
            # active samples contributes nothing (matches original behavior).
            loss_fct = nn.CrossEntropyLoss(reduction='none')
            loss = 0
            for i in range(self.output_seq_length):
                active_loss = mask[:, i].float()
                if active_loss.sum() > 0:
                    per_sample = loss_fct(logits[:, i, :], target[:, i].view(-1))
                    loss += (per_sample * active_loss).sum() / active_loss.sum()
            return loss / self.output_seq_length

        # Prediction mode: positions now produce genuinely distinct argmaxes.
        return [torch.argmax(logits[:, i, :], dim=1).cpu().numpy()
                for i in range(self.output_seq_length)]

# 集成模型类
class EnsembleModel:
    """Stochastic ensemble of an LSTM model and a Transformer model.

    Each per-sample prediction is drawn from the LSTM model with probability
    `weights[0]`, otherwise from the Transformer model — weighted random
    voting, not logit averaging.

    Fix: the `weights` parameter used a mutable default list; it now uses a
    `None` sentinel (same effective default, backward compatible).
    """

    def __init__(self, lstm_model, transformer_model, weights=None):
        self.lstm_model = lstm_model
        self.transformer_model = transformer_model
        # (LSTM, Transformer) selection probabilities; avoid a shared mutable default.
        self.weights = [0.6, 0.4] if weights is None else weights

    def predict(self, features):
        """Return per-sample predictions chosen randomly between both models.

        NOTE(review): selection uses np.random, so repeated calls on the same
        input can return different results — seed externally if reproducibility
        is required.
        """
        self.lstm_model.eval()
        self.transformer_model.eval()

        with torch.no_grad():
            lstm_preds = self.lstm_model(features)
            transformer_preds = self.transformer_model(features)

            ensemble_preds = []
            for i in range(len(lstm_preds)):
                # Weighted coin flip: weights[0] chance of taking the LSTM output.
                if np.random.rand() < self.weights[0]:
                    ensemble_preds.append(lstm_preds[i])
                else:
                    ensemble_preds.append(transformer_preds[i])

            return ensemble_preds

def train_model():
    """Train red/blue LSTM-CRF and Transformer models via K-fold CV and save them.

    Workflow: ensure the data file exists -> build the windowed dataset ->
    5-fold cross-validated training of four models (red/blue x LSTM/Transformer)
    with gradient clipping, per-model LR scheduling and early stopping ->
    record each fold's best weights -> load the LAST fold's best weights into
    fresh models and persist everything (plus the fitted scaler) to disk.
    Exits the process if the data file is missing.
    """
    fetch_data_if_not_exists()

    if not os.path.exists(DATA_FILE):
        logger.error(f"数据文件不存在: {DATA_FILE}")
        sys.exit(1)

    # Load the dataset (windowing/scaling happens inside LotteryDataset).
    logger.info("加载数据...")
    dataset = LotteryDataset(DATA_FILE, WINDOW_SIZE)
    
    # K-fold cross-validation.
    # NOTE(review): shuffled KFold on draw history mixes future draws into the
    # training folds of earlier draws — consider TimeSeriesSplit if ordering matters.
    k_folds = 5
    kfold = KFold(n_splits=k_folds, shuffle=True, random_state=42)
    
    input_dim = dataset.features.shape[-1]
    
    # Use the GPU when available.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logger.info(f"使用设备: {device}")
    
    # Best model weights collected from each fold.
    best_models = []
    
    # Cross-validated training.
    logger.info(f"开始{k_folds}折交叉验证训练...")
    
    for fold, (train_ids, val_ids) in enumerate(kfold.split(dataset)):
        logger.info(f"开始第 {fold + 1} 折训练")
        
        # Data loaders restricted to this fold's train/val indices.
        train_subsampler = torch.utils.data.SubsetRandomSampler(train_ids)
        val_subsampler = torch.utils.data.SubsetRandomSampler(val_ids)
        
        train_loader = DataLoader(dataset, batch_size=BATCH_SIZE, sampler=train_subsampler)
        val_loader = DataLoader(dataset, batch_size=BATCH_SIZE, sampler=val_subsampler)
        
        # Fresh models for every fold.
        # LSTM-CRF models: red balls have 35 classes x 5 positions,
        # blue balls 12 classes x 2 positions.
        red_lstm_model = LstmCRFModel(input_dim, hidden_dim=128, output_dim=35, output_seq_length=5, num_layers=2).to(device)
        blue_lstm_model = LstmCRFModel(input_dim, hidden_dim=128, output_dim=12, output_seq_length=2, num_layers=2).to(device)
        
        # Transformer models with matching output spaces.
        red_transformer_model = TransformerModel(input_dim, hidden_dim=128, output_dim=35, output_seq_length=5).to(device)
        blue_transformer_model = TransformerModel(input_dim, hidden_dim=128, output_dim=12, output_seq_length=2).to(device)
        
        # One Adam optimizer per model, with light weight decay.
        red_lstm_optimizer = torch.optim.Adam(red_lstm_model.parameters(), lr=LEARNING_RATE, weight_decay=1e-5)
        blue_lstm_optimizer = torch.optim.Adam(blue_lstm_model.parameters(), lr=LEARNING_RATE, weight_decay=1e-5)
        red_transformer_optimizer = torch.optim.Adam(red_transformer_model.parameters(), lr=LEARNING_RATE, weight_decay=1e-5)
        blue_transformer_optimizer = torch.optim.Adam(blue_transformer_model.parameters(), lr=LEARNING_RATE, weight_decay=1e-5)
        
        # Halve the LR when validation loss plateaus for 5 epochs.
        red_lstm_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(red_lstm_optimizer, mode='min', factor=0.5, patience=5)
        blue_lstm_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(blue_lstm_optimizer, mode='min', factor=0.5, patience=5)
        red_transformer_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(red_transformer_optimizer, mode='min', factor=0.5, patience=5)
        blue_transformer_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(blue_transformer_optimizer, mode='min', factor=0.5, patience=5)
        
        # Training state: best-so-far snapshot + early-stopping counter.
        best_val_loss = float('inf')
        best_fold_models = {
            'red_lstm': copy.deepcopy(red_lstm_model.state_dict()),
            'blue_lstm': copy.deepcopy(blue_lstm_model.state_dict()),
            'red_transformer': copy.deepcopy(red_transformer_model.state_dict()),
            'blue_transformer': copy.deepcopy(blue_transformer_model.state_dict())
        }
        trigger_times = 0
        
        for epoch in range(EPOCHS):
            # Switch all four models to training mode.
            red_lstm_model.train()
            blue_lstm_model.train()
            red_transformer_model.train()
            blue_transformer_model.train()
            
            total_red_lstm_loss = 0
            total_blue_lstm_loss = 0
            total_red_transformer_loss = 0
            total_blue_transformer_loss = 0
            
            for features, labels in train_loader:
                features = features.to(device)
                labels = labels.to(device)
                
                # Red balls — LSTM. Labels are columns 0..4; mask marks
                # non-negative (valid) targets.
                red_labels = labels[:, :5]
                red_mask = (red_labels >= 0)
                red_lstm_loss = red_lstm_model(features, red_labels, red_mask)
                red_lstm_optimizer.zero_grad()
                red_lstm_loss.backward()
                torch.nn.utils.clip_grad_norm_(red_lstm_model.parameters(), 1.0)  # gradient clipping
                red_lstm_optimizer.step()
                total_red_lstm_loss += red_lstm_loss.item()
                
                # Blue balls — LSTM. Labels are the remaining columns.
                blue_labels = labels[:, 5:]
                blue_mask = (blue_labels >= 0)
                blue_lstm_loss = blue_lstm_model(features, blue_labels, blue_mask)
                blue_lstm_optimizer.zero_grad()
                blue_lstm_loss.backward()
                torch.nn.utils.clip_grad_norm_(blue_lstm_model.parameters(), 1.0)
                blue_lstm_optimizer.step()
                total_blue_lstm_loss += blue_lstm_loss.item()
                
                # Red balls — Transformer.
                red_transformer_loss = red_transformer_model(features, red_labels, red_mask)
                red_transformer_optimizer.zero_grad()
                red_transformer_loss.backward()
                torch.nn.utils.clip_grad_norm_(red_transformer_model.parameters(), 1.0)
                red_transformer_optimizer.step()
                total_red_transformer_loss += red_transformer_loss.item()
                
                # Blue balls — Transformer.
                blue_transformer_loss = blue_transformer_model(features, blue_labels, blue_mask)
                blue_transformer_optimizer.zero_grad()
                blue_transformer_loss.backward()
                torch.nn.utils.clip_grad_norm_(blue_transformer_model.parameters(), 1.0)
                blue_transformer_optimizer.step()
                total_blue_transformer_loss += blue_transformer_loss.item()
            
            # Average training losses over batches.
            avg_red_lstm_loss = total_red_lstm_loss / len(train_loader)
            avg_blue_lstm_loss = total_blue_lstm_loss / len(train_loader)
            avg_red_transformer_loss = total_red_transformer_loss / len(train_loader)
            avg_blue_transformer_loss = total_blue_transformer_loss / len(train_loader)
            
            # Validation pass (no gradients).
            red_lstm_model.eval()
            blue_lstm_model.eval()
            red_transformer_model.eval()
            blue_transformer_model.eval()
            
            val_red_lstm_loss = 0
            val_blue_lstm_loss = 0
            val_red_transformer_loss = 0
            val_blue_transformer_loss = 0
            
            all_red_lstm_preds, all_red_transformer_preds = [], []
            all_blue_lstm_preds, all_blue_transformer_preds = [], []
            all_red_labels, all_blue_labels = [], []
            
            with torch.no_grad():
                for features, labels in val_loader:
                    features = features.to(device)
                    labels = labels.to(device)
                    
                    # Red balls — LSTM validation loss and predictions.
                    red_labels = labels[:, :5]
                    red_mask = (red_labels >= 0)
                    red_lstm_loss = red_lstm_model(features, red_labels, red_mask)
                    val_red_lstm_loss += red_lstm_loss.item()
                    red_lstm_preds = red_lstm_model(features)
                    # NOTE(review): predictions are flattened by iterating the
                    # returned sequences, while labels below are flattened
                    # batch-major via numpy; if the model returns one list per
                    # OUTPUT POSITION (as TransformerModel does) the two orders
                    # disagree and the accuracies computed from these lists are
                    # unreliable — verify LstmCRFModel's prediction layout.
                    all_red_lstm_preds.extend([pred for sequence in red_lstm_preds for pred in sequence])
                    
                    # Red balls — Transformer validation loss and predictions.
                    red_transformer_loss = red_transformer_model(features, red_labels, red_mask)
                    val_red_transformer_loss += red_transformer_loss.item()
                    red_transformer_preds = red_transformer_model(features)
                    all_red_transformer_preds.extend([pred for sequence in red_transformer_preds for pred in sequence])
                    
                    all_red_labels.extend(red_labels.cpu().numpy().flatten())
                    
                    # Blue balls — LSTM validation loss and predictions.
                    blue_labels = labels[:, 5:]
                    blue_mask = (blue_labels >= 0)
                    blue_lstm_loss = blue_lstm_model(features, blue_labels, blue_mask)
                    val_blue_lstm_loss += blue_lstm_loss.item()
                    blue_lstm_preds = blue_lstm_model(features)
                    all_blue_lstm_preds.extend([pred for sequence in blue_lstm_preds for pred in sequence])
                    
                    # Blue balls — Transformer validation loss and predictions.
                    blue_transformer_loss = blue_transformer_model(features, blue_labels, blue_mask)
                    val_blue_transformer_loss += blue_transformer_loss.item()
                    blue_transformer_preds = blue_transformer_model(features)
                    all_blue_transformer_preds.extend([pred for sequence in blue_transformer_preds for pred in sequence])
                    
                    all_blue_labels.extend(blue_labels.cpu().numpy().flatten())
            
            # Average validation losses over batches.
            avg_val_red_lstm_loss = val_red_lstm_loss / len(val_loader)
            avg_val_blue_lstm_loss = val_blue_lstm_loss / len(val_loader)
            avg_val_red_transformer_loss = val_red_transformer_loss / len(val_loader)
            avg_val_blue_transformer_loss = val_blue_transformer_loss / len(val_loader)
            
            # Per-model accuracies (see the ordering caveat above).
            red_lstm_accuracy = accuracy_score(all_red_labels, all_red_lstm_preds)
            blue_lstm_accuracy = accuracy_score(all_blue_labels, all_blue_lstm_preds)
            red_transformer_accuracy = accuracy_score(all_red_labels, all_red_transformer_preds)
            blue_transformer_accuracy = accuracy_score(all_blue_labels, all_blue_transformer_preds)
            
            # Ensemble predictions by stochastic weighted voting.
            # NOTE(review): np.random makes this metric nondeterministic
            # between runs/epochs.
            all_red_ensemble_preds = []
            all_blue_ensemble_preds = []
            
            for i in range(len(all_red_lstm_preds)):
                # Simple voting: pick LSTM or Transformer per element.
                if np.random.rand() < 0.6:  # 60% weight to the LSTM
                    all_red_ensemble_preds.append(all_red_lstm_preds[i])
                else:
                    all_red_ensemble_preds.append(all_red_transformer_preds[i])
                    
            for i in range(len(all_blue_lstm_preds)):
                if np.random.rand() < 0.6:
                    all_blue_ensemble_preds.append(all_blue_lstm_preds[i])
                else:
                    all_blue_ensemble_preds.append(all_blue_transformer_preds[i])
            
            # Ensemble accuracies.
            red_ensemble_accuracy = accuracy_score(all_red_labels, all_red_ensemble_preds)
            blue_ensemble_accuracy = accuracy_score(all_blue_labels, all_blue_ensemble_preds)
            
            logger.info(f"Fold {fold+1}, Epoch {epoch+1}: "
                        f"LSTM - 红球 Loss = {avg_val_red_lstm_loss:.4f}, 蓝球 Loss = {avg_val_blue_lstm_loss:.4f}, "
                        f"红球 Acc = {red_lstm_accuracy:.4f}, 蓝球 Acc = {blue_lstm_accuracy:.4f} | "
                        f"Transformer - 红球 Loss = {avg_val_red_transformer_loss:.4f}, 蓝球 Loss = {avg_val_blue_transformer_loss:.4f}, "
                        f"红球 Acc = {red_transformer_accuracy:.4f}, 蓝球 Acc = {blue_transformer_accuracy:.4f} | "
                        f"集成 - 红球 Acc = {red_ensemble_accuracy:.4f}, 蓝球 Acc = {blue_ensemble_accuracy:.4f}")
            
            # Feed validation losses to the LR schedulers.
            total_val_lstm_loss = avg_val_red_lstm_loss + avg_val_blue_lstm_loss
            total_val_transformer_loss = avg_val_red_transformer_loss + avg_val_blue_transformer_loss
            
            red_lstm_scheduler.step(avg_val_red_lstm_loss)
            blue_lstm_scheduler.step(avg_val_blue_lstm_loss)
            red_transformer_scheduler.step(avg_val_red_transformer_loss)
            blue_transformer_scheduler.step(avg_val_blue_transformer_loss)
            
            # Early stopping on the combined (LSTM + Transformer) validation loss.
            total_val_loss = (total_val_lstm_loss + total_val_transformer_loss) / 2
            
            if total_val_loss < best_val_loss:
                best_val_loss = total_val_loss
                best_fold_models = {
                    'red_lstm': copy.deepcopy(red_lstm_model.state_dict()),
                    'blue_lstm': copy.deepcopy(blue_lstm_model.state_dict()),
                    'red_transformer': copy.deepcopy(red_transformer_model.state_dict()),
                    'blue_transformer': copy.deepcopy(blue_transformer_model.state_dict())
                }
                trigger_times = 0
            else:
                trigger_times += 1
                if trigger_times >= PATIENCE:
                    logger.info(f"早停触发，第 {fold+1} 折训练停止在第 {epoch+1} 轮")
                    break
        
        # Keep this fold's best snapshot.
        best_models.append(best_fold_models)
        logger.info(f"第 {fold+1} 折训练完成，最佳验证损失: {best_val_loss:.4f}")
    
    # Combine the per-fold best models into the final artifacts.
    logger.info("集成所有折叠的最佳模型...")
    
    # Fresh model instances to receive the chosen weights.
    final_red_lstm_model = LstmCRFModel(input_dim, hidden_dim=128, output_dim=35, output_seq_length=5, num_layers=2).to(device)
    final_blue_lstm_model = LstmCRFModel(input_dim, hidden_dim=128, output_dim=12, output_seq_length=2, num_layers=2).to(device)
    final_red_transformer_model = TransformerModel(input_dim, hidden_dim=128, output_dim=35, output_seq_length=5).to(device)
    final_blue_transformer_model = TransformerModel(input_dim, hidden_dim=128, output_dim=12, output_seq_length=2).to(device)
    
    # NOTE(review): only the LAST fold's best weights are used; the other
    # folds' snapshots in best_models are discarded — confirm this is intended.
    final_red_lstm_model.load_state_dict(best_models[-1]['red_lstm'])
    final_blue_lstm_model.load_state_dict(best_models[-1]['blue_lstm'])
    final_red_transformer_model.load_state_dict(best_models[-1]['red_transformer'])
    final_blue_transformer_model.load_state_dict(best_models[-1]['blue_transformer'])
    
    # Build the ensemble wrappers.
    # NOTE(review): red_ensemble/blue_ensemble are never used afterwards —
    # only their configuration is saved below.
    red_ensemble = EnsembleModel(final_red_lstm_model, final_red_transformer_model)
    blue_ensemble = EnsembleModel(final_blue_lstm_model, final_blue_transformer_model)
    
    # Persist models and the feature scaler.
    os.makedirs(os.path.dirname(MODEL_PATH), exist_ok=True)
    
    # Save the LSTM pair.
    lstm_model_path = os.path.join(os.path.dirname(MODEL_PATH), "dlt_lstm_model.pth")
    torch.save({
        "red_model": final_red_lstm_model.state_dict(),
        "blue_model": final_blue_lstm_model.state_dict()
    }, lstm_model_path)
    
    # Save the Transformer pair.
    transformer_model_path = os.path.join(os.path.dirname(MODEL_PATH), "dlt_transformer_model.pth")
    torch.save({
        "red_model": final_red_transformer_model.state_dict(),
        "blue_model": final_blue_transformer_model.state_dict()
    }, transformer_model_path)
    
    # Ensemble configuration (paths + voting weights).
    ensemble_config = {
        "lstm_model_path": lstm_model_path,
        "transformer_model_path": transformer_model_path,
        "weights": [0.6, 0.4]  # LSTM weight and Transformer weight
    }
    
    # Save the combined checkpoint as the main model.
    torch.save({
        "ensemble_config": ensemble_config,
        "red_lstm_model": final_red_lstm_model.state_dict(),
        "blue_lstm_model": final_blue_lstm_model.state_dict(),
        "red_transformer_model": final_red_transformer_model.state_dict(),
        "blue_transformer_model": final_blue_transformer_model.state_dict()
    }, MODEL_PATH)
    
    # Save the fitted MinMaxScaler so inference can reproduce the scaling.
    joblib.dump(dataset.scaler_X, SCALER_PATH)
    
    logger.info(f"LSTM模型已保存到 {lstm_model_path}")
    logger.info(f"Transformer模型已保存到 {transformer_model_path}")
    logger.info(f"集成模型已保存到 {MODEL_PATH}")
    logger.info(f"缩放器已保存到 {SCALER_PATH}")

if __name__ == "__main__":
    # Script entry point: fetch data if needed, train, and save all models.
    logger.info("开始训练模型...")
    train_model()
    logger.info("模型训练完成。")
