import os
import sys
import subprocess
import torch
import numpy as np
import pandas as pd
from torch.utils.data import Dataset, DataLoader
from sklearn.preprocessing import MinMaxScaler
import joblib
from loguru import logger
from sklearn.metrics import accuracy_score
import copy

# Configure a safe loguru logger; fall back to a plain stdout sink if the
# project-specific helper module is not importable.
try:
    from safe_logger_config import configure_safe_logger
    logger = configure_safe_logger()
except ImportError:
    # Fallback configuration: stdout sink at INFO level with a timestamped format.
    from loguru import logger
    logger.remove()
    logger.add(sys.stdout, 
              format="{time:YYYY-MM-DD HH:mm:ss.SSS} | {level:<8} | {name}:{function}:{line} - {message}", 
              level="INFO")

# ---------------- Configuration ----------------
current_dir = os.path.dirname(os.path.abspath(__file__))
DATA_FILE = os.path.join(current_dir, "plw_history.csv")  # historical draws (CSV)
MODEL_PATH = os.path.join(current_dir, "plw_model.pth")   # combined checkpoint
SCALER_PATH = os.path.join(current_dir, "scaler_X.pkl")   # fitted feature scaler
BATCH_SIZE = 32
EPOCHS = 1000  # upper bound on epochs; early stopping usually ends sooner
LEARNING_RATE = 0.001
WINDOW_SIZE = 10  # number of past draws in each input sequence
PATIENCE = 10  # early-stopping patience (epochs without validation improvement)

# Make the project root importable so the shared `model` module can be found.
project_root = os.path.abspath(os.path.join(current_dir, '..', '..'))
sys.path.append(project_root)

try:
    from model import LstmCRFModel  # absolute import from the project root
except ImportError as e:
    logger.error(f"导入模型类失败: {e}")
    sys.exit(1)

from sklearn.model_selection import train_test_split

class PLWDataset(Dataset):
    def __init__(self, csv_file, window_size):
        """
        Pailie-5 (排列5) lottery dataset.

        Each draw is 5 independent digits; the preprocessing below enforces a
        0-9 range per digit (there is no red/blue ball split). Builds sliding
        windows of `window_size` consecutive draws as features and the
        immediately following draw as the label.

        Args:
            csv_file: path to the history CSV file.
            window_size: number of consecutive draws per input sequence.
        """
        self.data = pd.read_csv(csv_file)
        logger.info(f"原始数据形状: {self.data.shape}")
        logger.info(f"数据列: {self.data.columns.tolist()}")
        
        # Normalize the raw CSV into the standard draw_issue + num_1..num_5 layout.
        self.preprocess_data()
        
        # NOTE(review): the scaler is fit on the entire dataset here, before any
        # downstream train/validation split — mild data leakage; confirm intended.
        self.scaler_X = MinMaxScaler()
        self.features, self.labels = self.prepare_sequences(window_size)

    def preprocess_data(self):
        """Normalize raw columns into 'draw_issue' plus num_1..num_5 (ints 0-9)."""
        # If draws come as a single comma-separated string, split into digits.
        if 'draw_numbers' in self.data.columns:
            self.data['numbers'] = self.data['draw_numbers'].str.split(',')
            
            # Create the 5 digit columns (valid digit range is 0-9).
            # NOTE(review): malformed or out-of-range entries silently become 0,
            # which is indistinguishable from a genuine digit 0 — confirm this
            # is acceptable rather than dropping the row.
            for i in range(5):
                self.data[f'num_{i+1}'] = self.data['numbers'].apply(
                    lambda x: int(x[i]) if len(x) > i and x[i].strip().isdigit() and 0 <= int(x[i]) <= 9 else 0
                )
            
            # Drop the temporary split column.
            self.data.drop(['numbers'], axis=1, inplace=True)
        
        # Ensure an issue-number column exists; fall back to a running index.
        if 'draw_issue' not in self.data.columns and '期数' not in self.data.columns:
            self.data['draw_issue'] = range(len(self.data))
        
        # Collect candidate digit columns under either naming convention.
        number_columns = [col for col in self.data.columns if 'num_' in col or '号码' in col]
        if len(number_columns) < 5:
            logger.error(f"数据格式错误：找到的号码列数量不足: {number_columns}")
            raise ValueError("排列5数据必须包含5个号码")
        
        # Rename to the standard num_1..num_5 format.
        # NOTE(review): this relies on number_columns preserving positional
        # order — verify against the source CSV's column ordering.
        for i in range(5):
            if f'num_{i+1}' not in self.data.columns:
                self.data[f'num_{i+1}'] = self.data[number_columns[i]]
        
        # Keep only the columns the model needs.
        self.data = self.data[['draw_issue'] + [f'num_{i+1}' for i in range(5)]].copy()
        
        # Sort chronologically by issue number, dropping unparseable issues.
        if 'draw_issue' in self.data.columns:
            self.data['draw_issue'] = pd.to_numeric(self.data['draw_issue'], errors='coerce')
            self.data = self.data.dropna(subset=['draw_issue']).sort_values(by='draw_issue').reset_index(drop=True)
        
        logger.info(f"预处理后数据形状: {self.data.shape}")
        logger.info(f"数据样例:\n{self.data.head()}")

    def prepare_sequences(self, window_size):
        """Build (features, labels) tensors from sliding windows.

        Returns:
            features: float32 tensor of shape (num_samples, window_size, 5),
                min-max scaled per digit position.
            labels: long tensor of shape (num_samples, 5) with raw digits 0-9.
        """
        features, labels = [], []
        
        # Digit columns only (the issue-number column is excluded).
        number_data = self.data[[f'num_{i+1}' for i in range(5)]].values
        
        for i in range(len(number_data) - window_size):
            # Feature: the 5 digits of each draw inside the window.
            feature_window = number_data[i:i + window_size]  # shape: (window_size, 5)
            features.append(feature_window)

            # Label: the next draw's 5 digits (already 0-9, no offset needed).
            next_numbers = number_data[i + window_size]  # shape: (5,)
            labels.append(next_numbers)

        # Convert to NumPy arrays.
        features_np = np.array(features)  # shape: (num_samples, window_size, 5)
        labels_np = np.array(labels)      # shape: (num_samples, 5)
        
        # Scale features: flatten to 2-D for the scaler, then restore the shape.
        original_shape = features_np.shape
        features_scaled = self.scaler_X.fit_transform(
            features_np.reshape(-1, features_np.shape[-1])
        ).reshape(original_shape)

        logger.info(f"特征形状: {features_scaled.shape}")
        logger.info(f"标签形状: {labels_np.shape}")
        logger.info(f"标签范围: {labels_np.min()} - {labels_np.max()}")

        return (
            torch.tensor(features_scaled, dtype=torch.float32),
            torch.tensor(labels_np, dtype=torch.long)
        )

    def __len__(self):
        # Number of sliding-window samples.
        return len(self.features)

    def __getitem__(self, idx):
        # Returns (feature_window, next_draw_digits) for one sample.
        return self.features[idx], self.labels[idx]

def fetch_data_if_not_exists():
    """Ensure the history CSV exists, running fetch_plw_data.py to create it if missing."""
    if os.path.exists(DATA_FILE):
        logger.info(f"数据文件 {DATA_FILE} 已存在。")
        return

    logger.info(f"数据文件 {DATA_FILE} 不存在，开始获取数据...")
    fetch_script = os.path.join(current_dir, 'fetch_plw_data.py')
    if not os.path.exists(fetch_script):
        logger.error(f"数据获取脚本不存在: {fetch_script}")
        sys.exit(1)

    # Reuse the interpreter running this script so the child sees the same env.
    interpreter = sys.executable
    try:
        logger.info(f"运行数据获取脚本: {fetch_script} 使用解释器: {interpreter}")
        subprocess.run([interpreter, fetch_script], check=True, 
                      encoding='utf-8', errors='replace')
        logger.info("数据获取完成。")
    except subprocess.CalledProcessError as e:
        logger.error(f"运行数据获取脚本失败: {e}")
        sys.exit(1)

# 在import部分添加
import torch.nn as nn
from sklearn.model_selection import KFold
from torch.nn import TransformerEncoder, TransformerEncoderLayer

# Transformer model predicting the next draw's 5 digits.
class TransformerModel(nn.Module):
    """Transformer encoder that predicts 5 independent digit positions.

    Encodes a window of past draws and emits independent logits for each of
    the `output_seq_length` positions (class range 0..output_dim-1). Fixes
    the original design in which one shared decoder head produced identical
    logits — and therefore identical predictions — for every position.

    Args:
        input_dim: features per timestep (5 digit positions).
        hidden_dim: transformer model dimension (d_model).
        output_dim: classes per position (10 for digits 0-9).
        output_seq_length: number of digit positions to predict (5).
        num_layers: stacked encoder layers.
        nhead: attention heads (must divide hidden_dim).
        dropout: dropout probability.
    """

    def __init__(self, input_dim, hidden_dim, output_dim, output_seq_length, num_layers=2, nhead=4, dropout=0.1):
        super(TransformerModel, self).__init__()
        self.model_type = 'Transformer'
        # Input projection into the transformer's model dimension.
        # NOTE(review): despite the name, this adds no positional information;
        # consider a true sinusoidal/learned positional encoding.
        self.pos_encoder = nn.Linear(input_dim, hidden_dim)
        encoder_layers = TransformerEncoderLayer(d_model=hidden_dim, nhead=nhead, dim_feedforward=hidden_dim*4, dropout=dropout)
        self.transformer_encoder = TransformerEncoder(encoder_layers, num_layers)
        # One independent head per output position (output_seq_length blocks
        # of output_dim logits each), instead of a single shared head.
        self.decoder = nn.Linear(hidden_dim, output_dim * output_seq_length)
        self.output_dim = output_dim
        self.output_seq_length = output_seq_length
        self.hidden_dim = hidden_dim
        self.dropout = nn.Dropout(dropout)
        self.init_weights()

    def init_weights(self):
        """Initialize the decoder with small uniform weights and zero bias."""
        initrange = 0.1
        self.decoder.bias.data.zero_()
        self.decoder.weight.data.uniform_(-initrange, initrange)

    def forward(self, src, target=None, mask=None):
        """Return the masked CE loss when (target, mask) are given, else predictions.

        Args:
            src: float tensor [batch_size, seq_len, input_dim].
            target: optional long tensor [batch_size, output_seq_length].
            mask: optional bool tensor [batch_size, output_seq_length]; True
                marks positions that contribute to the loss.

        Returns:
            Scalar loss tensor in training mode; otherwise a list of
            `output_seq_length` numpy arrays, each of shape [batch_size].
        """
        # [batch, seq, feat] -> [seq, batch, feat] (encoder-layer default layout).
        src = src.permute(1, 0, 2)
        src = self.pos_encoder(src)
        output = self.transformer_encoder(src)
        output = self.dropout(output)
        # Mean-pool over time, then project to per-position logits.
        pooled = output.mean(dim=0)  # [batch_size, hidden_dim]
        logits = self.decoder(pooled).view(-1, self.output_seq_length, self.output_dim)

        if target is not None and mask is not None:
            # Training mode: masked cross-entropy averaged over positions.
            loss_fct = nn.CrossEntropyLoss(reduction='none')
            loss = 0
            for i in range(self.output_seq_length):
                active_loss = mask[:, i].float()
                if active_loss.sum() > 0:
                    # Use this position's own logits (the original reused one
                    # shared logit block for every position).
                    active_logits = logits[:, i, :]
                    active_labels = target[:, i].view(-1)
                    loss += (loss_fct(active_logits, active_labels) * active_loss).sum() / active_loss.sum()
            return loss / self.output_seq_length

        # Prediction mode: per-position argmax (positions now differ).
        predictions = []
        for i in range(self.output_seq_length):
            predictions.append(torch.argmax(logits[:, i, :], dim=1).cpu().numpy())
        return predictions

# Ensemble wrapper over the two trained models.
class EnsembleModel:
    """Stochastic-vote ensemble of an LSTM-CRF model and a Transformer model.

    Each prediction entry is taken from the LSTM model with probability
    `weights[0]`, otherwise from the Transformer model. (weights[1] is kept
    for configuration symmetry but is not read directly.)
    """

    def __init__(self, lstm_model, transformer_model, weights=[0.6, 0.4]):
        self.lstm_model = lstm_model
        self.transformer_model = transformer_model
        self.weights = weights

    def predict(self, features):
        """Return predictions randomly chosen per entry between the two models."""
        self.lstm_model.eval()
        self.transformer_model.eval()

        with torch.no_grad():
            from_lstm = self.lstm_model(features)
            from_transformer = self.transformer_model(features)

            # Weighted coin flip per entry: keep one model's output.
            chosen = []
            for lstm_out, transformer_out in zip(from_lstm, from_transformer):
                take_lstm = np.random.rand() < self.weights[0]
                chosen.append(lstm_out if take_lstm else transformer_out)

            return chosen

def train_model():
    """Train, cross-validate, and persist the Pailie-5 prediction models.

    Pipeline: ensure the history CSV exists, build the sliding-window
    dataset, train an LSTM-CRF model and a Transformer model side by side
    under 5-fold cross-validation with early stopping, then save the final
    weights, an ensemble configuration, and the fitted feature scaler.
    """
    fetch_data_if_not_exists()

    if not os.path.exists(DATA_FILE):
        logger.error(f"数据文件不存在: {DATA_FILE}")
        sys.exit(1)

    # Load and preprocess the data.
    logger.info("加载排列5数据...")
    dataset = PLWDataset(DATA_FILE, WINDOW_SIZE)
    
    # K-fold cross-validation.
    # NOTE(review): shuffled folds over overlapping time-series windows put
    # near-duplicate windows in both train and validation sets — confirm
    # this leakage is acceptable versus a chronological split.
    k_folds = 5
    kfold = KFold(n_splits=k_folds, shuffle=True, random_state=42)
    
    input_dim = dataset.features.shape[-1]  # 5 digit positions per timestep
    
    # Prefer the GPU when available.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logger.info(f"使用设备: {device}")
    
    # Best model weights collected from each fold.
    best_models = []
    
    # Cross-validation training loop.
    logger.info(f"开始{k_folds}折交叉验证训练...")
    
    for fold, (train_ids, val_ids) in enumerate(kfold.split(dataset)):
        logger.info(f"开始第 {fold + 1} 折训练")
        
        # Data loaders restricted to this fold's index sets.
        train_subsampler = torch.utils.data.SubsetRandomSampler(train_ids)
        val_subsampler = torch.utils.data.SubsetRandomSampler(val_ids)
        
        train_loader = DataLoader(dataset, batch_size=BATCH_SIZE, sampler=train_subsampler)
        val_loader = DataLoader(dataset, batch_size=BATCH_SIZE, sampler=val_subsampler)
        
        # Fresh models per fold; one model predicts all 5 digit positions.
        # LSTM-CRF model — output_dim=10 covers digits 0-9.
        lstm_model = LstmCRFModel(input_dim, hidden_dim=128, output_dim=10, output_seq_length=5, num_layers=2).to(device)
        
        # Transformer model.
        transformer_model = TransformerModel(input_dim, hidden_dim=128, output_dim=10, output_seq_length=5).to(device)
        
        # Optimizers with light L2 regularization.
        lstm_optimizer = torch.optim.Adam(lstm_model.parameters(), lr=LEARNING_RATE, weight_decay=1e-5)
        transformer_optimizer = torch.optim.Adam(transformer_model.parameters(), lr=LEARNING_RATE, weight_decay=1e-5)
        
        # Halve the learning rate when validation loss plateaus.
        lstm_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(lstm_optimizer, mode='min', factor=0.5, patience=5)
        transformer_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(transformer_optimizer, mode='min', factor=0.5, patience=5)
        
        # Per-fold training state.
        best_val_loss = float('inf')
        best_fold_models = {
            'lstm': copy.deepcopy(lstm_model.state_dict()),
            'transformer': copy.deepcopy(transformer_model.state_dict())
        }
        trigger_times = 0  # epochs since the last validation improvement
        
        for epoch in range(EPOCHS):
            # Training phase.
            lstm_model.train()
            transformer_model.train()
            
            total_lstm_loss = 0
            total_transformer_loss = 0
            
            for features, labels in train_loader:
                features = features.to(device)
                labels = labels.to(device)
                
                # LSTM step. Every label position is a valid digit (0-9),
                # so the loss mask is all True.
                mask = torch.ones_like(labels, dtype=torch.bool)  # all-True mask
                lstm_loss = lstm_model(features, labels, mask)
                lstm_optimizer.zero_grad()
                lstm_loss.backward()
                torch.nn.utils.clip_grad_norm_(lstm_model.parameters(), 1.0)  # gradient clipping
                lstm_optimizer.step()
                total_lstm_loss += lstm_loss.item()
                
                # Transformer step.
                transformer_loss = transformer_model(features, labels, mask)
                transformer_optimizer.zero_grad()
                transformer_loss.backward()
                torch.nn.utils.clip_grad_norm_(transformer_model.parameters(), 1.0)
                transformer_optimizer.step()
                total_transformer_loss += transformer_loss.item()
            
            # Mean training loss per batch.
            avg_lstm_loss = total_lstm_loss / len(train_loader)
            avg_transformer_loss = total_transformer_loss / len(train_loader)
            
            # Validation phase.
            lstm_model.eval()
            transformer_model.eval()
            
            val_lstm_loss = 0
            val_transformer_loss = 0
            
            all_lstm_preds, all_transformer_preds = [], []
            all_labels = []
            
            with torch.no_grad():
                for features, labels in val_loader:
                    features = features.to(device)
                    labels = labels.to(device)
                    
                    # Validation loss with the same all-True mask as training.
                    mask = torch.ones_like(labels, dtype=torch.bool)
                    lstm_loss = lstm_model(features, labels, mask)
                    transformer_loss = transformer_model(features, labels, mask)
                    val_lstm_loss += lstm_loss.item()
                    val_transformer_loss += transformer_loss.item()
                    
                    # Collect predictions for the accuracy metrics.
                    lstm_preds = lstm_model(features)
                    transformer_preds = transformer_model(features)
                    
                    # NOTE(review): if the models return one array per output
                    # position, this flattens position-major while the labels
                    # below flatten sample-major — the accuracy pairing would
                    # be misaligned. Verify LstmCRFModel's output layout.
                    all_lstm_preds.extend([pred for sequence in lstm_preds for pred in sequence])
                    all_transformer_preds.extend([pred for sequence in transformer_preds for pred in sequence])
                    all_labels.extend(labels.cpu().numpy().flatten())
            
            # Mean validation loss per batch.
            avg_val_lstm_loss = val_lstm_loss / len(val_loader)
            avg_val_transformer_loss = val_transformer_loss / len(val_loader)
            
            # Per-digit accuracy.
            lstm_accuracy = accuracy_score(all_labels, all_lstm_preds)
            transformer_accuracy = accuracy_score(all_labels, all_transformer_preds)
            
            # Ensemble prediction by stochastic vote (metric is non-deterministic).
            all_ensemble_preds = []
            for i in range(len(all_lstm_preds)):
                # Simple weighted coin flip: 60% LSTM, 40% Transformer.
                if np.random.rand() < 0.6:
                    all_ensemble_preds.append(all_lstm_preds[i])
                else:
                    all_ensemble_preds.append(all_transformer_preds[i])
            
            # Ensemble accuracy.
            ensemble_accuracy = accuracy_score(all_labels, all_ensemble_preds)
            
            logger.info(f"Fold {fold+1}, Epoch {epoch+1}: "
                        f"LSTM - Loss = {avg_val_lstm_loss:.4f}, Acc = {lstm_accuracy:.4f} | "
                        f"Transformer - Loss = {avg_val_transformer_loss:.4f}, Acc = {transformer_accuracy:.4f} | "
                        f"集成 - Acc = {ensemble_accuracy:.4f}")
            
            # Step the LR schedulers on validation loss.
            lstm_scheduler.step(avg_val_lstm_loss)
            transformer_scheduler.step(avg_val_transformer_loss)
            
            # Early stopping on the mean of both models' validation losses.
            total_val_loss = (avg_val_lstm_loss + avg_val_transformer_loss) / 2
            
            if total_val_loss < best_val_loss:
                best_val_loss = total_val_loss
                best_fold_models = {
                    'lstm': copy.deepcopy(lstm_model.state_dict()),
                    'transformer': copy.deepcopy(transformer_model.state_dict())
                }
                trigger_times = 0
            else:
                trigger_times += 1
                if trigger_times >= PATIENCE:
                    logger.info(f"早停触发，第 {fold+1} 折训练停止在第 {epoch+1} 轮")
                    break
        
        # Keep this fold's best weights.
        best_models.append(best_fold_models)
        logger.info(f"第 {fold+1} 折训练完成，最佳验证损失: {best_val_loss:.4f}")
    
    # Assemble the final models from the cross-validation results.
    logger.info("集成所有折叠的最佳模型...")
    
    # Fresh model instances to hold the final weights.
    final_lstm_model = LstmCRFModel(input_dim, hidden_dim=128, output_dim=10, output_seq_length=5, num_layers=2).to(device)
    final_transformer_model = TransformerModel(input_dim, hidden_dim=128, output_dim=10, output_seq_length=5).to(device)
    
    # NOTE(review): only the LAST fold's best weights are used — the other
    # folds' models are discarded despite the "ensemble all folds" log above.
    final_lstm_model.load_state_dict(best_models[-1]['lstm'])
    final_transformer_model.load_state_dict(best_models[-1]['transformer'])
    
    # Wrap both models in the stochastic-vote ensemble.
    ensemble_model = EnsembleModel(final_lstm_model, final_transformer_model)
    
    # Persist models and the scaler.
    os.makedirs(os.path.dirname(MODEL_PATH), exist_ok=True)
    
    # Save the LSTM model.
    lstm_model_path = os.path.join(os.path.dirname(MODEL_PATH), "plw_lstm_model.pth")
    torch.save({
        "model": final_lstm_model.state_dict()
    }, lstm_model_path)
    
    # Save the Transformer model.
    transformer_model_path = os.path.join(os.path.dirname(MODEL_PATH), "plw_transformer_model.pth")
    torch.save({
        "model": final_transformer_model.state_dict()
    }, transformer_model_path)
    
    # Ensemble configuration: model paths plus vote weights.
    ensemble_config = {
        "lstm_model_path": lstm_model_path,
        "transformer_model_path": transformer_model_path,
        "weights": [0.6, 0.4]  # LSTM weight, Transformer weight
    }
    
    # Save the combined checkpoint as the main model file.
    torch.save({
        "ensemble_config": ensemble_config,
        "lstm_model": final_lstm_model.state_dict(),
        "transformer_model": final_transformer_model.state_dict()
    }, MODEL_PATH)
    
    # Save the fitted feature scaler for use at prediction time.
    joblib.dump(dataset.scaler_X, SCALER_PATH)
    
    logger.info(f"LSTM模型已保存到 {lstm_model_path}")
    logger.info(f"Transformer模型已保存到 {transformer_model_path}")
    logger.info(f"集成模型已保存到 {MODEL_PATH}")
    logger.info(f"缩放器已保存到 {SCALER_PATH}")

if __name__ == "__main__":
    # Script entry point: train and persist the Pailie-5 models.
    logger.info("开始训练排列5模型...")
    train_model()
    logger.info("排列5模型训练完成。")