import os
import sys
import subprocess
import torch
import numpy as np
import pandas as pd
from torch.utils.data import Dataset, DataLoader
from sklearn.preprocessing import MinMaxScaler
import joblib
from loguru import logger
from sklearn.metrics import accuracy_score
import copy

# Configure a safe loguru logger via the project helper; fall back to a
# plain stdout sink when the helper module is unavailable.
try:
    from safe_logger_config import configure_safe_logger
    logger = configure_safe_logger()
except ImportError:
    # Fallback configuration: stdout sink at INFO level with a timestamped format.
    from loguru import logger
    logger.remove()
    logger.add(sys.stdout, 
              format="{time:YYYY-MM-DD HH:mm:ss.SSS} | {level:<8} | {name}:{function}:{line} - {message}", 
              level="INFO")

# ---------------- Configuration ----------------
current_dir = os.path.dirname(os.path.abspath(__file__))
DATA_FILE = os.path.join(current_dir, "fc3d_history.csv")  # draw-history CSV
MODEL_PATH = os.path.join(current_dir, "3d_sequence_lstm_model.pth")  # saved checkpoint
SCALER_PATH = os.path.join(current_dir, "scaler_X.pkl")  # fitted feature scaler
BATCH_SIZE = 32
EPOCHS = 1000  # upper bound on epochs; early stopping usually ends training sooner
LEARNING_RATE = 0.001
WINDOW_SIZE = 10  # number of past draws per input sequence
PATIENCE = 10  # early-stopping patience (epochs without val-loss improvement)

# Make the project root importable so `algorithms.*` modules resolve.
project_root = os.path.abspath(os.path.join(current_dir, '..', '..'))
sys.path.append(project_root)

try:
    from algorithms.fc3d_data_processor import Data3DSequenceLSTM, Data3DProcessor
except ImportError as e:
    logger.error(f"导入3D序列LSTM模型类失败: {e}")
    sys.exit(1)

from sklearn.model_selection import train_test_split

class D3SequenceDataset(Dataset):
    """Sliding-window dataset of 3D lottery draws.

    Each draw consists of 3 digits in the range 0-9 (no red/blue ball split).
    A sample's features are the previous ``window_size`` draws, each time step
    augmented with per-position zone-transition indicators; its label is the
    three digits of the following draw.
    """

    def __init__(self, csv_file, window_size):
        """Load the CSV history, validate/clean it, and build sequence tensors.

        Args:
            csv_file: path to a CSV containing at least num_1/num_2/num_3 columns.
            window_size: number of past draws per feature sequence.

        Raises:
            ValueError: if a required number column is missing.
        """
        self.data = pd.read_csv(csv_file)
        logger.info(f"原始数据形状: {self.data.shape}")
        logger.info(f"数据列: {self.data.columns.tolist()}")

        # Validate the schema and sort rows chronologically by issue number.
        self.preprocess_data()

        self.scaler_X = MinMaxScaler()
        self.features, self.labels = self.prepare_sequences(window_size)

    def preprocess_data(self):
        """Validate required columns, keep only needed ones, sort by issue number."""
        required_columns = ['num_1', 'num_2', 'num_3']
        for col in required_columns:
            if col not in self.data.columns:
                logger.error(f"数据格式错误：缺少列 {col}")
                raise ValueError(f"3D数据必须包含列 {required_columns}")

        # Synthesize an issue column when absent so sorting always works.
        if 'draw_issue' not in self.data.columns:
            self.data['draw_issue'] = range(len(self.data))

        # Keep only the columns the model consumes.
        self.data = self.data[['draw_issue', 'num_1', 'num_2', 'num_3']].copy()

        # Sort chronologically; drop rows whose issue number is not numeric.
        # (The column is guaranteed to exist at this point, so no extra check.)
        self.data['draw_issue'] = pd.to_numeric(self.data['draw_issue'], errors='coerce')
        self.data = (
            self.data.dropna(subset=['draw_issue'])
            .sort_values(by='draw_issue')
            .reset_index(drop=True)
        )

        logger.info(f"预处理后数据形状: {self.data.shape}")
        logger.info(f"数据样例:\n{self.data.head()}")

    def prepare_sequences(self, window_size):
        """Build (features, labels) tensors from the cleaned draw history.

        Returns:
            features: float32 tensor [num_samples, window_size, 6] — per time
                step the 3 raw digits interleaved with 3 zone-transition flags
                (-1/0/1), min-max scaled per feature column.
            labels: long tensor [num_samples, 3] — digits of the next draw.
        """
        number_data = self.data[['num_1', 'num_2', 'num_3']].values

        X, y = [], []
        for i in range(len(number_data) - window_size):
            # Feature window: the previous window_size draws, shape [window_size, 3].
            sequence = number_data[i:i + window_size]

            # Zone transitions per position: digits 0-4 form the "small" zone,
            # 5-9 the "large" zone. Between consecutive draws the transition is
            # -1 (large->small), 0 (no change) or 1 (small->large).
            zone_transitions = []
            for pos in range(3):
                transitions = []
                for j in range(1, len(sequence)):
                    prev_zone = 0 if sequence[j - 1][pos] <= 4 else 1
                    curr_zone = 0 if sequence[j][pos] <= 4 else 1
                    transitions.append(curr_zone - prev_zone)
                zone_transitions.append(transitions)

            # Interleave each raw digit with its transition flag. The first
            # time step has no predecessor, so its flags are 0. For j > 0 the
            # index j-1 is always valid (transitions has window_size-1 entries),
            # so no fallback branch is needed.
            extended_sequence = []
            for j in range(len(sequence)):
                extended_features = []
                for pos in range(3):
                    extended_features.append(sequence[j][pos])
                    extended_features.append(zone_transitions[pos][j - 1] if j > 0 else 0)
                extended_sequence.append(extended_features)

            X.append(extended_sequence)
            y.append(number_data[i + window_size])  # label: the next draw

        # Single conversion to arrays (previously X/y were wrapped twice).
        features_np = np.array(X)  # [num_samples, window_size, 6]
        labels_np = np.array(y)    # [num_samples, 3]

        # Min-max scale each feature column: flatten time steps so the scaler
        # sees one row per time step, then restore the original 3-D shape.
        original_shape = features_np.shape
        features_scaled = self.scaler_X.fit_transform(
            features_np.reshape(-1, features_np.shape[-1])
        ).reshape(original_shape)

        logger.info(f"特征形状: {features_scaled.shape}")
        logger.info(f"标签形状: {labels_np.shape}")
        logger.info(f"标签范围: {labels_np.min()} - {labels_np.max()}")

        return (
            torch.tensor(features_scaled, dtype=torch.float32),
            torch.tensor(labels_np, dtype=torch.long)
        )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, idx):
        return self.features[idx], self.labels[idx]

def fetch_data_if_not_exists():
    """Ensure the history CSV exists; run fetch_3d_data.py to create it if not.

    Exits the process when the fetch script is missing or fails.
    """
    # Fast path: the data file is already present.
    if os.path.exists(DATA_FILE):
        logger.info(f"数据文件 {DATA_FILE} 已存在。")
        return

    logger.info(f"数据文件 {DATA_FILE} 不存在，开始获取数据...")
    script_path = os.path.join(current_dir, 'fetch_3d_data.py')
    if not os.path.exists(script_path):
        logger.error(f"数据获取脚本不存在: {script_path}")
        sys.exit(1)

    # Run the fetch script with the same interpreter that runs this module.
    interpreter = sys.executable
    logger.info(f"运行数据获取脚本: {script_path} 使用解释器: {interpreter}")
    try:
        subprocess.run([interpreter, script_path], check=True,
                       encoding='utf-8', errors='replace')
    except subprocess.CalledProcessError as e:
        logger.error(f"运行数据获取脚本失败: {e}")
        sys.exit(1)
    logger.info("数据获取完成。")

def train_model():
    """Train the 3D-sequence LSTM and persist the best checkpoint and scaler.

    Pipeline: ensure data exists -> build dataset -> 80/20 random train/val
    split -> train with per-position cross-entropy (averaged over the 3 digit
    positions), ReduceLROnPlateau scheduling, gradient clipping and early
    stopping on validation loss -> save best weights and the fitted scaler.
    """
    fetch_data_if_not_exists()

    if not os.path.exists(DATA_FILE):
        logger.error(f"数据文件不存在: {DATA_FILE}")
        sys.exit(1)

    # Load and window the draw history.
    logger.info("加载3D序列数据...")
    dataset = D3SequenceDataset(DATA_FILE, WINDOW_SIZE)

    # 80/20 random split into train/validation subsets.
    train_size = int(0.8 * len(dataset))
    val_size = len(dataset) - train_size
    train_dataset, val_dataset = torch.utils.data.random_split(dataset, [train_size, val_size])

    train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE, shuffle=False)

    input_dim = dataset.features.shape[-1]  # 6 features: 3 digits + 3 zone transitions

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logger.info(f"使用设备: {device}")

    # Model, optimizer, LR scheduler and loss.
    model = Data3DSequenceLSTM(input_dim=input_dim, hidden_dim=128, num_layers=3, dropout=0.3).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE, weight_decay=1e-5)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=5)
    criterion = torch.nn.CrossEntropyLoss()

    def position_loss(outputs, labels):
        # Cross-entropy per digit position, averaged over the 3 positions.
        # outputs: [batch, 3, 10]; labels: [batch, 3].
        return sum(criterion(outputs[:, i, :], labels[:, i]) for i in range(3)) / 3

    best_val_loss = float('inf')
    best_model_state = None
    trigger_times = 0  # consecutive epochs without val-loss improvement

    logger.info("开始训练3D序列LSTM模型...")

    for epoch in range(EPOCHS):
        # ---- training pass ----
        model.train()
        total_train_loss = 0.0
        for features, labels in train_loader:
            features = features.to(device)
            labels = labels.to(device)

            optimizer.zero_grad()
            outputs = model(features)  # [batch_size, 3, 10]
            loss = position_loss(outputs, labels)
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)  # tame exploding gradients
            optimizer.step()
            total_train_loss += loss.item()

        # ---- validation pass ----
        model.eval()
        total_val_loss = 0.0
        all_preds = []
        all_labels = []
        with torch.no_grad():
            for features, labels in val_loader:
                features = features.to(device)
                labels = labels.to(device)

                outputs = model(features)  # [batch_size, 3, 10]
                total_val_loss += position_loss(outputs, labels).item()

                # Collect per-digit predictions for the accuracy metric.
                preds = torch.argmax(outputs, dim=-1)  # [batch_size, 3]
                all_preds.extend(preds.cpu().numpy().flatten())
                all_labels.extend(labels.cpu().numpy().flatten())

        avg_train_loss = total_train_loss / len(train_loader)
        avg_val_loss = total_val_loss / len(val_loader)
        accuracy = accuracy_score(all_labels, all_preds)

        logger.info(f"Epoch {epoch+1}: Train Loss = {avg_train_loss:.4f}, "
                    f"Val Loss = {avg_val_loss:.4f}, Acc = {accuracy:.4f}")

        # Reduce the learning rate when validation loss plateaus.
        scheduler.step(avg_val_loss)

        # Early stopping: keep the best weights seen so far.
        if avg_val_loss < best_val_loss:
            best_val_loss = avg_val_loss
            best_model_state = copy.deepcopy(model.state_dict())
            trigger_times = 0
        else:
            trigger_times += 1
            if trigger_times >= PATIENCE:
                logger.info(f"早停触发，训练停止在第 {epoch+1} 轮")
                break

    # Restore the best-validation weights before saving.
    if best_model_state is not None:
        model.load_state_dict(best_model_state)

    os.makedirs(os.path.dirname(MODEL_PATH), exist_ok=True)
    torch.save({
        "model": model.state_dict()
    }, MODEL_PATH)

    # Persist the fitted scaler so inference can apply the same transform.
    joblib.dump(dataset.scaler_X, SCALER_PATH)

    logger.info(f"3D序列LSTM模型已保存到 {MODEL_PATH}")
    logger.info(f"缩放器已保存到 {SCALER_PATH}")

if __name__ == "__main__":
    # Script entry point: train the LSTM and save the checkpoint + scaler.
    logger.info("开始训练3D序列LSTM模型...")
    train_model()
    logger.info("3D序列LSTM模型训练完成。")