import os
import sys
import subprocess
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import joblib
from loguru import logger

# Configure loguru: replace the default handler with a timestamped stdout sink.
logger.remove()  # remove the default handler
logger.add(sys.stdout, format="{time:YYYY-MM-DD HH:mm:ss.SSS} | {level:<8} | {name}:{function}:{line} - {message}", level="INFO")

# ---------------- Configuration ----------------
# All paths are resolved relative to this script's directory so the script
# works no matter the current working directory.
current_dir = os.path.dirname(os.path.abspath(__file__))
DATA_FILE = os.path.join(current_dir, "dlt_history.csv")
MODEL_PATH = os.path.join(current_dir, "random_rf_dlt_model.pkl")
SCALER_PATH = os.path.join(current_dir, "random_scaler_X.pkl")
WINDOW_SIZE = 10  # number of past draws flattened into one feature row

# Make the project root importable (two levels up from this script).
project_root = os.path.abspath(os.path.join(current_dir, '..', '..'))
sys.path.append(project_root)

def fetch_data_if_not_exists():
    """
    Ensure the history CSV exists; if it is missing, run fetch_dlt_data.py
    (with the current interpreter) to download it.

    Exits the process with status 1 when the fetch script is missing or
    its subprocess fails.
    """
    if os.path.exists(DATA_FILE):
        logger.info(f"数据文件 {DATA_FILE} 已存在。")
        return

    logger.info(f"数据文件 {DATA_FILE} 不存在，开始获取数据...")
    fetch_script = os.path.join(current_dir, 'fetch_dlt_data.py')
    if not os.path.exists(fetch_script):
        logger.error(f"数据获取脚本不存在: {fetch_script}")
        sys.exit(1)

    # Use the interpreter running this script so the child process sees
    # the same environment / virtualenv.
    python_executable = sys.executable
    logger.info(f"运行数据获取脚本: {fetch_script} 使用解释器: {python_executable}")
    try:
        subprocess.run([python_executable, fetch_script], check=True)
    except subprocess.CalledProcessError as e:
        logger.error(f"运行数据获取脚本失败: {e}")
        sys.exit(1)
    logger.info("数据获取完成。")

def preprocess_data(data, window_size, red_balls=5, blue_balls=2):
    """
    Build sliding-window features and next-draw labels from draw history.

    Parameters
    ----------
    data : pandas.DataFrame
        Draw history; column 0 is assumed to be the issue/date column and
        the next ``red_balls + blue_balls`` columns the drawn numbers
        (presumably one row per draw in chronological order — confirm
        against fetch_dlt_data.py).
    window_size : int
        Number of consecutive past draws flattened into one feature row.
    red_balls, blue_balls : int
        How many red / blue numbers one draw contains (DLT: 5 + 2).

    Returns
    -------
    tuple
        ``(features_scaled, labels, scaler_X)`` where ``features_scaled``
        has shape ``(num_samples, window_size * (red_balls + blue_balls))``
        min-max scaled to [0, 1], ``labels`` has shape
        ``(num_samples, red_balls + blue_balls)`` with ball numbers shifted
        to be 0-based, and ``scaler_X`` is the fitted MinMaxScaler needed
        to transform inputs at prediction time.

    Raises
    ------
    ValueError
        If the DataFrame has too few columns, or too few rows to build at
        least one (window, label) pair.
    """
    expected_columns = 1 + red_balls + blue_balls
    if len(data.columns) < expected_columns:
        raise ValueError(f"数据列数不足，当前列数: {len(data.columns)}，期望至少 {expected_columns} 列。")
    # Fail early with a clear message instead of letting MinMaxScaler choke
    # on an empty array when the history is shorter than one window + label.
    if len(data) <= window_size:
        raise ValueError(f"数据行数不足，当前行数: {len(data)}，窗口大小 {window_size} 至少需要 {window_size + 1} 行。")

    features, labels = [], []
    for i in range(len(data) - window_size):
        # Feature: the window's red+blue numbers, flattened row-major.
        feature_window = data.iloc[i:i + window_size, 1:1 + red_balls + blue_balls].values.flatten()
        features.append(feature_window)

        # Label: the draw immediately after the window, shifted to 0-based
        # so the values can be used directly as class indices.
        red_labels_seq = data.iloc[i + window_size, 1:1 + red_balls].values - 1
        blue_label = data.iloc[i + window_size, 1 + red_balls:1 + red_balls + blue_balls].values - 1
        labels.append(np.concatenate((red_labels_seq, blue_label)))

    features_np = np.array(features)  # shape: (num_samples, window_size * feature_dim)
    scaler_X = MinMaxScaler()
    features_scaled = scaler_X.fit_transform(features_np)

    labels_np = np.array(labels)  # shape: (num_samples, red_balls + blue_balls)

    return features_scaled, labels_np, scaler_X

def train_model():
    """
    Train two multi-output random forests (one for the 5 red balls, one for
    the 2 blue balls) on the DLT history CSV and persist the models and the
    feature scaler.

    Side effects: may run fetch_dlt_data.py via subprocess, writes
    MODEL_PATH and SCALER_PATH, and calls sys.exit(1) when data is missing.
    """
    fetch_data_if_not_exists()

    if not os.path.exists(DATA_FILE):
        logger.error(f"数据文件不存在: {DATA_FILE}")
        sys.exit(1)

    # Load the draw history.
    logger.info("加载数据...")
    data = pd.read_csv(DATA_FILE)

    # Map the CSV's Chinese column names to the names the code expects.
    data.rename(columns={
        '红球_1': 'Red_1',
        '红球_2': 'Red_2',
        '红球_3': 'Red_3',
        '红球_4': 'Red_4',
        '红球_5': 'Red_5',
        '蓝球_1': 'Blue_1',
        '蓝球_2': 'Blue_2'
    }, inplace=True)

    # Build sliding-window features / next-draw labels and fit the scaler.
    features, labels, scaler_X = preprocess_data(data, WINDOW_SIZE)

    # Split into train / validation sets.
    X_train, X_val, y_train, y_val = train_test_split(features, labels, test_size=0.1, random_state=42)

    # Two independent multi-output classifiers.
    red_model = RandomForestClassifier(n_estimators=100, random_state=42)
    blue_model = RandomForestClassifier(n_estimators=100, random_state=42)

    logger.info("开始模型训练...")

    # Train the red-ball model (columns 0..4 of the labels).
    red_model.fit(X_train, y_train[:, :5])
    red_preds = red_model.predict(X_val)
    # BUG FIX: sklearn's accuracy_score rejects (n, 5) integer targets with
    # "multiclass-multioutput is not supported", so compute the exact-match
    # (all five balls correct) accuracy manually.
    red_accuracy = float(np.mean(np.all(red_preds == y_val[:, :5], axis=1)))
    logger.info(f"红球模型训练完成，验证集准确率: {red_accuracy:.4f}")

    # Train the blue-ball model (columns 5..6 of the labels).
    blue_model.fit(X_train, y_train[:, 5:])
    blue_preds = blue_model.predict(X_val)
    # Same fix as above: exact-match accuracy over the two blue balls.
    blue_accuracy = float(np.mean(np.all(blue_preds == y_val[:, 5:], axis=1)))
    logger.info(f"蓝球模型训练完成，验证集准确率: {blue_accuracy:.4f}")

    # Persist both models (one pickle) and the fitted scaler next to the script.
    os.makedirs(os.path.dirname(MODEL_PATH), exist_ok=True)
    joblib.dump({
        "red_model": red_model,
        "blue_model": blue_model
    }, MODEL_PATH)
    joblib.dump(scaler_X, SCALER_PATH)
    logger.info(f"模型已保存到 {MODEL_PATH}")
    logger.info(f"缩放器已保存到 {SCALER_PATH}")

def _main():
    """Script entry point: train and persist the DLT prediction models."""
    logger.info("开始训练模型...")
    train_model()
    logger.info("模型训练完成。")


if __name__ == "__main__":
    _main()