#file:g:\python\caipiao\LottoProphet\scripts\dlt\random_dlt_model.py
import os
import sys
import subprocess
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import hamming_loss, f1_score, jaccard_score  # 添加了 jaccard_score
from sklearn.model_selection import train_test_split
import joblib
from loguru import logger

# Configure loguru: replace the default handler with a timestamped stdout sink
logger.remove()  # remove the default handler
logger.add(sys.stdout, format="{time:YYYY-MM-DD HH:mm:ss.SSS} | {level:<8} | {name}:{function}:{line} - {message}", level="INFO")

# ---------------- Configuration ----------------
# All artifact paths are resolved relative to this script's directory.
current_dir = os.path.dirname(os.path.abspath(__file__))
DATA_FILE = os.path.join(current_dir, "dlt_history.csv")      # draw-history CSV (fetched on demand)
MODEL_PATH = os.path.join(current_dir, "random_rf_dlt_model.pkl")  # serialized red/blue RF models
SCALER_PATH = os.path.join(current_dir, "random_scaler_X.pkl")     # serialized MinMaxScaler for features
WINDOW_SIZE = 10  # number of past draws that form one feature window

# Project root (two directories up) added to sys.path so sibling packages resolve
project_root = os.path.abspath(os.path.join(current_dir, '..', '..'))
sys.path.append(project_root)

def fetch_data_if_not_exists():
    """Ensure the draw-history CSV exists, fetching it via fetch_dlt_data.py if missing.

    Exits the process (status 1) when the fetch script is absent or fails.
    """
    if os.path.exists(DATA_FILE):
        logger.info(f"数据文件 {DATA_FILE} 已存在。")
        return

    logger.info(f"数据文件 {DATA_FILE} 不存在，开始获取数据...")
    fetch_script = os.path.join(current_dir, 'fetch_dlt_data.py')
    if not os.path.exists(fetch_script):
        logger.error(f"数据获取脚本不存在: {fetch_script}")
        sys.exit(1)

    # Run the fetch script with the same interpreter that is running us.
    python_executable = sys.executable
    logger.info(f"运行数据获取脚本: {fetch_script} 使用解释器: {python_executable}")
    try:
        subprocess.run([python_executable, fetch_script], check=True)
    except subprocess.CalledProcessError as exc:
        logger.error(f"运行数据获取脚本失败: {exc}")
        sys.exit(1)
    logger.info("数据获取完成。")

def preprocess_data(data, window_size, red_balls=5, blue_balls=2):
    """Build sliding-window features and next-draw labels from draw history.

    Each sample's features are the flattened ball columns of ``window_size``
    consecutive draws; its label is the following draw's ball numbers shifted
    to 0-based. Features are MinMax-scaled with a freshly fitted scaler.

    Args:
        data: DataFrame whose column 0 is the draw id and the next
            ``red_balls + blue_balls`` columns are the drawn numbers.
        window_size: number of past draws per feature window.
        red_balls: count of red-ball columns (default 5).
        blue_balls: count of blue-ball columns (default 2).

    Returns:
        (features_scaled, labels, scaler_X) as
        (ndarray[num_samples, window_size * balls], ndarray[num_samples, balls], MinMaxScaler).

    Raises:
        ValueError: if the DataFrame has fewer columns than expected.
    """
    total_balls = red_balls + blue_balls
    expected_columns = 1 + total_balls
    if len(data.columns) < expected_columns:
        raise ValueError(f"数据列数不足，当前列数: {len(data.columns)}，期望至少 {expected_columns} 列。")

    ball_cols = slice(1, 1 + total_balls)  # skip the draw-id column
    feature_rows = []
    label_rows = []
    for start in range(len(data) - window_size):
        end = start + window_size
        # Feature: every ball value inside the window, flattened row-major.
        feature_rows.append(data.iloc[start:end, ball_cols].values.flatten())
        # Label: the draw right after the window, shifted to 0-based classes.
        label_rows.append(data.iloc[end, ball_cols].values - 1)

    # Fit a fresh scaler on the feature matrix and scale it.
    features_np = np.array(feature_rows)  # shape: (num_samples, window_size * total_balls)
    scaler_X = MinMaxScaler()
    features_scaled = scaler_X.fit_transform(features_np)

    labels_np = np.array(label_rows)  # shape: (num_samples, total_balls)

    return features_scaled, labels_np, scaler_X

def evaluate_multioutput(y_true, y_pred, metric_func, average='micro'):
    """Compute ``metric_func`` independently for every label column.

    Args:
        y_true: 2-D array of true labels, one column per output.
        y_pred: 2-D array of predicted labels, same shape as ``y_true``.
        metric_func: callable ``(y_true_col, y_pred_col, average=...) -> float``.
        average: averaging mode forwarded to ``metric_func``.

    Returns:
        (mean_score, per_column_scores) — the mean across columns and the
        list of individual column scores.
    """
    per_label = [
        metric_func(y_true[:, col], y_pred[:, col], average=average)
        for col in range(y_true.shape[1])
    ]
    return np.mean(per_label), per_label

def train_model():
    """Train multi-output random-forest models for DLT red and blue balls.

    Fetches the draw history if missing, builds sliding-window features via
    preprocess_data, trains one RandomForestClassifier on the 5 red-ball
    labels and one on the 2 blue-ball labels, logs validation F1/Jaccard
    scores, and persists the models and the feature scaler to
    MODEL_PATH / SCALER_PATH. Exits (status 1) if the data file is absent.
    """
    fetch_data_if_not_exists()

    if not os.path.exists(DATA_FILE):
        logger.error(f"数据文件不存在: {DATA_FILE}")
        sys.exit(1)

    # Load the draw-history CSV
    logger.info("加载数据...")
    data = pd.read_csv(DATA_FILE)

    # Map the Chinese CSV column names to the names the code expects
    data.rename(columns={
        '红球_1': 'Red_1',
        '红球_2': 'Red_2',
        '红球_3': 'Red_3',
        '红球_4': 'Red_4',
        '红球_5': 'Red_5',
        '蓝球_1': 'Blue_1',
        '蓝球_2': 'Blue_2'
    }, inplace=True)

    # Build sliding-window features and 0-based labels; fits the scaler too
    features, labels, scaler_X = preprocess_data(data, WINDOW_SIZE)

    # Log feature/label matrix shapes for sanity checking
    logger.debug(f"Features shape: {features.shape}")
    logger.debug(f"Labels shape: {labels.shape}")

    # Hold out 10% of samples for validation
    X_train, X_val, y_train, y_val = train_test_split(features, labels, test_size=0.1, random_state=42)

    # One multi-output forest per ball color
    red_model = RandomForestClassifier(n_estimators=100, random_state=42)
    blue_model = RandomForestClassifier(n_estimators=100, random_state=42)

    # Training
    logger.info("开始模型训练...")

    # Red-ball model: first 5 label columns
    red_model.fit(X_train, y_train[:, :5])
    red_preds = red_model.predict(X_val)

    # Per-column micro-averaged F1 and Jaccard on the validation split
    red_f1_score_value, _ = evaluate_multioutput(y_val[:, :5], red_preds, f1_score, average='micro')
    red_jaccard_score_value, _ = evaluate_multioutput(y_val[:, :5], red_preds, jaccard_score, average='micro')
    logger.info(f"红球模型训练完成，验证集 F1 Score: {red_f1_score_value:.4f}, Jaccard Score: {red_jaccard_score_value:.4f}")

    # Blue-ball model: remaining 2 label columns
    blue_model.fit(X_train, y_train[:, 5:])
    blue_preds = blue_model.predict(X_val)

    # Per-column micro-averaged F1 and Jaccard on the validation split
    blue_f1_score_value, _ = evaluate_multioutput(y_val[:, 5:], blue_preds, f1_score, average='micro')
    blue_jaccard_score_value, _ = evaluate_multioutput(y_val[:, 5:], blue_preds, jaccard_score, average='micro')
    logger.info(f"蓝球模型训练完成，验证集 F1 Score: {blue_f1_score_value:.4f}, Jaccard Score: {blue_jaccard_score_value:.4f}")

    # Sample of validation predictions for debugging
    logger.debug(f"Red predictions: {red_preds[:5]}")
    logger.debug(f"Blue predictions: {blue_preds[:5]}")

    # Persist both models (one pickle) plus the scaler
    os.makedirs(os.path.dirname(MODEL_PATH), exist_ok=True)
    joblib.dump({
        "red_model": red_model,
        "blue_model": blue_model
    }, MODEL_PATH)
    joblib.dump(scaler_X, SCALER_PATH)
    logger.info(f"模型已保存到 {MODEL_PATH}")
    logger.info(f"缩放器已保存到 {SCALER_PATH}")

 

def load_model_and_scaler(model_path, scaler_path):
    """Deserialize the trained model bundle and the feature scaler from disk.

    Returns:
        (model, scaler_X) — the joblib-loaded contents of the two paths.
    """
    return joblib.load(model_path), joblib.load(scaler_path)

def prepare_new_data(data, window_size, red_balls=5, blue_balls=2):
    """Build prediction feature rows by sliding a window over recent draws.

    Unlike preprocess_data, every complete window (including the final one)
    yields a row, and no scaling is applied here.

    Args:
        data: DataFrame with draw id in column 0 and ball numbers after it.
        window_size: number of past draws per feature window.
        red_balls: count of red-ball columns (default 5).
        blue_balls: count of blue-ball columns (default 2).

    Returns:
        ndarray of shape (num_windows, window_size * balls), or None when
        there are fewer than ``window_size`` rows.
    """
    row_count = len(data)
    if row_count < window_size:
        logger.error(f"数据行数不足 {window_size} 行。当前数据行数: {row_count}")
        return None

    ball_cols = slice(1, 1 + red_balls + blue_balls)  # skip the draw-id column
    feature_rows = []
    for start in range(row_count - window_size + 1):
        window = data.iloc[start:start + window_size, ball_cols].values.flatten()
        logger.debug(f"Feature window shape: {window.shape}, Data: {window}")
        feature_rows.append(window)

    # Defensive: the guard above should make this unreachable, but keep it.
    if not feature_rows:
        logger.error("Features list is empty. Ensure there are enough data rows for the window size.")
        return None

    features_np = np.array(feature_rows)  # shape: (num_windows, window_size * balls)
    logger.debug(f"Features array shape: {features_np.shape}")
    return features_np

def predict_next_draw(model, scaler_X, data, window_size):
    """Predict lottery numbers for the draw following each history window.

    Args:
        model: dict with fitted "red_model" and "blue_model" estimators.
        scaler_X: the fitted MinMaxScaler produced at training time.
        data: DataFrame of historical draws (draw id + ball columns).
        window_size: number of past draws per feature window.

    Returns:
        ndarray of shape (num_windows, 7) with 1-based ball numbers
        (5 red + 2 blue per row), or None when there is too little data.
    """
    features = prepare_new_data(data, window_size)
    if features is None:
        return None

    # BUG FIX: the models were trained on MinMax-scaled features, but the
    # original code fed raw features to predict() and never used scaler_X.
    # Apply the training-time scaler so inference matches training.
    features_scaled = scaler_X.transform(features)

    logger.debug(f"Features scaled shape: {features_scaled.shape}")
    logger.debug(f"Features scaled data: {features_scaled[:5]}")

    red_model = model["red_model"]
    blue_model = model["blue_model"]

    # Multi-output predictions: red -> 5 columns, blue -> 2 columns.
    red_preds = red_model.predict(features_scaled)
    blue_preds = blue_model.predict(features_scaled)

    logger.debug(f"Red predictions: {red_preds[:5]}")
    logger.debug(f"Blue predictions: {blue_preds[:5]}")

    # Labels were stored 0-based during training; shift back to ball numbers.
    red_preds = red_preds + 1
    blue_preds = blue_preds + 1

    # One combined row per window: 5 red numbers followed by 2 blue numbers.
    combined_preds = np.concatenate((red_preds, blue_preds), axis=1)
    return combined_preds

if __name__ == "__main__":
    # NOTE(review): train_model() below is commented out, so these two log
    # lines claim training ran and completed without actually training —
    # confirm whether this was intentionally disabled for a prediction-only
    # run, and if so remove/reword the misleading messages.
    logger.info("开始训练模型...")
    # train_model()
    logger.info("模型训练完成。")
    # Reconfigure loguru at DEBUG level for verbose prediction output
    logger.remove()  # remove the handler installed at import time
    logger.add(sys.stdout, format="{time:YYYY-MM-DD HH:mm:ss.SSS} | {level:<8} | {name}:{function}:{line} - {message}", level="DEBUG")

    # Load the draw-history CSV
    logger.info("加载数据...")
    data = pd.read_csv(DATA_FILE)

    # Map the Chinese CSV column names to the names the code expects
    data.rename(columns={
        '红球_1': 'Red_1',
        '红球_2': 'Red_2',
        '红球_3': 'Red_3',
        '红球_4': 'Red_4',
        '红球_5': 'Red_5',
        '蓝球_1': 'Blue_1',
        '蓝球_2': 'Blue_2'
    }, inplace=True)

    # Load the persisted models and the training-time feature scaler
    logger.info("加载模型和缩放器...")
    model, scaler_X = load_model_and_scaler(MODEL_PATH, SCALER_PATH)

    # Predict the next draw's numbers from the most recent windows
    logger.info("开始预测下一期的彩票号码...")
    predictions = predict_next_draw(model, scaler_X, data, WINDOW_SIZE)
    if predictions is not None:
        logger.info(f"预测结果: {predictions}")
    else:
        logger.error("预测失败。")

 
    

