import numpy as np
from sklearn.metrics import r2_score
from typing import Dict, List, Tuple
import pandas as pd

def evaluate_model(y_true: np.ndarray, y_pred: np.ndarray) -> float:
    """Score model performance as the R^2 (coefficient of determination)."""
    r2 = r2_score(y_true, y_pred)
    return r2

def run_single_evaluation(df: pd.DataFrame, feature_engineering: callable, random_state: int = 42) -> Tuple[float, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """Run one train/predict/evaluate cycle for a single prediction method.

    Args:
        df: Input data. Column 0 is treated as dates and column 1 as the
            raw target values used for display (TODO confirm against callers).
        feature_engineering: Callable mapping ``df`` to ``(X, y)``.
        random_state: Seed forwarded to ``train_model``.

    Returns:
        score: R^2 score on the test split.
        y_test: Actual test-set values.
        predictions: Predicted values for the test split.
        test_dates: Dates aligned with the test split.
        y_test_full: Raw test-set values from column 1 (for display).
    """
    from .model_utils import train_model, predict

    # Build features and target from the raw frame.
    X, y = feature_engineering(df)

    # Train, predict, and score.
    model, X_train, X_test, y_train, y_test = train_model(X, y, random_state=random_state)
    predictions = predict(model, X_test)
    score = evaluate_model(y_test, predictions)

    # Recover the dates of the test rows directly instead of materializing a
    # full-length index array and slicing it. The min() bound preserves the
    # original slice's silent truncation if dates is shorter than X.
    # NOTE(review): this assumes train_model splits sequentially (the test
    # rows are the last len(X_test) rows, no shuffling) -- confirm against
    # train_model's implementation.
    dates = pd.to_datetime(df.iloc[:, 0])
    test_start = len(X_train)
    test_indices = np.arange(test_start, min(test_start + len(X_test), len(dates)))
    test_dates = dates.iloc[test_indices]

    return score, y_test, predictions, test_dates, df.iloc[test_indices, 1].values

def run_benchmark_evaluation(df: pd.DataFrame, feature_engineering: callable, random_states: List[int]) -> List[float]:
    """Evaluate the method once per random seed and collect the R^2 scores."""
    return [
        run_single_evaluation(df, feature_engineering, state)[0]
        for state in random_states
    ]

def print_benchmark_results(results: Dict[str, List[float]], n_trials: int) -> None:
    """打印基准测试结果"""
    df_results = pd.DataFrame(results)
    
    print(f"\nbenchmark ({n_trials} times):")
    print("\n平均分数:")
    print(df_results.mean().round(4))
    print("\n标准差:")
    print(df_results.std().round(4))
    print("\n最高分数:")
    print(df_results.max().round(4))
    print("\n最低分数:")
    print(df_results.min().round(4))
    
    best_method = df_results.mean().idxmax()
    best_score = df_results.mean().max()
    print(f"\n最佳方法: {best_method} (平均分数: {best_score:.4f})")