import pandas as pd
import numpy as np
import os
from datetime import datetime
import sys
import joblib

# 添加当前目录到系统路径，以便导入自定义模块
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from B1_train import B1FeatureExtractor

# Configuration of the best-performing models that participate in scoring.
# Keys are short model ids; each value carries a human-readable display name
# plus the paths of the pickled model and its fitted scaler. File names all
# follow the pattern "b1_<key>_best_{model,scaler}.pkl".
_BEST_MODEL_DIR = "b1_model_best"

BEST_MODELS_CONFIG = {
    key: {
        'name': display_name,
        'model_path': os.path.join(_BEST_MODEL_DIR, f"b1_{key}_best_model.pkl"),
        'scaler_path': os.path.join(_BEST_MODEL_DIR, f"b1_{key}_best_scaler.pkl"),
    }
    for key, display_name in [
        ('rf', '随机森林'),
        ('lr', '逻辑回归'),
        ('svm', '支持向量机'),
        ('nn', '神经网络'),
    ]
}

def load_model(model_path, scaler_path=None):
    """Load a trained model, and optionally its scaler, from disk.

    Parameters:
    model_path: str, path to the pickled (joblib) model file
    scaler_path: str, optional path to the pickled scaler file

    Returns:
    (model, scaler) tuple; scaler is None when no scaler_path is given.

    Raises:
    FileNotFoundError: if the model file, or a requested scaler file,
        does not exist.
    """
    if not os.path.exists(model_path):
        raise FileNotFoundError(f"模型文件不存在: {model_path}")

    # sklearn-style estimators are persisted with joblib.
    model = joblib.load(model_path)

    scaler = None
    if scaler_path:
        if not os.path.exists(scaler_path):
            raise FileNotFoundError(f"标准化器文件不存在: {scaler_path}")
        scaler = joblib.load(scaler_path)
    return model, scaler

def filter_stocks_by_date_and_j(data_dir, target_date, j_threshold=10):
    """Select stocks whose J value on a given date is below a threshold.

    Scans every ``*.csv`` file in ``data_dir`` (expected naming pattern:
    ``<code>_<name>_daily.csv``) and keeps the stocks that have a row for
    ``target_date`` with column ``j`` strictly below ``j_threshold``.

    Parameters:
    data_dir: str, directory containing per-stock daily CSV files
    target_date: str, date in 'YYYY-MM-DD' format
    j_threshold: float, upper bound (exclusive) on the J value

    Returns:
    filtered_stocks: list of dicts with keys 'file', 'code', 'name',
        and 'data' (the matching row as a pandas Series).
    """
    stock_files = [f for f in os.listdir(data_dir) if f.endswith('.csv')]

    filtered_stocks = []

    for file in stock_files:
        file_path = os.path.join(data_dir, file)
        try:
            df = pd.read_csv(file_path)
            df['date'] = pd.to_datetime(df['date'])

            # Keep the record for the target date when J is under threshold.
            target_row = df[df['date'] == target_date]
            if not target_row.empty and target_row['j'].values[0] < j_threshold:
                # BUGFIX: the old code used file.split('_')[1], which truncated
                # stock names containing underscores (and the trailing
                # .replace('_daily.csv', '') was dead code, since split had
                # already removed the underscores). Parse the stem between the
                # code prefix and the '_daily.csv' suffix instead.
                if file.endswith('_daily.csv'):
                    stem = file[:-len('_daily.csv')]
                else:
                    stem = file[:-len('.csv')]
                code, _, name = stem.partition('_')
                stock_info = {
                    'file': file,
                    'code': code,
                    'name': name,
                    'data': target_row.iloc[0]
                }
                filtered_stocks.append(stock_info)
        except Exception as e:
            print(f"处理文件 {file} 时出错: {e}")

    print(f"找到 {len(filtered_stocks)} 只J值小于 {j_threshold} 的股票")
    return filtered_stocks

def prepare_features_for_prediction(filtered_stocks, data_dir, sequence_length=15):
    """Build model-input feature vectors for each pre-filtered stock.

    For every stock, reloads its full daily CSV, locates the target date,
    takes the trailing window of ``sequence_length`` rows ending at that
    date, cleans missing values, and runs the B1 feature extractor on the
    window. Stocks without enough history are silently skipped.

    Note: features are NOT standardized here; the scaler saved at training
    time is applied later, at prediction time.

    Parameters:
    filtered_stocks: list of stock dicts from filter_stocks_by_date_and_j
    data_dir: str, directory containing per-stock daily CSV files
    sequence_length: int, number of trailing rows used per stock

    Returns:
    list of stock dicts extended with a 'features' vector.
    """
    extractor = B1FeatureExtractor(sequence_length=sequence_length)
    enriched = []

    for stock in filtered_stocks:
        try:
            frame = pd.read_csv(os.path.join(data_dir, stock['file']))
            frame['date'] = pd.to_datetime(frame['date'])
            frame = frame.sort_values('date').reset_index(drop=True)

            # Position of the target date within the sorted history.
            target_idx = frame[frame['date'] == stock['data']['date']].index[0]

            # Skip stocks that lack a full trailing window of history.
            if target_idx < sequence_length - 1:
                continue

            window = frame.iloc[target_idx - sequence_length + 1:target_idx + 1]
            # Fill gaps forward, then backward, then with zeros.
            cleaned = window[extractor.feature_cols].ffill().bfill().fillna(0)

            # Raw (unscaled) features; scaling happens at prediction time.
            item = stock.copy()
            item['features'] = extractor._extract_features(cleaned)
            enriched.append(item)
        except Exception as e:
            print(f"处理股票 {stock['code']} {stock['name']} 时出错: {e}")

    print(f"成功为 {len(enriched)} 只股票准备特征数据")
    return enriched

def backtest_model_accuracy(model, scaler, stock_file, data_dir, sequence_length=15):
    """Measure a model's historical hit rate on one stock's full history.

    Rebuilds the stock's feature/label pairs with B1FeatureExtractor,
    scales them with the training-time scaler, thresholds the model's
    predicted probabilities at 0.5, and compares against the binary
    B1_tag labels.

    Parameters:
    model: trained model exposing predict() (assumed to return the
        probability of B1_tag == 1 — TODO confirm against model_factory)
    scaler: fitted scaler matching the model's training data
    stock_file: str, CSV file name of the stock
    data_dir: str, directory containing the CSV files
    sequence_length: int, window length used for feature extraction

    Returns:
    float accuracy rounded to 2 decimals, or None when no samples could
    be built or any step failed.
    """
    try:
        frame = pd.read_csv(os.path.join(data_dir, stock_file))
        frame['date'] = pd.to_datetime(frame['date'])
        frame = frame.sort_values('date').reset_index(drop=True)

        extractor = B1FeatureExtractor(sequence_length=sequence_length)
        X, y = extractor.prepare_features(frame)

        if len(X) == 0 or len(y) == 0:
            return None

        # Apply the scaler that was fitted at training time.
        scaled = scaler.transform(X)

        # Threshold probabilities into hard 0/1 predictions.
        proba = model.predict(scaled)
        predicted = (proba > 0.5).astype(int)

        # Binarize the target: 1 means B1_tag == 1, 0 otherwise.
        actual = (y == 1).astype(int)

        return round(np.mean(predicted == actual), 2)
    except Exception as e:
        print(f"回测股票 {stock_file} 时出错: {e}")
        return None

def predict_with_model(model, stock_features, scaler, data_dir, model_name="model"):
    """Score every stock with one trained model and backtest its accuracy.

    Parameters:
    model: trained model exposing predict(); assumed to return the
        probability of B1_tag == 1 (per the model_factory convention —
        TODO confirm)
    stock_features: list of stock dicts carrying a 'features' vector
    scaler: fitted scaler matching the model's training data
    data_dir: str, directory holding per-stock CSVs (for backtesting)
    model_name: str, prefix for the result keys added to each stock dict

    Returns:
    predictions: list of stock dicts extended with
        '<model_name>_prediction' and, when the backtest succeeded,
        '<model_name>_backtest_accuracy'.
    """
    predictions = []

    for stock in stock_features:
        try:
            # Standardize with the scaler saved at training time.
            features_scaled = scaler.transform(stock['features'].reshape(1, -1))

            pred_proba = model.predict(features_scaled)[0]

            # Historical accuracy of this model on this particular stock.
            backtest_accuracy = backtest_model_accuracy(model, scaler, stock['file'], data_dir)

            stock_with_pred = stock.copy()
            stock_with_pred[f'{model_name}_prediction'] = pred_proba
            if backtest_accuracy is not None:
                stock_with_pred[f'{model_name}_backtest_accuracy'] = backtest_accuracy
            predictions.append(stock_with_pred)
        except Exception as e:
            print(f"预测股票 {stock['code']} {stock['name']} 时出错: {e}")
            # Record a null prediction so one failure doesn't drop the stock.
            stock_with_pred = stock.copy()
            stock_with_pred[f'{model_name}_prediction'] = None
            predictions.append(stock_with_pred)

    return predictions

def predict_with_individual_model(stock_features, data_dir, individual_model_dir, sequence_length=15):
    """Score each stock with its own per-stock RF model, when one exists.

    Looks for '<code>_b1_rf_model.pkl' / '<code>_b1_rf_scaler.pkl' in
    ``individual_model_dir``. Missing models, or any failure, yield None
    for both result fields so downstream steps never lose a stock.

    Parameters:
    stock_features: list of stock dicts carrying a 'features' vector
    data_dir: str, directory holding per-stock CSVs (for backtesting)
    individual_model_dir: str, directory of per-stock model files
    sequence_length: int, window length used when backtesting

    Returns:
    list of stock dicts extended with 'individual_prediction' and
    'individual_backtest_accuracy'.
    """
    results = []

    for stock in stock_features:
        try:
            code = stock['code']
            model_path = os.path.join(individual_model_dir, f"{code}_b1_rf_model.pkl")
            scaler_path = os.path.join(individual_model_dir, f"{code}_b1_rf_scaler.pkl")

            # No per-stock model trained for this code: record nulls.
            if not (os.path.exists(model_path) and os.path.exists(scaler_path)):
                entry = stock.copy()
                entry['individual_prediction'] = None
                entry['individual_backtest_accuracy'] = None
                results.append(entry)
                continue

            model, scaler = load_model(model_path, scaler_path)

            # Ensure a 2-D feature matrix before scaling.
            features = stock['features']
            if len(features.shape) == 1:
                features = features.reshape(1, -1)
            pred_proba = model.predict(scaler.transform(features))[0]

            # Historical accuracy of the per-stock model on its own stock.
            accuracy = backtest_model_accuracy(model, scaler, stock['file'], data_dir, sequence_length)

            entry = stock.copy()
            entry['individual_prediction'] = pred_proba
            entry['individual_backtest_accuracy'] = accuracy
            results.append(entry)

        except Exception as e:
            print(f"使用个股模型预测股票 {stock['code']} {stock['name']} 时出错: {e}")
            # Keep the stock in the output even when its prediction failed.
            entry = stock.copy()
            entry['individual_prediction'] = None
            entry['individual_backtest_accuracy'] = None
            results.append(entry)

    return results

def predict_with_best_models(stock_features, data_dir):
    """Run every configured best model over the stock list in turn.

    Each model in BEST_MODELS_CONFIG adds its own prediction/accuracy
    keys to the stock dicts; a model that fails to load or predict is
    skipped without affecting the others.

    Parameters:
    stock_features: list of stock dicts carrying a 'features' vector
    data_dir: str, directory holding per-stock CSVs (for backtesting)

    Returns:
    list of stock dicts accumulated with all models' results.
    """
    results = stock_features

    for key, cfg in BEST_MODELS_CONFIG.items():
        try:
            print(f"使用{cfg['name']}模型进行预测...")
            model, scaler = load_model(cfg['model_path'], cfg['scaler_path'])
            results = predict_with_model(model, results, scaler, data_dir, key)
        except Exception as e:
            # A broken model must not block the remaining ones.
            print(f"加载或预测{cfg['name']}模型时出错: {e}")

    return results

def calculate_composite_score(predictions):
    """Attach an accuracy-weighted composite score to each stock.

    The score is sum(prob_m * acc_m) / sum(acc_m) over every best model
    whose prediction and backtest accuracy are both present and not NaN.
    Stocks without any valid model get a None score.

    Parameters:
    predictions: list of stock dicts with per-model results

    Returns:
    list of stock dicts extended with 'composite_score' (rounded to 4
    decimals or None) and 'valid_models_count'.
    """
    scored = []

    for stock in predictions:
        try:
            weighted = 0.0
            acc_total = 0.0
            n_valid = 0

            for model_key in BEST_MODELS_CONFIG:
                prob = stock.get(f'{model_key}_prediction')
                acc = stock.get(f'{model_key}_backtest_accuracy')

                # A model contributes only when both values are usable numbers.
                if prob is None or acc is None:
                    continue
                if np.isnan(prob) or np.isnan(acc):
                    continue
                weighted += prob * acc
                acc_total += acc
                n_valid += 1

            # Weighted average; undefined when nothing contributed.
            score = None
            if n_valid > 0 and acc_total > 0:
                score = round(weighted / acc_total, 4)

            entry = stock.copy()
            entry['composite_score'] = score
            entry['valid_models_count'] = n_valid
            scored.append(entry)
        except Exception as e:
            print(f"计算股票 {stock['code']} {stock['name']} 综合评分时出错: {e}")
            entry = stock.copy()
            entry['composite_score'] = None
            entry['valid_models_count'] = 0
            scored.append(entry)

    return scored

def save_predictions_to_csv(predictions, output_file):
    """Persist prediction results to CSV, ranked by composite score.

    Parameters:
    predictions: list of stock dicts with per-model predictions/accuracies
    output_file: str, destination CSV path
    """
    # Sort descending; stocks without a score are treated as 0.
    def _rank_key(item):
        score = item.get('composite_score')
        return score if score is not None else 0

    rows = []
    for stock in sorted(predictions, key=_rank_key, reverse=True):
        # B1_tag may be absent from the source row; default to "N/A".
        b1_tag = 'N/A'
        if 'data' in stock and 'B1_tag' in stock['data']:
            b1_tag = stock['data'].get('B1_tag', 'N/A')

        # Round the composite score to 4 decimals when it is a real number.
        composite_score = stock.get('composite_score', 'N/A')
        if composite_score != 'N/A' and composite_score is not None and not np.isnan(composite_score):
            composite_score = round(composite_score, 4)

        # Collect and round each best model's prediction and accuracy.
        model_predictions = {}
        for model_key in BEST_MODELS_CONFIG:
            pred = stock.get(f'{model_key}_prediction', 'N/A')
            acc = stock.get(f'{model_key}_backtest_accuracy', 'N/A')

            if pred != 'N/A' and pred is not None and not np.isnan(pred):
                pred = round(pred, 4)
            if acc != 'N/A' and acc is not None and not np.isnan(acc):
                acc = round(acc, 4)

            model_predictions[f'{model_key}_prediction'] = pred
            model_predictions[f'{model_key}_backtest_accuracy'] = acc

        rows.append({
            'code': stock['code'],
            'name': stock['name'],
            'date': stock['data']['date'],
            'j_value': stock['data']['j'],
            'B1_tag': b1_tag,
            **model_predictions,
            'individual_prediction': stock.get('individual_prediction', 'N/A'),
            'individual_backtest_accuracy': stock.get('individual_backtest_accuracy', 'N/A'),
            'composite_score': composite_score,
            'valid_models_count': stock.get('valid_models_count', 0)
        })

    # utf-8-sig keeps Chinese text readable when opened in Excel.
    pd.DataFrame(rows).to_csv(output_file, index=False, encoding='utf-8-sig')
    print(f"预测结果已保存到: {output_file}")

def _print_top_predictions(final_predictions, top_n=10):
    """Pretty-print the top-N stocks ranked by composite score."""
    predictions_sorted = sorted(
        final_predictions,
        key=lambda x: x.get('composite_score', 0) if x.get('composite_score') is not None else 0,
        reverse=True)
    for i, stock in enumerate(predictions_sorted[:top_n]):
        # Header line: rank, code, name, J value, composite score.
        composite_score = stock.get('composite_score', None)
        valid_models = stock.get('valid_models_count', 0)
        composite_score_str = f"{composite_score:.4f}" if composite_score is not None and not np.isnan(composite_score) else "N/A"

        print(f"{i+1:2d}. {stock['code']} {stock['name']:<8} "
              f"J值: {stock['data']['j']:.2f} "
              f"综合评分: {composite_score_str} "
              f"(有效模型数: {valid_models})")

        # One detail line per best model.
        for model_key, model_config in BEST_MODELS_CONFIG.items():
            pred = stock.get(f'{model_key}_prediction', None)
            acc = stock.get(f'{model_key}_backtest_accuracy', None)

            pred_str = f"{pred:.4f}" if pred is not None and not np.isnan(pred) else "N/A"
            acc_str = f"{acc:.4f}" if acc is not None and not np.isnan(acc) else "N/A"

            print(f"     {model_config['name']}: 预测={pred_str}, 准确率={acc_str}")

        # Per-stock (individual) model results.
        individual_pred = stock.get('individual_prediction', None)
        individual_acc = stock.get('individual_backtest_accuracy', None)

        individual_pred_str = f"{individual_pred:.4f}" if individual_pred is not None and not np.isnan(individual_pred) else "N/A"
        individual_acc_str = f"{individual_acc:.4f}" if individual_acc is not None and not np.isnan(individual_acc) else "N/A"

        print(f"     个股模型: 预测={individual_pred_str}, 准确率={individual_acc_str}")


def predict_stocks_for_date(target_date):
    """Run the full B1 prediction pipeline for one trading date.

    Steps: filter stocks with J < 10 on target_date, build features,
    score with the four best global models, score with per-stock models,
    compute the accuracy-weighted composite score, save a CSV under
    ./predictions, and print the top 10 results.

    Parameters:
    target_date: str, trading date in 'YYYY-MM-DD' format
    """
    # Paths are relative to the working directory — TODO confirm layout.
    data_dir = os.path.join("..", "tdx_data_process")
    individual_model_dir = os.path.join("b1_model_every")
    # Results go into a dedicated predictions folder.
    predictions_dir = "predictions"
    os.makedirs(predictions_dir, exist_ok=True)
    output_file = os.path.join(predictions_dir, f"predictions_{target_date.replace('-', '')}.csv")

    print(f"开始处理 {target_date} 的股票数据")
    print("=" * 50)

    # Step 1: candidate selection by J value.
    print(f"筛选 {target_date} J值小于10的股票...")
    filtered_stocks = filter_stocks_by_date_and_j(data_dir, target_date, j_threshold=10)

    if not filtered_stocks:
        print("未找到符合条件的股票")
        return

    # Step 2: feature extraction.
    print("准备特征数据...")
    stock_features = prepare_features_for_prediction(filtered_stocks, data_dir)

    if not stock_features:
        print("没有成功准备特征数据的股票")
        return

    # Step 3: global best models.
    print("使用所有最佳模型进行预测...")
    best_model_predictions = predict_with_best_models(stock_features, data_dir)

    if not best_model_predictions:
        print("没有成功预测的股票")
        return

    # Step 4: per-stock models.
    print("使用个股模型进行预测和回测...")
    individual_predictions = predict_with_individual_model(best_model_predictions, data_dir, individual_model_dir)

    # Step 5: composite scoring.
    print("计算综合评分...")
    final_predictions = calculate_composite_score(individual_predictions)

    # Step 6: persist and report.
    print("保存预测结果...")
    save_predictions_to_csv(final_predictions, output_file)

    print("\n前10个预测结果:")
    print("-" * 150)
    _print_top_predictions(final_predictions)

def main():
    """Entry point: run the prediction pipeline for a hard-coded date."""
    predict_stocks_for_date("2025-08-18")


if __name__ == "__main__":
    main()