import joblib
import os
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier

class BaseModel:
    """
    Common interface shared by every concrete model wrapper.

    Wraps an underlying scikit-learn style estimator and exposes a
    uniform train / predict / persist API.
    """

    def __init__(self, model=None):
        # The wrapped estimator; may also be injected via load_model().
        self.model = model

    def _require_model(self):
        # Shared guard for every operation that needs a wrapped estimator.
        if self.model is None:
            raise ValueError("Model not initialized")

    def train(self, X, y):
        """
        Fit the wrapped estimator.

        Parameters:
        X: feature matrix
        y: target vector
        """
        self._require_model()
        self.model.fit(X, y)

    def predict(self, X):
        """
        Predict positive-class probabilities.

        Parameters:
        X: feature matrix

        Returns:
        probability of the positive class (B1_tag=1) for each row
        """
        self._require_model()
        proba = self.model.predict_proba(X)
        return proba[:, 1]

    def save_model(self, file_path):
        """
        Serialize the wrapped estimator with joblib.

        Parameters:
        file_path: str, destination path for the model file
        """
        self._require_model()
        joblib.dump(self.model, file_path)

    @classmethod
    def load_model(cls, file_path):
        """
        Load an estimator from disk and wrap it in a new instance.

        Parameters:
        file_path: str, path of the serialized model

        Returns:
        a new wrapper instance holding the loaded estimator
        """
        if not os.path.exists(file_path):
            raise FileNotFoundError(f"模型文件不存在: {file_path}")
        loaded = joblib.load(file_path)
        return cls(loaded)

class RandomForestModel(BaseModel):
    """Random forest classifier wrapped in the common model interface."""

    def __init__(self, n_estimators=100, random_state=42):
        estimator = RandomForestClassifier(
            n_estimators=n_estimators,
            random_state=random_state,
        )
        super().__init__(estimator)

class LogisticRegressionModel(BaseModel):
    """Logistic regression classifier wrapped in the common model interface."""

    def __init__(self, C=1.0, max_iter=1000, random_state=42):
        estimator = LogisticRegression(
            C=C,
            max_iter=max_iter,
            random_state=random_state,
        )
        super().__init__(estimator)

class SVMModel(BaseModel):
    """Support vector machine classifier wrapped in the common model interface.

    probability=True is required so the base class can call predict_proba.
    """

    def __init__(self, C=1.0, kernel='rbf', probability=True, random_state=42):
        estimator = SVC(
            C=C,
            kernel=kernel,
            probability=probability,
            random_state=random_state,
        )
        super().__init__(estimator)

class NeuralNetworkModel(BaseModel):
    """MLP classifier wrapped in the common model interface."""

    def __init__(self, hidden_layer_sizes=(100,), max_iter=200, random_state=42):
        estimator = MLPClassifier(
            hidden_layer_sizes=hidden_layer_sizes,
            max_iter=max_iter,
            random_state=random_state,
        )
        super().__init__(estimator)

class ModelFactory:
    """Factory that builds model wrappers from a short type code."""

    @staticmethod
    def create_model(model_type, **kwargs):
        """
        Create a model of the requested type.

        Parameters:
        model_type: str, one of 'rf' (random forest), 'lr' (logistic
            regression), 'svm' (support vector machine), 'nn' (neural network)
        kwargs: forwarded to the concrete wrapper's constructor

        Returns:
        the newly created model instance

        Raises:
        ValueError: when model_type is not one of the supported codes
        """
        # Lambdas defer both construction and name resolution until a
        # matching key is actually selected.
        builders = {
            'rf': lambda: RandomForestModel(**kwargs),
            'lr': lambda: LogisticRegressionModel(**kwargs),
            'svm': lambda: SVMModel(**kwargs),
            'nn': lambda: NeuralNetworkModel(**kwargs),
        }
        if model_type not in builders:
            raise ValueError(f"不支持的模型类型: {model_type}")
        return builders[model_type]()
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix, roc_auc_score, roc_curve
from sklearn.preprocessing import StandardScaler
import os
import sys
import matplotlib.pyplot as plt

# 添加当前目录到系统路径，以便导入model_factory
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from model_factory import ModelFactory

def plot_roc_curve(y_true, y_pred_proba):
    """
    Plot a ROC curve for the given predictions.

    Parameters:
    y_true: ground-truth binary labels
    y_pred_proba: predicted positive-class probabilities
    """
    false_pos, true_pos, _ = roc_curve(y_true, y_pred_proba)
    auc_value = roc_auc_score(y_true, y_pred_proba)

    plt.figure(figsize=(8, 6))
    plt.plot(false_pos, true_pos, color='darkorange', lw=2,
             label=f'ROC curve (AUC = {auc_value:.2f})')
    # Diagonal reference line: performance of a random classifier.
    plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver Operating Characteristic')
    plt.legend(loc="lower right")
    plt.grid(True)
    plt.show()

class B1FeatureExtractor:
    """
    Feature extractor for the B1 signal.

    For each qualifying row, flattens a sliding window of the last
    `sequence_length` days of the configured feature columns and appends
    per-column summary statistics, then standardizes the result.
    """
    def __init__(self, sequence_length: int = 15):
        # Number of past trading days included in each sample window.
        self.sequence_length = sequence_length
        # Feature columns updated to the new set from the development notes.
        # NOTE(review): 'dif' appears in the raw window below but gets no
        # summary statistics in _extract_features — confirm that is intended.
        self.feature_cols = ['j', 'Vol_x_Price_Change', 'Relative_Volume', 'ATR', 'ATR_percentage', 'MA_Trend_Signal', 'Close_Change_Rate', 'Volume_Change_Rate', 'dif']
        # Re-fitted on every prepare_features call via fit_transform below.
        self.scaler = StandardScaler()
        
    def prepare_features(self, df, target_col: str = 'B1_tag'):
        """
        Build feature/target arrays using the past `sequence_length` days per row.

        Parameters:
        df: DataFrame containing the stock data (must include feature_cols
            and target_col)
        target_col: str, name of the target column

        Returns:
        X: standardized feature matrix (empty array when no rows qualify)
        y: target vector aligned with X
        """
        # Training candidates: rows with J < 10 and a non-zero MA trend signal.
        df_filtered = df[(df['j'] < 10) & (df['MA_Trend_Signal'] != 0)].copy()
        
        # Drop the first sequence_length-1 rows of the FILTERED frame.
        # NOTE(review): this trims by position in the filtered frame, while the
        # original_idx guard below checks position in the full frame; the trim
        # may therefore discard rows that do have enough history — confirm.
        df_filtered = df_filtered.iloc[self.sequence_length-1:].copy()
        
        if len(df_filtered) == 0:
            return np.array([]), np.array([])
        
        X = []
        y = []
        
        for i in range(len(df_filtered)):
            # Position of this filtered row within the original DataFrame.
            original_idx = df.index.get_loc(df_filtered.index[i])
            
            # Only emit a sample when a full history window exists.
            if original_idx >= self.sequence_length - 1:
                # Window of the past `sequence_length` days (inclusive of today).
                sequence_data = df.iloc[original_idx-self.sequence_length+1:original_idx+1][self.feature_cols]
                
                # Fill missing values (forward, then backward, then zero) and warn.
                if sequence_data.isnull().any().any():
                    print(f"警告：发现缺失值，使用前向填充和后向填充处理，当前数据: {sequence_data}")
                    sequence_data = sequence_data.ffill().bfill().fillna(0)  # per data-handling rule 8.1
                
                # Raw window plus derived summary statistics.
                features = self._extract_features(sequence_data)
                X.append(features)
                
                # Target value for the current (last) day of the window.
                y.append(df_filtered.iloc[i][target_col])
        
        X = np.array(X)
        y = np.array(y)
        
        # Standardize features; note fit_transform re-fits the scaler here.
        if len(X) > 0:
            X = self.scaler.fit_transform(X)
        
        return X, y
    
    def _extract_features(self, sequence_data):
        """
        Flatten a window and append per-column summary statistics.

        Parameters:
        sequence_data: DataFrame, one window of `sequence_length` rows over
            feature_cols

        Returns:
        features: np.array, flattened raw values followed by stats blocks
        """
        # Raw window values, row-major flattened.
        raw_features = sequence_data.values.flatten()
        
        # Per-column value arrays used for the statistics blocks.
        j_values = sequence_data['j'].values
        vol_x_price_changes = sequence_data['Vol_x_Price_Change'].values
        relative_volumes = sequence_data['Relative_Volume'].values
        atr_values = sequence_data['ATR'].values
        atr_percentage_values = sequence_data['ATR_percentage'].values
        ma_trend_signals = sequence_data['MA_Trend_Signal'].values
        price_changes = sequence_data['Close_Change_Rate'].values
        volume_changes = sequence_data['Volume_Change_Rate'].values
        
        # Summary statistics for one column: mean, std, max, min, median, Q1, Q3.
        def calculate_stats(values):
            return [
                np.mean(values), np.std(values),
                np.max(values), np.min(values),
                np.median(values), np.quantile(values, 0.25), np.quantile(values, 0.75)
            ]  # per feature-engineering rules 7.1-7.3
        
        # Concatenate the raw window with one stats block per column
        # (all columns except 'dif' — see NOTE in __init__).
        features = np.concatenate([
            raw_features,
            calculate_stats(j_values),
            calculate_stats(vol_x_price_changes),
            calculate_stats(relative_volumes),
            calculate_stats(atr_values),
            calculate_stats(atr_percentage_values),
            calculate_stats(ma_trend_signals),
            calculate_stats(price_changes),
            calculate_stats(volume_changes)
        ])  # per feature-engineering rules 3.1-3.3
        return features

def load_stock_data(file_path):
    """
    Load stock data from CSV and validate the required columns.

    Parameters:
    file_path: str, path to the data file

    Returns:
    DataFrame: the stock data sorted ascending by date

    Raises:
    ValueError: when the date column or any other required column is missing
    """
    df = pd.read_csv(file_path)

    # A parseable date column is mandatory for chronological ordering.
    if 'date' not in df.columns:
        raise ValueError("数据文件缺少必要列: date")  # per data-handling rule 5.1
    df['date'] = pd.to_datetime(df['date'])

    df = df.sort_values('date').reset_index(drop=True)

    # Fail fast if any feature column the pipeline depends on is absent.
    required_columns = ['date', 'j', 'Vol_x_Price_Change', 'Relative_Volume',
                        'ATR', 'ATR_percentage', 'MA_Trend_Signal', 'Close_Change_Rate', 'Volume_Change_Rate']
    missing_cols = [col for col in required_columns if col not in df.columns]
    if missing_cols:
        raise ValueError(f"数据文件缺失必要列: {missing_cols}")  # per data-handling rule 5.1

    return df

def evaluate_model(y_true, y_pred_proba, threshold=0.5):
    """
    Print evaluation metrics for binary probability predictions.

    Parameters:
    y_true: ground-truth labels
    y_pred_proba: predicted positive-class probabilities
    threshold: float, probability cutoff for calling a sample positive

    Returns:
    (accuracy, auc_score) tuple
    """
    # Binarize probabilities at the given cutoff.
    y_pred = (y_pred_proba > threshold).astype(int)

    accuracy = accuracy_score(y_true, y_pred)
    # AUC is undefined when only one class is present; report 0 in that case.
    if len(np.unique(y_true)) > 1:
        auc_score = roc_auc_score(y_true, y_pred_proba)
    else:
        auc_score = 0

    print(f"模型准确率: {accuracy:.4f}")
    print(f"AUC Score: {auc_score:.4f}")

    print("\n分类报告:")
    print(classification_report(y_true, y_pred, target_names=['其他', 'B1_tag=1']))

    print("\n混淆矩阵:")
    print(confusion_matrix(y_true, y_pred))

    return accuracy, auc_score

def main():
    """
    Entry point: train B1 models on the 华天科技 (002185) daily data.

    Loads the CSV, extracts sequence features, trains several classifier
    types, evaluates each on a held-out split, and persists the models
    and the fitted scaler under b1_model/.
    """
    # Ensure the output directory exists (sibling directory).
    # Fix: dropped the pointless single-argument os.path.join wrapper.
    os.makedirs("b1_model", exist_ok=True)

    # Path of the daily data file.
    data_file = os.path.join("..", "tdx_data_process", "002185_华天科技_daily.csv")

    if not os.path.exists(data_file):
        print(f"数据文件不存在: {data_file}")
        return

    print("加载股票数据...")
    df = load_stock_data(data_file)
    print(f"数据加载完成，共{len(df)}条记录")

    # Double-check the feature columns (already validated in load_stock_data;
    # kept as a deliberate second line of defense).
    required_columns = ['j', 'Vol_x_Price_Change', 'Relative_Volume', 'ATR',
                        'ATR_percentage', 'MA_Trend_Signal', 'Close_Change_Rate', 'Volume_Change_Rate']
    missing_cols = [col for col in required_columns if col not in df.columns]
    if missing_cols:
        raise ValueError(f"缺失必要列: {missing_cols}")  # per data rules 1.1 and 5.1

    # Fix: removed a dead filtering step that computed first_signal_idx and a
    # df_filtered frame which were never used — row filtering (j < 10,
    # MA_Trend_Signal != 0) is performed inside prepare_features itself.

    feature_extractor = B1FeatureExtractor(sequence_length=15)

    print("准备特征数据...")
    X, y = feature_extractor.prepare_features(df, target_col='B1_tag')

    if len(X) == 0 or len(y) == 0:
        print("没有足够的数据进行训练")
        return

    # Binary target: 1 when B1_tag equals 1, 0 otherwise.
    y_binary = (y == 1).astype(int)

    print(f"特征数据准备完成，X.shape: {X.shape}, y.shape: {y.shape}")
    print(f"B1_tag分布: 1的数量: {np.sum(y_binary==1)}, 0的数量: {np.sum(y_binary==0)}")

    # Hold out 20% of the samples for evaluation.
    X_train, X_test, y_train, y_test = train_test_split(X, y_binary, test_size=0.2, random_state=42)

    # Train and compare several model families.
    model_types = ['rf', 'lr', 'svm', 'nn']
    results = {}

    for model_type in model_types:
        print(f"\n{'='*50}")
        print(f"训练 {model_type.upper()} 模型...")
        print(f"{'='*50}")

        # Per-type hyper-parameters; defaults for the rest.
        if model_type == 'rf':
            model = ModelFactory.create_model(model_type, n_estimators=100)
        elif model_type == 'nn':
            model = ModelFactory.create_model(model_type, hidden_layer_sizes=(64, 32), max_iter=500)
        else:
            model = ModelFactory.create_model(model_type)

        model.train(X_train, y_train)

        # Evaluate on the held-out split.
        y_pred_proba = model.predict(X_test)
        accuracy, auc_score = evaluate_model(y_test, y_pred_proba)
        results[model_type] = {'accuracy': accuracy, 'auc': auc_score}

        # Persist the trained model.
        model_path = os.path.join("b1_model", f"b1_{model_type}_model.pkl")
        model.save_model(model_path)
        print(f"模型已保存到: {model_path}")

    # Persist the scaler so inference can reproduce the training transform.
    scaler_path = os.path.join("b1_model", "b1_scaler.pkl")
    joblib.dump(feature_extractor.scaler, scaler_path)  # per model-file rule 1.1

    # Summary of all trained models.
    print(f"\n{'='*50}")
    print("模型比较结果:")
    print(f"{'='*50}")
    for model_type, metrics in results.items():
        print(f"{model_type.upper()}: 准确率={metrics['accuracy']:.4f}, AUC={metrics['auc']:.4f}")

if __name__ == "__main__":
    main()