import os
from time import time

import joblib
import numpy as np
import pandas as pd
import torch
from sklearn.metrics import f1_score, accuracy_score, precision_score, recall_score, roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.svm import SVC
from tqdm import tqdm
from imblearn.over_sampling import RandomOverSampler
from 多因子.框架 import 数据预处理


def train_model(feature_dfs, features, stock_codes, TRAIN_MODE):
    """Train SVM classifier(s) and return objects ready for prediction.

    Args:
        feature_dfs: list of per-stock DataFrames, each holding the feature
            columns plus a 'target' label column, ordered chronologically.
        features: ordered list of feature column names.
        stock_codes: identifiers aligned with ``feature_dfs`` (used only in
            'individual' mode to key the returned dict).
        TRAIN_MODE: 'combined' -> one model trained on all stocks merged;
            'individual' -> one model per stock.

    Returns:
        combined mode: a fitted SVC carrying ``scaler_`` and ``metadata``.
        individual mode: dict mapping stock code -> fitted SVC (each carrying
            ``scaler_`` and ``metadata``); stocks whose training fails are
            skipped with a printed message.

    Raises:
        ValueError: if TRAIN_MODE is neither 'combined' nor 'individual'.
    """

    def _fit_svc(X_train, y_train):
        # Shared estimator setup: fit the scaler on the training slice only
        # (avoids look-ahead leakage), then fit an RBF SVM with balanced
        # class weights. The scaler rides along on the model object.
        scaler = StandardScaler().fit(X_train)
        model = SVC(
            probability=True,
            class_weight='balanced',
            kernel='rbf',
            C=1.0,
            random_state=42
        ).fit(scaler.transform(X_train), y_train)
        model.scaler_ = scaler
        return model

    # ================== Combined training mode ==================
    if TRAIN_MODE == 'combined':
        # Merge all stocks into one training frame.
        full_df = pd.concat(feature_dfs, ignore_index=True)
        X = full_df[features]
        y = full_df['target']

        # Chronological 80/20 split (no shuffling for time series); the
        # held-out 20% is left for downstream evaluation.
        split_idx = int(len(X) * 0.8)
        X_train = X.iloc[:split_idx]
        y_train = y.iloc[:split_idx]

        model = _fit_svc(X_train, y_train)
        # Attach metadata so callers can introspect how the model was built.
        model.metadata = {
            'mode': 'combined',
            'scaler': model.scaler_,
            'features': features,
            'train_size': len(X_train)
        }
        return model

    # ================== Individual training mode ==================
    elif TRAIN_MODE == 'individual':
        trained_models = {}

        for code, df in zip(stock_codes, feature_dfs):
            try:
                X = df[features]
                y = df['target']

                # Chronological 80/20 split, per stock.
                split_idx = int(len(X) * 0.8)
                model = _fit_svc(X.iloc[:split_idx], y.iloc[:split_idx])

                # Mirror the combined-mode metadata for consistency.
                model.metadata = {
                    'mode': 'individual',
                    'scaler': model.scaler_,
                    'features': features,
                    'train_size': split_idx
                }
                trained_models[code] = model

            except Exception as e:
                # Best-effort: one bad stock must not abort the whole batch.
                print(f"训练 {code} 失败: {str(e)}")
                continue

        return trained_models

    else:
        raise ValueError("无效模式，可选 'combined' 或 'individual'")


import os
import joblib
import numpy as np
import pandas as pd
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_auc_score
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import RandomOverSampler


class SVMModel:
    def __init__(self, features, config=None):
        """SVM classifier over flattened rolling windows of scaled features.

        Refactor notes:
        1. Removed coupling with PyTorch.
        2. Strengthened feature-name safety checks.
        3. Streamlined data validation flow.

        Args:
            features: ordered list of feature column names the model expects.
            config: optional dict overriding any of the defaults below.
        """
        # Default configuration, merged with caller overrides.
        self.config = {
            "model_path": "./model/svm_model.pkl",
            "scaler_path": "./model/svm_scaler.pkl",
            "seq_length": 30,
            "kernel": 'rbf',
            "C": 1.0,
            "class_weight": 'balanced',
            "probability": True,
            "random_state": 42,
            "feature_verify": True  # switch for strict feature validation
        }
        if config:
            self.config.update(config)

        self.features = features
        self.model = None
        self.scaler = StandardScaler()
        self._load_if_exists()

    def _load_if_exists(self):
        """Safely load a persisted model, tolerating both storage layouts.

        ``train`` persists a dict bundle {'model', 'scaler', 'features',
        'config'} to ``model_path``; legacy files may instead hold a bare
        estimator with the scaler stored separately at ``scaler_path``.
        The previous implementation always assumed the bare-model layout, so
        a bundle written by ``train`` was loaded as a dict into
        ``self.model``. On any failure we fall back to a clean state.
        """
        try:
            if os.path.exists(self.config['model_path']):
                print(f"加载预训练模型: {self.config['model_path']}")
                payload = joblib.load(self.config['model_path'])
                if isinstance(payload, dict):
                    # Bundle layout produced by `train`.
                    self.model = payload['model']
                    self.scaler = payload['scaler']
                else:
                    # Legacy layout: bare model plus separate scaler file.
                    self.model = payload
                    self.scaler = joblib.load(self.config['scaler_path'])
                # Verify feature consistency after loading.
                if hasattr(self.scaler, 'feature_names_in_'):
                    if list(self.scaler.feature_names_in_) != self.features:
                        raise ValueError("加载的scaler特征与当前配置不匹配")
        except Exception as e:
            print(f"模型加载失败: {str(e)}")
            self.model = None
            self.scaler = StandardScaler()

    def _validate_features(self, data):
        """Core feature-validation method (no-op when verification is off).

        Raises:
            ValueError: when required columns are missing or out of order.
            TypeError: when verification is on and input is not a DataFrame.
        """
        if self.config['feature_verify']:
            if isinstance(data, pd.DataFrame):
                missing = set(self.features) - set(data.columns)
                if missing:
                    raise ValueError(f"缺失特征列: {missing}")
                # Strict column-order check.
                if list(data.columns) != self.features:
                    raise ValueError(f"特征顺序错误，要求顺序: {self.features}")
            else:
                raise TypeError("输入必须为DataFrame以进行特征校验")

    def prepare_data(self, combined_data):
        """Safe data-preparation pipeline.

        Returns:
            ((X_train, y_train), (X_val, y_val)) where the training pair has
            been rebalanced with random oversampling and each sample is a
            flattened ``seq_length``-day window of scaled features.

        Raises:
            RuntimeError: wrapping any failure, with input diagnostics.
        """
        try:
            # Feature validation.
            self._validate_features(combined_data)

            # Data diagnostics.
            print("\n=== 数据诊断 ===")
            print(f"总样本量: {len(combined_data)}")
            print(f"特征缺失统计:\n{combined_data[self.features].isnull().sum()}")

            # Chronological split (no shuffling for time series).
            train_data, val_data = train_test_split(
                combined_data, test_size=0.2, shuffle=False
            )

            # Fit the scaler on the training slice only to avoid leakage.
            self.scaler.fit(train_data[self.features])
            train_scaled = self.scaler.transform(train_data[self.features])
            val_scaled = self.scaler.transform(val_data[self.features])

            # Build flattened rolling-window samples.
            def _create_dataset(data, original_df):
                X, y = [], []
                seq_len = self.config['seq_length']
                for i in range(len(data) - seq_len):
                    seq = data[i:i + seq_len]
                    # Label: 1 if the close-to-close return over the next
                    # (up to) 3 days exceeds 0.5%. Use positional `.iloc`
                    # indexing — plain `[-1]` is label-based on an integer
                    # index and raised KeyError.
                    future_3day = original_df['close'].iloc[
                                  i + seq_len:i + seq_len + 3]
                    y.append(1 if (future_3day.iloc[-1] / future_3day.iloc[0] - 1) > 0.005 else 0)
                    X.append(seq.flatten())  # flatten window to one sample row
                return np.array(X), np.array(y)

            X_train, y_train = _create_dataset(train_scaled, train_data)
            X_val, y_val = _create_dataset(val_scaled, val_data)

            # Rebalance the training set only; validation stays untouched.
            ros = RandomOverSampler(random_state=self.config['random_state'])
            return ros.fit_resample(X_train, y_train), (X_val, y_val)

        except Exception as e:
            error_msg = f"数据准备失败: {str(e)}\n输入数据信息:\n{combined_data.head()}"
            raise RuntimeError(error_msg) from e

    def train(self, combined_data):
        """Safe training pipeline; returns validation metrics from ``validate``."""
        try:
            # Data preparation.
            (X_train, y_train), (X_val, y_val) = self.prepare_data(combined_data)

            # Model initialization from configuration.
            self.model = SVC(
                kernel=self.config['kernel'],
                C=self.config['C'],
                class_weight=self.config['class_weight'],
                probability=self.config['probability'],
                random_state=self.config['random_state']
            )

            # Fit.
            print("\n=== 训练开始 ===")
            self.model.fit(X_train, y_train)

            # Persist the full bundle (model + feature metadata), and also
            # the scaler on its own path so legacy loaders keep working.
            os.makedirs(os.path.dirname(self.config['model_path']), exist_ok=True)
            joblib.dump({
                'model': self.model,
                'scaler': self.scaler,
                'features': self.features,
                'config': self.config
            }, self.config['model_path'])
            joblib.dump(self.scaler, self.config['scaler_path'])
            print(f"模型已保存至 {self.config['model_path']}")

            return self.validate(X_val, y_val)

        except Exception as e:
            raise RuntimeError(f"训练失败: {str(e)}") from e

    def validate(self, X_val, y_val):
        """Evaluate on the validation set with a tuned decision threshold.

        Returns:
            dict of threshold, accuracy, precision, recall, f1, auc and the
            raw positive-class probabilities (as a JSON-serializable list).
        """
        try:
            val_probs = self.model.predict_proba(X_val)[:, 1]

            # Pick the threshold in [0.1, 0.9] that maximizes F1.
            thresholds = np.linspace(0.1, 0.9, 9)
            best_threshold = max(
                thresholds,
                key=lambda th: f1_score(y_val, (val_probs > th).astype(int))
            )

            # Final metrics at the chosen threshold.
            final_preds = (val_probs > best_threshold).astype(int)
            return {
                'threshold': best_threshold,
                'accuracy': accuracy_score(y_val, final_preds),
                'precision': precision_score(y_val, final_preds, zero_division=0),
                'recall': recall_score(y_val, final_preds, zero_division=0),
                'f1': f1_score(y_val, final_preds, zero_division=0),
                'auc': roc_auc_score(y_val, val_probs),
                'probs': val_probs.tolist()  # JSON-serializable
            }
        except Exception as e:
            raise RuntimeError(f"验证失败: {str(e)}") from e

    def predict(self, input_data):
        """Safe prediction interface.

        Args:
            input_data: DataFrame with the configured feature columns, or —
                only when ``feature_verify`` is off — a 2-D array shaped
                (samples, n_features).

        Returns:
            Probability (float) of the positive class for the latest window.

        Raises:
            RuntimeError: wrapping any failure, with input diagnostics.
        """
        try:
            # Input normalization.
            if isinstance(input_data, pd.DataFrame):
                self._validate_features(input_data)
                raw_df = input_data[self.features].copy()
            else:
                # Array input is only allowed with validation disabled.
                if self.config['feature_verify']:
                    raise TypeError("特征校验开启时必须使用DataFrame输入")
                input_array = np.asarray(input_data, dtype=np.float32)
                if input_array.ndim != 2 or input_array.shape[1] != len(self.features):
                    raise ValueError(f"输入维度错误，要求(样本数, {len(self.features)})")
                raw_df = pd.DataFrame(input_array, columns=self.features)

            # Standardize with the fitted scaler.
            scaled = self.scaler.transform(raw_df)

            # Need a full window of history.
            if scaled.shape[0] < self.config['seq_length']:
                raise ValueError(f"需要至少{self.config['seq_length']}天数据，当前{scaled.shape[0]}")

            # Flatten the most recent window into a single sample.
            seq = scaled[-self.config['seq_length']:].flatten().reshape(1, -1)

            # Predict the positive-class probability.
            return float(self.model.predict_proba(seq)[0][1])

        except Exception as e:
            error_detail = (
                f"预测失败: {str(e)}\n"
                f"输入类型: {type(input_data)}\n"
                f"特征配置: {self.features}"
            )
            if hasattr(input_data, 'shape'):
                error_detail += f"\n输入维度: {input_data.shape}"
            raise RuntimeError(error_detail) from e


class LSTMModel:
    def __init__(self, features, config=None):
        """Bidirectional-LSTM binary classifier over rolling feature windows.

        Args:
            features: ordered list of feature column names.
            config: optional dict overriding the defaults below.
        """
        self.features = features
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # Default configuration.
        self.config = {
            "model_path": "./model/lstm_model.pth",
            "scaler_path": "./model/lstm_scaler.pkl",
            "seq_length": 30,
            "hidden_size": 128,
            "num_layers": 2,
            "dropout": 0.3,
            "batch_size": 64,
            "epochs": 200,
            "lr": 0.001,
            "weight_decay": 1e-4,
            "early_stop_patience": 15
        }
        if config:
            self.config.update(config)

        # Component initialization.
        self.scaler = MinMaxScaler()
        self.model = None
        self._init_model()
        self._load_if_exists()

    class _EnhancedLSTM(torch.nn.Module):
        """Bidirectional LSTM -> mean-pool over time -> dropout -> linear logit."""

        def __init__(self, input_size, config):
            super().__init__()
            self.lstm = torch.nn.LSTM(
                input_size=input_size,
                hidden_size=config['hidden_size'],
                num_layers=config['num_layers'],
                batch_first=True,
                bidirectional=True,
                # PyTorch warns if dropout is set on a single-layer LSTM.
                dropout=config['dropout'] if config['num_layers'] > 1 else 0
            )
            self.dropout = torch.nn.Dropout(config['dropout'])
            # *2 because the LSTM is bidirectional.
            self.fc = torch.nn.Linear(config['hidden_size'] * 2, 1)

            # Parameter initialization: orthogonal recurrent/input weights
            # and a small positive bias.
            for name, param in self.lstm.named_parameters():
                if 'weight' in name:
                    torch.nn.init.orthogonal_(param)
                elif 'bias' in name:
                    torch.nn.init.constant_(param, 0.1)
            torch.nn.init.xavier_uniform_(self.fc.weight)

        def forward(self, x):
            # x: (batch, seq_len, input_size) -> (batch, 1) raw logits.
            out, _ = self.lstm(x)
            out = self.dropout(out.mean(dim=1))  # mean-pool across time steps
            return self.fc(out)

    def _init_model(self):
        """Build the network on the target device and report its size."""
        self.model = self._EnhancedLSTM(
            input_size=len(self.features),
            config=self.config
        ).to(self.device)
        print(f"模型结构:\n{self.model}")
        print(f"可训练参数数量: {sum(p.numel() for p in self.model.parameters() if p.requires_grad)}")

    def _load_if_exists(self):
        """Load persisted weights and scaler when a checkpoint exists."""
        if os.path.exists(self.config['model_path']):
            print("加载预训练模型...")
            self.model.load_state_dict(torch.load(self.config['model_path'], map_location=self.device))
            self.scaler = joblib.load(self.config['scaler_path'])

    def prepare_data(self, data):
        """Data-preparation pipeline.

        Returns:
            (X_train, y_train, X_val, y_val, pos_weight) as device tensors;
            ``pos_weight`` is len(y_train)/sum(y_train) computed BEFORE
            oversampling (0 when there are no positive samples).
        """
        print("\n=== 数据诊断 ===")
        print(f"总样本量: {len(data)}")
        print(f"特征缺失统计:\n{data[self.features].isnull().sum()}")

        # Chronological split (no shuffling for time series).
        train_data, val_data = train_test_split(data, test_size=0.2, shuffle=False)

        # Fit the scaler on the training slice only to avoid leakage.
        self.scaler.fit(train_data[self.features])
        train_scaled = self.scaler.transform(train_data[self.features])
        val_scaled = self.scaler.transform(val_data[self.features])

        # Build rolling-window sequences.
        def _create_sequences(data, original_df):
            X, y = [], []
            seq_len = self.config['seq_length']
            for i in range(len(data) - seq_len):
                # Label: 1 if the close-to-close return over the next (up to)
                # 3 days exceeds 0.5%. Use positional `.iloc` indexing —
                # plain `[-1]` is label-based on an integer index and raised
                # KeyError.
                future_3day = original_df['close'].iloc[i + seq_len:i + seq_len + 3]
                y.append(1 if (future_3day.iloc[-1] / future_3day.iloc[0] - 1) > 0.005 else 0)
                X.append(data[i:i + seq_len])
            return np.array(X), np.array(y)

        X_train, y_train = _create_sequences(train_scaled, train_data)
        X_val, y_val = _create_sequences(val_scaled, val_data)

        # Class-imbalance diagnostics.
        print("\n=== 原始数据分布 ===")
        print(f"训练集正样本比例: {y_train.mean():.2%}")
        print(f"验证集正样本比例: {y_val.mean():.2%}")

        # RandomOverSampler needs 2-D input: flatten windows, resample,
        # then reshape back to (samples, seq_len, n_features).
        X_train_flat = X_train.reshape(X_train.shape[0], -1)
        ros = RandomOverSampler(random_state=42)
        X_res, y_res = ros.fit_resample(X_train_flat, y_train)
        X_train = X_res.reshape(-1, self.config['seq_length'], len(self.features))

        print("\n=== 过采样后分布 ===")
        print(f"新训练集样本量: {len(X_train)}")
        print(f"正样本比例: {y_res.mean():.2%}")

        return (
            torch.FloatTensor(X_train).to(self.device),
            torch.FloatTensor(y_res[:, None]).to(self.device),
            torch.FloatTensor(X_val).to(self.device),
            torch.FloatTensor(y_val[:, None]).to(self.device),
            len(y_train) / sum(y_train) if sum(y_train) > 0 else 0
        )

    def train(self, combined_data):
        """Full-batch training loop with early stopping.

        Returns:
            Best validation AUC reached, or None when an existing checkpoint
            makes training a no-op.
        """
        if os.path.exists(self.config['model_path']):
            print("检测到已有模型，跳过训练")
            return

        print("开始模型训练...")
        os.makedirs(os.path.dirname(self.config['model_path']), exist_ok=True)

        # Prepare data.
        X_train, y_train, X_val, y_val, pos_weight = self.prepare_data(combined_data)

        # Loss definition: focal loss down-weights easy examples.
        class FocalLoss(torch.nn.Module):
            def __init__(self, alpha=0.25, gamma=2):
                super().__init__()
                self.alpha = alpha
                self.gamma = gamma

            def forward(self, inputs, targets):
                BCE_loss = torch.nn.functional.binary_cross_entropy_with_logits(
                    inputs, targets, reduction='none')
                pt = torch.exp(-BCE_loss)
                return torch.mean(self.alpha * (1 - pt) ** self.gamma * BCE_loss)

        # Training configuration.
        # NOTE(review): focal-loss alpha is conventionally in [0, 1], but
        # pos_weight here is len(y)/sum(y) >= 1, which uniformly rescales the
        # loss rather than reweighting classes — confirm the intent.
        criterion = FocalLoss(alpha=pos_weight)
        optimizer = torch.optim.AdamW(
            self.model.parameters(),
            lr=self.config['lr'],
            weight_decay=self.config['weight_decay']
        )
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optimizer, 'min', patience=7, factor=0.5
        )

        best_auc = -np.inf
        early_stop_counter = 0

        # Full-batch training loop.
        for epoch in range(self.config['epochs']):
            self.model.train()
            outputs = self.model(X_train)
            loss = criterion(outputs, y_train)

            optimizer.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)
            optimizer.step()

            # Validation step; scheduler tracks validation loss.
            val_metrics = self.validate(X_val, y_val)
            scheduler.step(val_metrics['loss'])

            # Early stopping keyed on best validation AUC.
            if val_metrics['auc'] > best_auc:
                best_auc = val_metrics['auc']
                early_stop_counter = 0
                torch.save(self.model.state_dict(), self.config['model_path'])
                joblib.dump(self.scaler, self.config['scaler_path'])
                print(f"Epoch {epoch + 1}: 保存最佳模型 | AUC: {best_auc:.4f}")
            else:
                early_stop_counter += 1
                if early_stop_counter >= self.config['early_stop_patience']:
                    print(f"Epoch {epoch + 1}: 早停触发")
                    break

            # Progress monitoring every 5 epochs.
            if epoch % 5 == 0:
                print(f"\nEpoch {epoch + 1}/{self.config['epochs']}")
                print(f"  Train Loss: {loss.item():.4f}  Val Loss: {val_metrics['loss']:.4f}")
                print(f"  Val AUC: {val_metrics['auc']:.4f}  Acc: {val_metrics['accuracy']:.2%}")

        return best_auc

    def validate(self, X_val, y_val):
        """Evaluate on the validation set with a tuned decision threshold."""
        self.model.eval()
        with torch.no_grad():
            outputs = self.model(X_val)
            probs = torch.sigmoid(outputs).cpu().numpy()
            loss = torch.nn.functional.binary_cross_entropy_with_logits(outputs, y_val).item()

            # Pick the threshold in [0.1, 0.9] that maximizes F1.
            thresholds = np.linspace(0.1, 0.9, 9)
            best_threshold = max(thresholds, key=lambda th: f1_score(y_val.cpu(), (probs > th).astype(int)))
            preds = (probs > best_threshold).astype(int)

            return {
                'loss': loss,
                'auc': roc_auc_score(y_val.cpu(), probs),
                'accuracy': accuracy_score(y_val.cpu(), preds),
                'precision': precision_score(y_val.cpu(), preds, zero_division=0),
                'recall': recall_score(y_val.cpu(), preds, zero_division=0),
                'f1': f1_score(y_val.cpu(), preds, zero_division=0),
                'threshold': best_threshold
            }

    def predict(self, input_data):
        """Unified prediction interface (feature-name safe).

        Args:
            input_data: DataFrame with exactly the configured feature columns
                (in order), or a 2-D array shaped (samples, n_features).

        Returns:
            Probability (float) of the positive class for the latest window.

        Raises:
            RuntimeError: wrapping any failure, with input diagnostics.
        """
        try:
            # ===== Feature-name safety conversion =====
            if isinstance(input_data, pd.DataFrame):
                # Exact column-order check.
                if list(input_data.columns) != self.features:
                    raise ValueError(f"特征列顺序不匹配，要求顺序：{self.features}")
                raw_df = input_data[self.features].copy()
            else:
                # Convert array input to a DataFrame with named columns.
                input_array = np.asarray(input_data)
                if input_array.ndim != 2 or input_array.shape[1] != len(self.features):
                    raise ValueError(f"输入维度错误，要求(样本数, {len(self.features)})，实际：{input_array.shape}")
                raw_df = pd.DataFrame(input_array, columns=self.features)

            # ===== Scale with the fitted MinMaxScaler =====
            scaled_data = self.scaler.transform(raw_df)

            # ===== Sequence validation and construction =====
            if len(scaled_data) < self.config['seq_length']:
                raise ValueError(f"需要至少{self.config['seq_length']}天数据，当前{len(scaled_data)}天")

            # Most recent window -> (1, seq_len, n_features).
            seq = scaled_data[-self.config['seq_length']:]
            tensor_seq = torch.FloatTensor(seq).unsqueeze(0).to(self.device)

            # ===== Run inference =====
            self.model.eval()
            with torch.no_grad():
                logits = self.model(tensor_seq)
                return torch.sigmoid(logits).item()

        except Exception as e:
            error_detail = (
                f"预测失败: {str(e)}\n"
                f"输入数据类型: {type(input_data)}\n"
                f"预期特征: {self.features}"
            )
            # Guard the diagnostic: a 1-D ndarray has no shape[1], and an
            # IndexError here would mask the original exception.
            if isinstance(input_data, (pd.DataFrame, np.ndarray)) and getattr(input_data, 'ndim', 0) == 2:
                error_detail += f"\n实际特征数: {input_data.shape[1]}"
            raise RuntimeError(error_detail) from e

