import numpy as np
from collections import defaultdict


class SVMClassifier:
    """Soft-margin linear SVM trained by batch subgradient descent.

    Minimizes the objective
        L(w, b) = (1/n) * sum_i max(0, 1 - y_i * (w . x_i + b))
                  + ||w||^2 / (2 * C * n)
    so a larger ``C`` means weaker regularization.

    Parameters
    ----------
    C : float
        Inverse regularization strength; must be > 0.
    max_iter : int
        Maximum number of full passes (epochs) over the training data.
    learning_rate : float
        Step size for the gradient update.
    random_state : int or None
        Seed for numpy's global RNG (reproducibility hook; the current
        optimizer itself is deterministic).
    """

    def __init__(self, C=0.1, max_iter=1000, learning_rate=0.01, random_state=None):
        self.C = C  # inverse regularization strength
        self.max_iter = max_iter
        self.learning_rate = learning_rate
        self.random_state = random_state
        self.w = None  # weight vector, set by fit()
        self.b = None  # bias term, set by fit()
        self.feature_importances_ = None  # normalized |w|, set by fit()

    def fit(self, X, y):
        """Fit the model on X of shape (n_samples, n_features) and labels y.

        Labels may be given as 0/1; they are mapped to -1/+1 internally,
        so any non-zero label is treated as the positive class.

        Returns
        -------
        self : the fitted estimator (allows chaining).
        """
        if self.random_state is not None:
            np.random.seed(self.random_state)

        X = np.asarray(X, dtype=float)
        # Map {0, 1} labels onto the {-1, +1} convention used by the hinge loss.
        y = np.where(np.asarray(y) == 0, -1, 1)

        n_samples, n_features = X.shape
        self.w = np.zeros(n_features)
        self.b = 0.0

        max_grad_norm = 1.0  # gradient-clipping threshold

        for epoch in range(self.max_iter):
            dw = np.zeros(n_features)
            db = 0.0
            hinge_loss = 0.0

            for i in range(n_samples):
                margin = y[i] * (np.dot(X[i], self.w) + self.b)
                if margin < 1:  # margin violated: hinge subgradient is non-zero
                    dw += -y[i] * X[i]
                    db += -y[i]
                    hinge_loss += max(0, 1 - margin)

            # Average the hinge subgradient and add the L2 regularization term.
            dw = dw / n_samples + self.w / (self.C * n_samples)
            db = db / n_samples

            # Gradient clipping.  BUG FIX: the scaling was previously applied
            # unconditionally outside this branch, raising NameError when the
            # very first epoch's gradient norm was <= the threshold, and
            # re-applying a stale scale factor on later non-clipped epochs.
            grad_norm = np.sqrt(np.sum(dw ** 2) + db ** 2)
            if grad_norm > max_grad_norm:
                scale = max_grad_norm / grad_norm
                dw *= scale
                db *= scale

            # Gradient-descent parameter update.
            self.w -= self.learning_rate * dw
            self.b -= self.learning_rate * db

            # Total objective = averaged hinge loss + L2 penalty on w.
            reg_term = 0.5 * np.linalg.norm(self.w, ord=2) ** 2 / (self.C * n_samples)
            total_loss = (hinge_loss / n_samples) + reg_term

            # Periodic progress report.
            if epoch % 100 == 0:
                print(f"Epoch {epoch}, Loss: {total_loss:.4f}, Hinge: {hinge_loss / n_samples:.4f}, Reg: {reg_term:.4f}, ||w||: {np.linalg.norm(self.w):.4f}")

            # Early stopping once the (clipped) gradient has vanished.
            if np.linalg.norm(dw) < 1e-5 and abs(db) < 1e-5:
                break

        self._compute_feature_importances()
        return self

    def _compute_feature_importances(self):
        """Set feature_importances_ to |w| normalized to sum to 1.

        Falls back to an all-zero vector when w is identically zero, and
        raises ValueError when called before fit().
        """
        if self.w is None:
            raise ValueError("权重向量w未初始化，请先调用fit()方法")
        self.feature_importances_ = np.abs(self.w)
        total = np.sum(self.feature_importances_)
        if total > 0:
            self.feature_importances_ /= total
        else:
            self.feature_importances_ = np.zeros_like(self.w)

    def predict(self, X):
        """Return hard 0/1 class labels from the sign of the decision score."""
        scores = np.dot(X, self.w) + self.b
        return (scores > 0).astype(int)

    def predict_proba(self, X):
        """Return class probabilities of shape (n_samples, 2) as [P(0), P(1)].

        Uses a sigmoid of the decision score, computed via logaddexp for
        numerical stability (no overflow for large |score|).  NOTE(review):
        these are uncalibrated scores, not true probabilities — a proper
        calibration (e.g. Platt scaling) would fit the sigmoid to held-out data.
        """
        scores = np.dot(X, self.w) + self.b
        # sigmoid(s) = 1 / (1 + exp(-s)) = exp(-logaddexp(0, -s)), stable for any s.
        prob = np.exp(-np.logaddexp(0, -scores))
        return np.vstack([1 - prob, prob]).T