import numpy as np
import random


class SVR:
    def __init__(self, kernel='rbf', C=1.0, epsilon=0.1, tol=1e-3, max_iter=1000, gamma=None):
        """
        初始化SVR模型

        参数:
        kernel: 核函数类型，可选 'linear', 'poly', 'rbf'
        C: 惩罚系数，控制错误的惩罚力度
        epsilon: ε-不敏感损失函数中的epsilon值，定义了无错误区域的宽度
        tol: 停止条件的容忍度
        max_iter: 最大迭代次数
        gamma: 核函数参数（RBF核的宽度参数）

        SVR原理:
        支持向量回归通过寻找一个函数f(x)，使得:
        1. 所有样本点与f(x)的偏差不超过epsilon
        2. f(x)尽可能平滑（即具有最小的二范数）
        """
        self.kernel = kernel
        self.C = C
        self.epsilon = epsilon
        self.tol = tol
        self.max_iter = max_iter
        self.gamma = gamma

        # 模型参数
        self.X = None
        self.y = None
        self.alphas_pos = None  # 拉格朗日乘子α*
        self.alphas_neg = None  # 拉格朗日乘子α
        self.b = 0.0           # 偏置项
        self.kernel_matrix = None  # 核矩阵，用于存储计算结果

    def _kernel_function(self, x1, x2):
        """
        计算核函数值

        SVR中核函数的作用:
        核函数使SVR能够在高维特征空间中构建非线性回归模型，同时避免显式计算高维映射
        """
        if self.kernel == 'linear':
            # 线性核: K(x1,x2) = x1·x2
            return np.dot(x1, x2)

        elif self.kernel == 'poly':
            # 多项式核: K(x1,x2) = (gamma*x1·x2 + 1)^3
            return (0.1 * np.dot(x1, x2) + 1) ** 3

        elif self.kernel == 'rbf':
            # 径向基核(高斯核): K(x1,x2) = exp(-gamma||x1-x2||^2)
            if self.gamma is None:
                gamma = 1.0 / x1.shape[0]
            else:
                gamma = self.gamma
            return np.exp(-gamma * np.sum((x1 - x2) ** 2))

    def _init_kernel_matrix(self, X):
        """
        初始化核矩阵

        原理:
        提前计算所有样本点之间的核函数值，避免重复计算，提高效率
        """
        n_samples = X.shape[0]
        K = np.zeros((n_samples, n_samples))
        for i in range(n_samples):
            for j in range(n_samples):
                K[i, j] = self._kernel_function(X[i], X[j])
        return K

    def _select_random_index(self, i, n_samples):
        """
        随机选择一个与i不同的索引

        原理:
        SMO算法需要同时优化两个拉格朗日乘子，因此需要选择第二个不同的样本
        """
        j = i
        while j == i:
            j = random.randint(0, n_samples - 1)
        return j

    def _select_second_alpha(self, i, error_i, errors):
        """
        启发式地选择第二个alpha

        原理:
        选择使|E_i - E_j|最大的j，这样可以使优化步骤更有效
        """
        n_samples = len(errors)
        max_diff = 0
        j = -1

        # 找到使|E_i - E_j|最大的j
        for k in range(n_samples):
            error_diff = abs(error_i - errors[k])
            if error_diff > max_diff:
                max_diff = error_diff
                j = k

        # 如果没有找到合适的j，随机选择
        if j == -1:
            j = self._select_random_index(i, n_samples)

        return j

    def _clip_alpha(self, alpha, L, H):
        """
        将alpha限制在[L, H]范围内

        原理:
        由于拉格朗日乘子需要满足约束条件，所以需要将其裁剪在有效范围内
        """
        if alpha < L:
            return L
        elif alpha > H:
            return H
        else:
            return alpha

    def _compute_error(self, index):
        """
        计算给定样本的预测误差

        原理:
        误差 = 预测值 - 真实值
        """
        prediction = self._predict_point(self.X[index])
        return prediction - self.y[index]

    def _predict_point(self, x):
        """
        对单个样本点进行预测

        SVR预测公式:
        f(x) = sum((alpha^*_i - alpha_i) * K(x_i, x)) + b
        """
        n_samples = self.X.shape[0]
        result = 0.0

        for i in range(n_samples):
            kernel_value = self._kernel_function(self.X[i], x)
            result += (self.alphas_pos[i] - self.alphas_neg[i]) * kernel_value

        return result + self.b

    def fit(self, X, y):
        """
        训练SVR模型

        SVR训练过程:
        使用SMO算法求解对偶问题，找到最优的拉格朗日乘子和偏置项
        """
        # 转换为numpy数组
        X = np.array(X)
        y = np.array(y)

        n_samples, n_features = X.shape
        self.X = X
        self.y = y

        # 初始化拉格朗日乘子
        self.alphas_pos = np.zeros(n_samples)  # α*
        self.alphas_neg = np.zeros(n_samples)  # α
        self.b = 0.0

        # 初始化核矩阵
        self.kernel_matrix = self._init_kernel_matrix(X)

        # SMO算法迭代
        iteration = 0
        changed_alphas = 0
        examine_all = True

        # 存储每个样本的预测误差
        errors = [self._compute_error(i) for i in range(n_samples)]

        while (iteration < self.max_iter) and (changed_alphas > 0 or examine_all):
            changed_alphas = 0

            # 遍历所有样本
            for i in range(n_samples):
                error_i = errors[i]

                # 检查KKT条件是否违反
                r_pos_i = error_i + self.epsilon - self.y[i]
                r_neg_i = error_i - self.epsilon + self.y[i]

                if ((r_pos_i < -self.tol and self.alphas_pos[i] < self.C) or
                    (r_pos_i > self.tol and self.alphas_pos[i] > 0) or
                    (r_neg_i < -self.tol and self.alphas_neg[i] < self.C) or
                        (r_neg_i > self.tol and self.alphas_neg[i] > 0)):

                    # 选择第二个alpha
                    j = self._select_second_alpha(i, error_i, errors)
                    error_j = errors[j]

                    # 记录旧值
                    alpha_pos_i_old = self.alphas_pos[i]
                    alpha_neg_i_old = self.alphas_neg[i]
                    alpha_pos_j_old = self.alphas_pos[j]
                    alpha_neg_j_old = self.alphas_neg[j]

                    # 计算约束边界
                    L_pos = max(0, self.alphas_pos[j] - self.alphas_pos[i])
                    H_pos = min(self.C, self.C +
                                self.alphas_pos[j] - self.alphas_pos[i])
                    L_neg = max(0, self.alphas_neg[j] - self.alphas_neg[i])
                    H_neg = min(self.C, self.C +
                                self.alphas_neg[j] - self.alphas_neg[i])

                    if L_pos == H_pos and L_neg == H_neg:
                        continue

                    # 计算核函数值
                    eta = 2 * \
                        self.kernel_matrix[i, j] - self.kernel_matrix[i,
                                                                      i] - self.kernel_matrix[j, j]

                    if eta >= 0:
                        continue

                    # 更新alpha_pos_j
                    self.alphas_pos[j] -= (error_i - error_j) / eta
                    self.alphas_pos[j] = self._clip_alpha(
                        self.alphas_pos[j], L_pos, H_pos)

                    # 如果变化太小，跳过
                    if abs(self.alphas_pos[j] - alpha_pos_j_old) < 1e-5:
                        continue

                    # 更新alpha_pos_i
                    self.alphas_pos[i] += (alpha_pos_j_old -
                                           self.alphas_pos[j])

                    # 更新alpha_neg_j
                    self.alphas_neg[j] -= (error_i - error_j) / eta
                    self.alphas_neg[j] = self._clip_alpha(
                        self.alphas_neg[j], L_neg, H_neg)

                    # 如果变化太小，跳过
                    if abs(self.alphas_neg[j] - alpha_neg_j_old) < 1e-5:
                        continue

                    # 更新alpha_neg_i
                    self.alphas_neg[i] += (alpha_neg_j_old -
                                           self.alphas_neg[j])

                    # 更新偏置项b
                    b1 = self.b - error_i \
                        - (self.alphas_pos[i] - alpha_pos_i_old) * self.kernel_matrix[i, i] \
                        - (self.alphas_pos[j] - alpha_pos_j_old) * self.kernel_matrix[i, j] \
                        + (self.alphas_neg[i] - alpha_neg_i_old) * self.kernel_matrix[i, i] \
                        + (self.alphas_neg[j] - alpha_neg_j_old) * \
                        self.kernel_matrix[i, j]

                    b2 = self.b - error_j \
                        - (self.alphas_pos[i] - alpha_pos_i_old) * self.kernel_matrix[i, j] \
                        - (self.alphas_pos[j] - alpha_pos_j_old) * self.kernel_matrix[j, j] \
                        + (self.alphas_neg[i] - alpha_neg_i_old) * self.kernel_matrix[i, j] \
                        + (self.alphas_neg[j] - alpha_neg_j_old) * \
                        self.kernel_matrix[j, j]

                    # 更新b
                    if 0 < self.alphas_pos[i] < self.C:
                        self.b = b1
                    elif 0 < self.alphas_pos[j] < self.C:
                        self.b = b2
                    else:
                        self.b = (b1 + b2) / 2

                    # 更新误差缓存
                    for k in range(n_samples):
                        errors[k] = self._compute_error(k)

                    changed_alphas += 1

            if examine_all:
                examine_all = False
            elif changed_alphas == 0:
                examine_all = True

            iteration += 1

        print(f"训练完成，迭代次数: {iteration}")

        # 识别支持向量
        sv_indices = np.where((self.alphas_pos > 1e-5) |
                              (self.alphas_neg > 1e-5))[0]
        print(f"支持向量数量: {len(sv_indices)}/{n_samples}")

        return self

    def predict(self, X):
        """
        对新数据进行预测

        原理:
        使用学习到的参数，根据SVR的决策函数计算预测值
        """
        X = np.array(X)
        n_samples = X.shape[0]
        predictions = np.zeros(n_samples)

        for i in range(n_samples):
            predictions[i] = self._predict_point(X[i])

        return predictions


# Usage example
if __name__ == "__main__":
    # Build a noisy sine-wave regression dataset: 100 sorted points in [0, 5).
    np.random.seed(42)
    X = np.sort(5 * np.random.rand(100, 1), axis=0)
    y = np.sin(X).ravel() + 0.1 * np.random.randn(100)

    # Fit an RBF-kernel SVR on the data.
    svr = SVR(kernel='rbf', C=0.5, epsilon=0.1, gamma=0.5)
    svr.fit(X, y)

    # Predict on the training inputs.
    y_pred = svr.predict(X)

    # Report the mean squared error of the fit.
    mse = np.mean(np.square(y - y_pred))
    print(f"均方误差: {mse}")

    # Visualize the fit when matplotlib (and a usable backend) is available.
    try:
        import matplotlib.pyplot as plt

        plt.figure(figsize=(10, 6))
        plt.scatter(X, y, color='darkorange', label='数据点')
        plt.plot(X, y_pred, color='navy', label='SVR预测')
        plt.xlabel('X')
        plt.ylabel('y')
        plt.title('支持向量回归')
        plt.legend()
        plt.show()
    except ImportError:
        print("请安装matplotlib以可视化结果")
