import numpy as np
from sklearn.preprocessing import normalize
from scipy.linalg import svd
from sklearn.metrics import accuracy_score
import time


class KSVDDirectRecognition:
    """Pattern/face recognition via K-SVD dictionary learning.

    A dictionary ``D`` is learned from the training data with K-SVD; every
    sample is then represented by its sparse code (computed with OMP), and a
    test sample is assigned to the class whose mean training code has the
    highest inner product with the test code.
    """

    def __init__(self, n_atoms=256, sparsity=10, max_iter=50):
        self.n_atoms = n_atoms    # number of dictionary atoms (columns of D)
        self.sparsity = sparsity  # max nonzero coefficients per sparse code
        self.max_iter = max_iter  # K-SVD alternation iterations
        self.D = None             # learned dictionary, shape (n_features, n_atoms)
        self.mean_vectors = {}    # label -> mean sparse code of that class

    @staticmethod
    def _normalize_columns(M):
        """Return ``M`` with each column scaled to unit l2 norm.

        Zero columns are left unchanged (same convention as
        ``sklearn.preprocessing.normalize``, which this replaces so the core
        algorithm only needs numpy).
        """
        norms = np.linalg.norm(M, axis=0)
        return M / np.where(norms > 0, norms, 1.0)

    def omp(self, y, D, sparsity):
        """Orthogonal Matching Pursuit sparse coding.

        Greedily selects up to ``sparsity`` atoms of ``D`` to approximate
        ``y``, re-solving the least-squares coefficients on the whole support
        after every selection.

        Parameters
        ----------
        y : ndarray, shape (n_features,)
            Signal to encode.
        D : ndarray, shape (n_features, n_atoms)
            Dictionary; columns are assumed to have unit l2 norm.
        sparsity : int
            Maximum number of nonzero coefficients.

        Returns
        -------
        ndarray, shape (n_atoms,)
            Sparse code; zero outside the selected support.
        """
        n_atoms = D.shape[1]
        x = np.zeros(n_atoms)
        if sparsity <= 0:
            # Guard: the original hit an undefined variable in this case.
            return x

        residual = y.copy()
        support = []
        coeffs = np.zeros(0)
        # Cap at n_atoms so an atom is never selected twice.
        for _ in range(min(sparsity, n_atoms)):
            # Atom most correlated with the current residual.
            correlations = np.abs(D.T @ residual)
            correlations[support] = 0  # exclude already-selected atoms
            support.append(int(np.argmax(correlations)))

            # Least-squares refit of all coefficients on the support.
            D_selected = D[:, support]
            coeffs = np.linalg.pinv(D_selected) @ y
            residual = y - D_selected @ coeffs

            if np.linalg.norm(residual) < 1e-6:
                break

        x[support] = coeffs
        return x

    def ksvd(self, Y, n_atoms, sparsity, max_iter):
        """K-SVD dictionary learning.

        Alternates a sparse-coding stage (OMP on every column of ``Y``) with
        an atom-by-atom dictionary update via a rank-1 SVD of the residual
        restricted to the samples that use the atom.

        Parameters
        ----------
        Y : ndarray, shape (n_features, n_samples)
            Training signals, one per column.
        n_atoms : int
            Dictionary size.
        sparsity : int
            OMP sparsity level.
        max_iter : int
            Number of K-SVD iterations.

        Returns
        -------
        ndarray, shape (n_features, n_atoms)
            Learned dictionary with unit-norm columns.
        """
        n_features, n_samples = Y.shape

        # Initialize the dictionary from random training columns. Sampling
        # with replacement only when more atoms than samples are requested
        # (the original raised ValueError in that case).
        indices = np.random.choice(n_samples, n_atoms,
                                   replace=n_atoms > n_samples)
        D = self._normalize_columns(Y[:, indices].astype(float))

        for iteration in range(max_iter):
            # --- Sparse coding stage ---
            X = np.zeros((n_atoms, n_samples))
            for i in range(n_samples):
                X[:, i] = self.omp(Y[:, i], D, sparsity)

            # --- Dictionary update stage ---
            for k in range(n_atoms):
                # Samples whose code uses atom k.
                omega = np.where(X[k, :] != 0)[0]
                if len(omega) == 0:
                    # Dead atom: re-seed with a random training sample
                    # (normalized, leaving a zero sample unchanged).
                    col = Y[:, np.random.randint(n_samples)].astype(float)
                    nrm = np.linalg.norm(col)
                    D[:, k] = col / nrm if nrm > 0 else col
                    continue

                # Residual with atom k's own contribution added back.
                E_k = Y - D @ X + np.outer(D[:, k], X[k, :])
                E_k_R = E_k[:, omega]

                # Rank-1 approximation: atom <- leading left singular vector,
                # coefficients <- scaled leading right singular vector.
                try:
                    U, s, Vt = svd(E_k_R, full_matrices=False)
                except np.linalg.LinAlgError:
                    # SVD failed to converge; keep the current atom.
                    continue
                D[:, k] = U[:, 0]
                X[k, omega] = s[0] * Vt[0, :]

            # Numerical safeguard: SVD left singular vectors and re-seeded
            # atoms are already unit norm, so this is nearly a no-op.
            D = self._normalize_columns(D)

            # Report progress every 10 iterations (error hoisted here — the
            # original computed it every iteration but printed it rarely).
            if iteration % 10 == 0:
                error = np.mean(np.linalg.norm(Y - D @ X, axis=0))
                print(f"Iteration {iteration}, error: {error:.4f}")

        return D

    def fit(self, X_train, y_train):
        """Learn the dictionary and the per-class mean sparse codes.

        Parameters
        ----------
        X_train : ndarray, shape (n_samples, n_features)
            Training samples, one per row.
        y_train : ndarray, shape (n_samples,)
            Class label for each training sample.
        """
        print("Training KSVD dictionary...")
        # K-SVD expects signals as columns, hence the transpose.
        self.D = self.ksvd(X_train.T, self.n_atoms, self.sparsity,
                           self.max_iter)

        print("Computing sparse codes for training data...")
        train_codes = np.array(
            [self.omp(row, self.D, self.sparsity) for row in X_train]
        )

        # Mean sparse code per class serves as the class prototype.
        print("Computing mean feature vectors...")
        for label in np.unique(y_train):
            self.mean_vectors[label] = train_codes[y_train == label].mean(axis=0)

        print(f"Training completed. Learned dictionary shape: {self.D.shape}")

    def predict(self, X_test):
        """Predict a class label for each row of ``X_test``.

        Each test sample is sparse-coded with OMP and assigned to the class
        whose mean training code maximizes the inner-product similarity.

        Returns
        -------
        ndarray, shape (n_test,)
            Predicted labels.
        """
        predictions = []
        for row in X_test:
            code = self.omp(row, self.D, self.sparsity)
            # Nearest class by (unnormalized) inner-product similarity; ties
            # resolve to the first label in insertion order, as before.
            best_label = max(
                self.mean_vectors,
                key=lambda lbl: float(np.dot(code, self.mean_vectors[lbl])),
            )
            predictions.append(best_label)
        return np.array(predictions)


# 使用示例
def test_traditional_ksvd():
    """Demo: train and evaluate KSVDDirectRecognition on synthetic data.

    Each "person" is a fixed random base pattern; samples are that pattern
    plus Gaussian noise. Returns the test-set accuracy.
    """
    np.random.seed(42)
    n_persons = 10
    n_samples_per_person = 20
    n_features = 100

    # One fixed base pattern per person, shared by train AND test.
    # (Bug fix: the original drew NEW base patterns for the test set, so the
    # test identities had nothing in common with the training identities and
    # accuracy could only ever be at chance level.)
    base_patterns = [np.random.randn(n_features) for _ in range(n_persons)]

    # Training data: base pattern + individual variation/noise.
    X_train, y_train = [], []
    for person_id, base_pattern in enumerate(base_patterns):
        for _ in range(n_samples_per_person):
            X_train.append(base_pattern + 0.3 * np.random.randn(n_features))
            y_train.append(person_id)
    X_train = np.array(X_train)
    y_train = np.array(y_train)

    # Test data: same identities, slightly noisier samples.
    X_test, y_test = [], []
    for person_id, base_pattern in enumerate(base_patterns):
        for _ in range(5):  # 5 test samples per person
            X_test.append(base_pattern + 0.4 * np.random.randn(n_features))
            y_test.append(person_id)
    X_test = np.array(X_test)
    y_test = np.array(y_test)

    # Train and evaluate the traditional K-SVD pipeline.
    model = KSVDDirectRecognition(n_atoms=128, sparsity=8, max_iter=30)
    model.fit(X_train, y_train)

    predictions = model.predict(X_test)
    accuracy = accuracy_score(y_test, predictions)
    print(f"Traditional KSVD Accuracy: {accuracy:.4f}")

    return accuracy


if __name__ == "__main__":
    # Run the synthetic-data demo when executed as a script.
    test_traditional_ksvd()