import numpy as np
import cv2
import matplotlib.pyplot as plt
from annoy import AnnoyIndex
from skimage.metrics import peak_signal_noise_ratio
import warnings

warnings.filterwarnings('ignore')

"""
图像恢复过程：
1. 分块 -> 2. 寻找相似块 -> 3. 随机构建相似块矩阵 -> 4. SVD -> 5. 重构图像
"""

dist = []


def init(src):
    """
    Bring an image into the 0-255 intensity range if it is not already.

    :param src: input grayscale image
    :return: ``src`` unchanged when its maximum already lies in (1, 255],
             otherwise a min-max normalization of ``src`` to [0, 255]
    """
    peak = src.max()
    if not (1 < peak <= 255):
        # Likely a float image in [0, 1] (or constant) — rescale to [0, 255].
        return cv2.normalize(src, None, 0, 255, cv2.NORM_MINMAX)
    return src


def block(src, block_size, step):
    """
    Split an image into (possibly overlapping) square patches.

    :param src: input 2-D image
    :param block_size: side length of each patch
    :param step: stride between consecutive patches
    :return: (patch list in row-major scan order,
              number of patch rows, number of patch columns)
    """
    n_rows, n_cols = src.shape
    row_offsets = range(0, n_rows - block_size + 1, step)
    col_offsets = range(0, n_cols - block_size + 1, step)

    patches = [
        src[top:top + block_size, left:left + block_size]
        for top in row_offsets
        for left in col_offsets
    ]
    return patches, len(row_offsets), len(col_offsets)


def build_tree(blocks, method="euclidean"):
    """
    Build an Annoy index over the flattened blocks for similarity search.

    :param blocks: list of 2-D blocks, all with the same shape
    :param method: Annoy distance metric (one of "angular", "euclidean",
                   "manhattan", "hamming", "dot")
    :return: a built AnnoyIndex whose item ids are block positions
    """
    height, width = blocks[0].shape
    index = AnnoyIndex(height * width, metric=method)

    for item_id, patch in enumerate(blocks):
        index.add_item(item_id, patch.flatten())
    # 50 trees; n_jobs=-1 builds on all available CPU cores.
    index.build(50, n_jobs=-1)
    return index


def search(n, blocks, method="euclidean"):
    """
    Find the n most similar blocks for every block.

    :param n: number of neighbours to retrieve per block (the block itself
              is included as its own nearest neighbour)
    :param blocks: list of 2-D blocks
    :param method: Annoy distance metric
    :return: (similar_blocks, distances) — per-block lists of neighbour
             indices and the matching per-block lists of distances
    """
    tree = build_tree(blocks, method=method)
    similar_blocks = []
    # Fix: collect distances in a local list instead of appending to the
    # module-level `dist`, which silently accumulated stale entries across
    # repeated calls to search().
    distances = []

    for i in range(len(blocks)):
        neighbours, neighbour_dists = tree.get_nns_by_item(
            i, n, include_distances=True
        )
        similar_blocks.append(neighbours)
        distances.append(neighbour_dists)

    return similar_blocks, distances


def random_svd(blocks, similar_blocks, k):
    """
    Denoise each block via a rank-k SVD of its stack of similar blocks.

    For every block: its similar blocks are shuffled, stacked as the columns
    of a matrix, truncated to rank k with an SVD, and the column belonging
    to the block itself is reshaped back to the block shape.

    :param blocks: list of 2-D blocks (all the same shape)
    :param similar_blocks: per-block lists of similar-block indices; the
                           first index of each list is the block itself
    :param k: number of singular values kept (rank of the approximation)
    :return: list of rank-k reconstructed blocks as 2-D ndarrays
    """
    blocks_matrix = np.array(blocks)
    r, c = blocks[0].shape
    result = []
    num_similar = len(similar_blocks[0])

    for indices in similar_blocks:
        self_idx = indices[0]
        # Randomize the column order so the decomposition does not depend on
        # a fixed neighbour layout.
        order = np.arange(len(indices))
        np.random.shuffle(order)
        shuffled = np.array(indices)[order].tolist()
        col = shuffled.index(self_idx)

        # Columns are the flattened similar blocks: shape (r*c, num_similar).
        stack = blocks_matrix[shuffled].reshape(num_similar, -1).T

        # full_matrices=False avoids computing the full (r*c, r*c) U matrix.
        # Fix: np.mat/np.matrix is deprecated in NumPy — use the @ operator
        # on plain ndarrays instead.
        u, sigma, vt = np.linalg.svd(stack, full_matrices=False)
        low_rank = u[:, :k] @ np.diag(sigma[:k]) @ vt[:k, :]

        result.append(low_rank[:, col].reshape(r, c))

    return result


def rebuild_img_weighted(blocks_svd, r, c, block_size, step):
    """
    Reassemble the image from overlapping blocks, averaging overlaps.

    :param blocks_svd: reconstructed blocks in row-major scan order
    :param r: number of block rows
    :param c: number of block columns
    :param block_size: side length of each block
    :param step: stride used when the image was split
    :return: reassembled 2-D float image
    """
    height = (r - 1) * step + block_size
    width = (c - 1) * step + block_size
    acc = np.zeros((height, width))
    counts = np.zeros((height, width))

    patches = iter(blocks_svd)
    for top in range(0, height - block_size + 1, step):
        for left in range(0, width - block_size + 1, step):
            patch = next(patches)
            acc[top:top + block_size, left:left + block_size] += patch
            counts[top:top + block_size, left:left + block_size] += 1

    # Each pixel is the mean of every block that covered it.
    return acc / counts


def main(block_size, step, n, k, method="euclidean", img_noisy="./Gaussian_Lenna_sigma_20.png", img_path="./Lenna.png"):
    """
    Run the full restoration pipeline on a noisy image.

    :param block_size: side length of each block
    :param step: stride between blocks
    :param n: number of similar blocks to search for
    :param k: rank kept in the SVD truncation
    :param method: Annoy distance metric
    :param img_noisy: path of the noisy input image
    :param img_path: path of the clean reference image
    :return: (pre-blurred noisy image, clean image, restored image)
    """
    noisy = cv2.imread(img_noisy, 0)
    # Light Gaussian pre-smoothing before block matching.
    noisy = cv2.GaussianBlur(noisy, (3, 3), sigmaX=0, sigmaY=10)
    clean = cv2.imread(img_path, 0)

    patches, n_rows, n_cols = block(noisy, block_size=block_size, step=step)
    neighbours, _distances = search(n=n, blocks=patches, method=method)
    denoised = random_svd(patches, neighbours, k)
    restored = rebuild_img_weighted(denoised, n_rows, n_cols, block_size, step)

    return noisy, clean, restored


if __name__ == '__main__':
    img, img_true, img_res = main(block_size=8, step=2, n=32, k=4, method="manhattan")

    # Fix: cv2.resize expects dsize as (width, height), i.e. the REVERSE of
    # the numpy (rows, cols) shape; passing the shape directly only worked
    # by accident for square images.
    resized_image = cv2.resize(
        img_res,
        dsize=(img_true.shape[1], img_true.shape[0]),
        interpolation=cv2.INTER_CUBIC
    )

    # Fix: pass data_range explicitly — img_true is uint8 while the restored
    # images are float64, so skimage cannot reliably infer the range for the
    # mixed-dtype comparison (the mismatch was previously hidden by the
    # global warnings filter).
    print(peak_signal_noise_ratio(img_true, resized_image, data_range=255))
    print(peak_signal_noise_ratio(img_true, img, data_range=255))

    # 3x3 Laplacian-style sharpening kernel.
    l_p = np.array([
        [0, -1, 0],
        [-1, 5, -1],
        [0, -1, 0]
    ])
    resized_image = cv2.filter2D(resized_image, -1, kernel=l_p)

    plt.figure(figsize=(20, 5))
    plt.subplot(141)
    plt.imshow(img_true, cmap='gray')
    plt.axis('off')
    plt.title('Original Image')
    plt.subplot(142)
    plt.imshow(img, cmap='gray')
    plt.axis('off')
    plt.title('Noise Image')
    plt.subplot(143)
    plt.imshow(img_res, cmap='gray')
    plt.axis('off')
    # Fix: this panel shows the restored (denoised) image, not a residual.
    plt.title('Restored Image')
    plt.subplot(144)
    plt.imshow(resized_image, cmap='gray')
    plt.axis('off')
    plt.title('Resized Image')
    plt.tight_layout()
    plt.show()
