import numpy as np
import struct
import matplotlib.pyplot as plt


def decode_idx3_ubyte(idx3_ubyte_file):
    """
    Parse an IDX3 (image) file in the MNIST binary format.

    :param idx3_ubyte_file: path to the idx3 file
    :return: float ndarray of shape (num_images, num_rows, num_cols)
    """
    # Read the raw bytes; 'with' guarantees the handle is closed
    # (the original left the file object open until garbage collection).
    with open(idx3_ubyte_file, 'rb') as f:
        bin_data = f.read()

    # Header: magic number, image count, rows, cols — four big-endian int32.
    offset = 0
    fmt_header = '>iiii'
    magic_number, num_images, num_rows, num_cols = struct.unpack_from(fmt_header, bin_data, offset)
    print('magic number:%d, pic num:%d, pic size: %d*%d' % (magic_number, num_images, num_rows, num_cols))

    # Pixel payload: num_images * rows * cols unsigned bytes directly after the
    # header. A single vectorized frombuffer read replaces the original
    # per-image struct.unpack loop (same values, orders of magnitude faster).
    offset += struct.calcsize(fmt_header)
    images = np.frombuffer(bin_data, dtype=np.uint8,
                           count=num_images * num_rows * num_cols,
                           offset=offset)
    # astype(float64) matches the original np.empty(...) default dtype and
    # makes the returned array writable (frombuffer views are read-only).
    return images.reshape(num_images, num_rows, num_cols).astype(np.float64)


def decode_idx1_ubyte(idx1_ubyte_file):
    """
    Parse an IDX1 (label) file in the MNIST binary format.

    :param idx1_ubyte_file: path to the idx1 file
    :return: 1-D float ndarray of length num_images
    """
    # Read the raw bytes; 'with' guarantees the handle is closed
    # (the original left the file object open until garbage collection).
    with open(idx1_ubyte_file, 'rb') as f:
        bin_data = f.read()

    # Header: magic number and label count — two big-endian int32.
    offset = 0
    fmt_header = '>ii'
    magic_number, num_images = struct.unpack_from(fmt_header, bin_data, offset)
    print('magic number:%d, pic num:%d' % (magic_number, num_images))

    # Label payload: num_images unsigned bytes right after the header.
    # A single vectorized frombuffer read replaces the per-label unpack loop;
    # astype(float64) matches the original np.empty(...) default dtype.
    offset += struct.calcsize(fmt_header)
    labels = np.frombuffer(bin_data, dtype=np.uint8, count=num_images,
                           offset=offset).astype(np.float64)
    return labels


def draw(k_list, accuracy_list):
    """Plot classification accuracy as a red line over candidate cut-off values k."""
    plt.plot(k_list, accuracy_list, color='red')
    plt.ylabel("accuracy")
    plt.xlabel("k")
    # Auto-adjust layout so axis labels fit inside the figure area.
    plt.tight_layout()
    plt.show()


def load_data():
    """
    Load the MNIST training and test sets from ./data.

    :return: (train_x, train_y, test_x, test_y) where each image is flattened
             to a 784-dim row vector and pixel values are scaled to [0, 1].
    """
    train_x_data = decode_idx3_ubyte('./data/train-images.idx3-ubyte')
    train_y = decode_idx1_ubyte('./data/train-labels.idx1-ubyte')

    test_x_data = decode_idx3_ubyte('./data/t10k-images.idx3-ubyte')
    test_y = decode_idx1_ubyte('./data/t10k-labels.idx1-ubyte')

    # Flatten each 28*28 image into a 784-dim row vector. reshape (unlike the
    # original np.resize) raises on a size mismatch instead of silently
    # repeating or truncating data.
    train_x = train_x_data.reshape(train_x_data.shape[0], -1)
    test_x = test_x_data.reshape(test_x_data.shape[0], -1)

    # Normalize 0..255 pixel values into [0, 1].
    return train_x / 255, train_y, test_x / 255, test_y


def MQDF2(x, eigenvalues_i, eigenvectors_i, mean_i, k):
    """
    Modified quadratic discriminant function (MQDF2) score of sample x for one class.

    Assumes eigenvalues_i is sorted in descending order with the matching
    eigenvectors as columns of eigenvectors_i.

    :param x: feature vector (d-dim; 784 for 28*28 MNIST)
    :param eigenvalues_i: class eigenvalues, descending, length d
    :param eigenvectors_i: d*d matrix, eigenvectors as columns
    :param mean_i: class mean vector, length d
    :param k: truncation order — number of principal eigenvalues kept
    :return: discriminant value g2 (larger means more likely this class)
    """
    # Feature dimension, previously hard-coded as 784; using x.size keeps the
    # behavior identical for MNIST while supporting any dimensionality.
    d = x.size
    # delta: average of the minor (truncated) eigenvalues, replacing them all.
    delta = np.sum(eigenvalues_i[k:d]) / (d - k)
    diff = x - mean_i
    # Projection of the centered sample onto the k principal eigenvectors,
    # computed once (the original evaluated this dot product twice).
    proj = np.dot(diff, np.array(eigenvectors_i[:, 0:k]))
    # Residual squared norm in the truncated subspace.
    epsilon = np.sum(diff ** 2) - np.sum(proj ** 2)
    # Σ [(x-μ).T.dot(Φij)]**2 / λij  over the k principal directions
    first = np.sum(proj ** 2 / eigenvalues_i[0:k])
    second = epsilon / delta
    # Σ log(λij) over the k principal eigenvalues
    third = np.sum(np.log(eigenvalues_i[0:k].real))
    fourth = (d - k) * np.log(delta)
    g2_i = -first - second - third - fourth
    return g2_i


def classifier(x_data, y_data, eigenvalues, eigenvectors, mean, k):
    """
    Classify every sample in x_data with MQDF2 and return the accuracy.

    :param x_data: samples to classify (n * 784)
    :param y_data: ground-truth labels (length n)
    :param eigenvalues, eigenvectors, mean: per-class MQDF parameters (10 classes)
    :param k: truncation order passed through to MQDF2
    :return: fraction of samples whose predicted class matches y_data
    """
    n_samples = len(x_data)
    predictions = np.zeros(n_samples)
    for idx in range(n_samples):
        # Evaluate the MQDF2 discriminant for each of the 10 digit classes;
        # the class with the largest score wins.
        scores = [MQDF2(x_data[idx], eigenvalues[c], eigenvectors[c], mean[c], k)
                  for c in range(10)]
        predictions[idx] = np.argmax(scores)
    # Accuracy = share of predictions that agree with the labels.
    return np.sum(predictions == y_data) / n_samples


# def MQDF(x, eigenvalues_i, eigenvectors_i, mean_i):
#     """Unfinished: plain (untruncated) QDF discriminant."""
#     # Σ[(x-μ).T.dot(Φij)]**2/λij
#     sigma1 = np.sum((np.dot((x - mean_i), eigenvectors_i) ** 2) / eigenvalues_i)
#     # log(λij)
#     sigma2 = np.sum(np.log(eigenvalues_i))
#     # QDF discriminant value g0
#     g0 = -sigma1 - sigma2
#
#     return g0


if __name__ == '__main__':
    # train_x has shape 60000*784
    print('--------Load data----------')
    train_x, train_y, test_x, test_y = load_data()

    # Per-class covariance matrices, shape 10*784*784
    cov_mat = np.zeros((10, train_x.shape[1], train_x.shape[1]))
    # Per-class eigenvalues, shape 10*784 (row i = eigenvalues of class i's covariance)
    eigenvalues = np.zeros((10, train_x.shape[1]))
    # Per-class eigenvector matrices, shape 10*784*784 (columns are eigenvectors)
    eigenvectors = np.zeros((10, train_x.shape[1], train_x.shape[1]))
    # Per-class mean vectors, shape 10*784
    mean = np.zeros((10, train_x.shape[1]))

    # Parameter estimation: covariance matrix, eigenvalues, eigenvectors and
    # mean for each digit class 0..9.
    for i in range(10):
        # Select the training images whose label is digit i
        # (np.where gives the matching indices; a[np.where(a > 5)] == a[a > 5]).
        train_x_i = train_x[np.where(train_y == i)]

        # Mean of this class over the 784 pixel dimensions
        mean[i] = np.mean(train_x_i, axis=0)

        # Class covariance. np.cov treats rows as variables, so the
        # samples*784 matrix is transposed first.
        cov_mat[i] = np.cov(train_x_i.T) / 784

        # BUGFIX: the original used np.linalg.eig, which gives NO ordering
        # guarantee on its eigenvalues, while MQDF2 requires the k PRINCIPAL
        # (largest) eigenvalues at indices [0:k]. np.linalg.eigh is the correct
        # routine for a real symmetric covariance matrix: it returns real
        # eigenvalues sorted ascending, which we reverse to descending, keeping
        # the eigenvector columns aligned with their eigenvalues.
        w, v = np.linalg.eigh(cov_mat[i])
        eigenvalues[i] = w[::-1]
        eigenvectors[i] = v[:, ::-1]

    print('--------Find the optimal cut-off value----------')
    # Record accuracy for every candidate truncation value
    k_list = []
    accuracy_list = []
    # Grid-search the truncation order k
    for k in range(10, 40):
        # Classify 5000 training samples with MQDF2
        accuracy = classifier(train_x[:5000], train_y[:5000], eigenvalues, eigenvectors, mean, k)
        print("k = %d, accuracy rate = %f" % (k, accuracy))

        accuracy_list.append(accuracy)
        k_list.append(k)
    best_k = k_list[np.argmax(accuracy_list)]
    print("The optimal cutoff value k is", best_k)
    draw(k_list, accuracy_list)

    print('--------Test set validation----------')
    # Evaluate MQDF2 on the held-out test set with the best k
    accuracy = classifier(test_x, test_y, eigenvalues, eigenvectors, mean, best_k)
    print("k = %d, Test accuracy = %f" % (best_k, accuracy))


