import numpy as np
import matplotlib.pyplot as plt
import dataloader
from mpl_toolkits.mplot3d import Axes3D
from sklearn.neighbors import NearestNeighbors

def PCA(data, test_data, k=None, energy=None):
    """Project train/test data onto the top principal components.

    Either `k` (number of components) or `energy` (fraction of total
    variance to retain, in (0, 1]) must be supplied; `k` wins if both are.

    Parameters
    ----------
    data : ndarray, shape (n, m)
        Training samples, one per row. Used to fit mean and basis.
    test_data : ndarray, shape (t, m)
        Test samples; projected with the *training* mean and basis.
    k : int, optional
        Number of leading components to keep.
    energy : float, optional
        Keep the smallest number of components whose cumulative
        variance ratio reaches this value (only used when k is None).

    Returns
    -------
    (train_proj, test_proj) : ndarrays of shape (n, k) and (t, k),
    or -1 when neither k nor energy is given (original error convention).
    """
    if k is None and energy is None:
        return -1
    avg = data.mean(axis=0)
    centered = data - avg
    cov = np.cov(centered, rowvar=False)
    # Covariance is symmetric: eigh returns real eigenpairs (ascending).
    eig_val, eig_vec = np.linalg.eigh(cov)
    # Bug fix: sort descending so "first k" really are the largest
    # eigenvalues (np.linalg.eig gives no order guarantee).
    order = np.argsort(eig_val)[::-1]
    eig_val = eig_val[order]
    eig_vec = eig_vec[:, order]  # eigenvectors are columns
    if k is None:
        # Smallest k whose cumulative variance ratio reaches `energy`.
        ratios = eig_val / eig_val.sum()
        k = int(np.searchsorted(np.cumsum(ratios), energy)) + 1
        k = min(k, eig_val.shape[0])  # guard against float round-off
    basis = eig_vec[:, :k]  # dim: m * k
    # Bug fix: test data must be centered with the training mean
    # before being projected onto the same basis.
    return centered @ basis, (test_data - avg) @ basis


def LDA(data_x, data_y, data_test_x, k=None):
    """Fisher LDA: project onto the k most discriminative directions.

    Parameters
    ----------
    data_x : ndarray, shape (n, m)
        Training samples, one per row.
    data_y : ndarray, shape (n,)
        Integer class labels for data_x.
    data_test_x : ndarray, shape (t, m)
        Test samples, projected with the same basis as the training data.
    k : int, optional
        Number of directions to keep; None keeps all m.

    Returns
    -------
    (train_proj, test_proj) : np.matrix of shape (n, k) and (t, k).
    """
    # Generalized: derive the class set from the labels instead of the
    # original hard-coded range(10), so fewer/other labels also work.
    classes = np.unique(data_y)
    n, m = data_x.shape
    overall_mean = data_x.mean(axis=0).reshape((-1, 1))

    # Within-class scatter: sum of per-class covariances (np.cov
    # normalizes by N_c - 1, matching the original implementation).
    S_w = np.zeros((m, m))
    # Between-class scatter: size-weighted outer products of mean offsets.
    S_b = np.zeros((m, m))
    for c in classes:
        members = data_x[data_y == c]
        mean_vec = members.mean(axis=0)
        S_w += np.cov(members - mean_vec, rowvar=0)
        diff = mean_vec.reshape((-1, 1)) - overall_mean
        S_b += members.shape[0] * diff.dot(diff.T)

    # Generalized eigenproblem via pinv(S_w) @ S_b; eig may return
    # complex values with tiny imaginary parts, so keep the real part
    # (vectorized replacement for the original element-wise loop).
    feature_value, feature_vector = np.linalg.eig(np.linalg.pinv(S_w).dot(S_b))
    real_feature_value = feature_value.real.astype('float64')
    real_feature_vector = feature_vector.real.astype('float64')

    # Select the k eigenvectors with the largest eigenvalues.
    index = np.argsort(-real_feature_value)
    selected_vec = np.matrix(real_feature_vector.T[index[:k]])  # dim: k * m
    return data_x * selected_vec.T, data_test_x * selected_vec.T


def plot_3D(data, labels=None):
    """Scatter the first three columns of `data` in 3-D, one color per class.

    Parameters
    ----------
    data : array-like, shape (n, >=3)
        Projected samples; only columns 0-2 are plotted.
    labels : ndarray of int in [0, 10), optional
        Per-sample class labels. Defaults to the module-level `y_train`
        (the original implementation read that global implicitly), so
        existing callers keep working.
    """
    if labels is None:
        labels = y_train  # backward-compatible fallback to the global
    colors = ['#FFFAFA', '#DCDCDC', '#FFDAB9', '#FFF5EE', '#000000',
        '#191970', '#00BFFF', '#00868B', '#FF6A6A', '#FF3030', ]
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    for c in range(10):
        plot_data = data[labels == c]
        ax.scatter(plot_data[:, 0].tolist(), plot_data[:, 1].tolist(),
            plot_data[:, 2].tolist(), c=colors[c], alpha=0.4, marker='.')

    plt.show()


def plot_2D(data, labels=None):
    """Scatter the first two columns of `data` in 2-D, one color per class.

    Parameters
    ----------
    data : array-like, shape (n, >=2)
        Projected samples; only columns 0-1 are plotted.
    labels : ndarray of int in [0, 10), optional
        Per-sample class labels. Defaults to the module-level `y_train`
        (the original implementation read that global implicitly), so
        existing callers keep working.
    """
    if labels is None:
        labels = y_train  # backward-compatible fallback to the global
    colors = ['#FFFAFA', '#DCDCDC', '#FFDAB9', '#FFF5EE', '#000000',
        '#191970', '#00BFFF', '#00868B', '#FF6A6A', '#FF3030', ]
    for c in range(10):
        plot_data = data[labels == c]
        plt.scatter(plot_data[:, 0].tolist(), plot_data[:, 1].tolist(), c=colors[c], alpha=0.4, marker='.')
    plt.show()


def nearest_neighbor(x_train, y_train, x_test, y_test):
    """Evaluate 1-NN classification of x_test against x_train.

    Prints the feature dimensionality and the accuracy, then returns
    the accuracy as a float in [0, 1].
    """
    model = NearestNeighbors(n_jobs=-1)
    model.fit(x_train)
    nearest_idx = model.kneighbors(x_test, 1, return_distance=False)
    predictions = y_train[nearest_idx].flatten()
    correct = (predictions == y_test).sum()
    acc = correct * 1. / y_test.shape[0]
    print('k : ', x_train.shape[1], '  acc: ', acc)
    return acc


if __name__ == '__main__':
    # Load the pre-split dataset arrays from disk.
    d = np.load('data/data.npz')
    # NOTE: these are module-level globals; plot_3D/plot_2D read y_train.
    x_train, y_train, x_test, y_test = d['x_train'], d['y_train'], d['x_test'], d['y_test']
    # Shuffled index, used only by the commented-out sub-sampling below.
    random_index = np.arange(x_train.shape[0])
    np.random.shuffle(random_index)
    # data_num = 2000
    # x_train, y_train = x_train[random_index[:data_num]], y_train[random_index[:data_num]]
    # Reduce the training/test data to 3 LDA dimensions and visualize.
    new_train_data, new_test_data = LDA(x_train, y_train, x_test, k=3)
    # new_data = LDA(x_train, y_train, k=3)
    plot_3D(new_train_data)

    # Experiments below: 1-NN accuracy on raw data and on LDA/PCA
    # reductions at several target dimensionalities.
    # t = 5000
    # nearest_neighbor(x_train[:t], y_train[:t], x_test[:t], y_test[:t])
    # new_train_data, new_test_data = PCA(x_train, x_test, energy=0.95)
    # nearest_neighbor(new_train_data, y_train, new_test_data, y_test)
    # for k in [40, 80, 200]:
    #     print('LDA:')
    #     new_train_data, new_test_data = LDA(x_train, y_train, x_test, k=k)
    #     nearest_neighbor(new_train_data, y_train, new_test_data, y_test)
    #     print('PCA:')
    #     new_train_data, new_test_data = PCA(x_train, x_test, k=k)
    #     nearest_neighbor(new_train_data, y_train, new_test_data, y_test)
