import numpy as np
import numpy.linalg as lg
import math
import time

# Full CIFAR-10 feature dimension: 32 x 32 pixels x 3 channels = 3072 values.
DIM = 3072
# Target dimension for smallDataSet's row-averaging reduction; currently equal
# to DIM, so smallDataSet returns its input unchanged.
SMALL_DIM = 3072


def unpickle(file):
    """Load one pickled CIFAR-10 batch file.

    Parameters
    ----------
    file : str
        Path to the pickled batch file.

    Returns
    -------
    dict
        The unpickled batch dictionary (keys are bytes, e.g. b'data',
        b'labels').
    """
    import pickle
    # NOTE(review): pickle.load can execute arbitrary code on malicious input;
    # acceptable here only because CIFAR-10 batches are trusted local files.
    with open(file, 'rb') as fo:
        # renamed from `dict`, which shadowed the builtin
        batch = pickle.load(fo, encoding='bytes')
    return batch


def getData(class_num):
    """Load CIFAR-10 training data for the first `class_num` classes.

    Reads all five training batches and groups samples by label.

    Parameters
    ----------
    class_num : int
        Number of classes to keep (labels 0 .. class_num - 1).

    Returns
    -------
    list of numpy.ndarray
        One array per class, shape (SMALL_DIM, n_samples), scaled to [0, 1].
    """
    # Collect rows per class and stack ONCE at the end: the original appended
    # columns with np.c_ inside the loop, which copies the whole matrix each
    # time (accidental O(n^2)).
    rows_per_class = [[] for _ in range(class_num)]
    for i in range(1, 6):
        print('...fetching batch ' + str(i) + '...')
        data_dict = unpickle('../cifar-10-batches-py/data_batch_' + str(i))
        labels = data_dict[b'labels']
        data = np.array(data_dict[b'data'])
        for label, row in zip(labels, data):
            # keep only the classes we need
            if label < class_num:
                rows_per_class[label].append(row)
    # transpose so each sample is a column, matching the original layout
    dataset = [np.array(rows_per_class[z]).T for z in range(class_num)]
    # normalize pixel values from [0, 255] to [0, 1]
    return [smallDataSet(SMALL_DIM, DIM, dataset[z]) / 255 for z in range(class_num)]


def getTestData(class_num):
    """Load CIFAR-10 test data for the first `class_num` classes.

    Parameters
    ----------
    class_num : int
        Number of classes to keep (labels 0 .. class_num - 1).

    Returns
    -------
    list of numpy.ndarray
        One array per class, shape (SMALL_DIM, n_samples), scaled to [0, 1].
    """
    # Collect rows per class and stack ONCE at the end: the original appended
    # columns with np.c_ inside the loop, which copies the whole matrix each
    # time (accidental O(n^2)).
    rows_per_class = [[] for _ in range(class_num)]
    print('...fetching test batch...')
    data_dict = unpickle('../cifar-10-batches-py/test_batch')
    labels = data_dict[b'labels']
    data = np.array(data_dict[b'data'])
    for label, row in zip(labels, data):
        # keep only the classes we need
        if label < class_num:
            rows_per_class[label].append(row)
    # transpose so each sample is a column, matching the original layout
    dataset = [np.array(rows_per_class[z]).T for z in range(class_num)]
    # normalize pixel values from [0, 255] to [0, 1]
    return [smallDataSet(SMALL_DIM, DIM, dataset[z]) / 255 for z in range(class_num)]


def smallDataSet(small_dimension, big_dimension, dataset):
    """Reduce the feature dimension by averaging groups of consecutive rows.

    Every (big_dimension // small_dimension) consecutive rows of `dataset`
    are replaced by their mean, producing a (small_dimension, n_samples)
    array. When the dimensions are equal, the input is returned unchanged.

    Parameters
    ----------
    small_dimension : int
        Target number of rows; must divide big_dimension.
    big_dimension : int
        Current number of rows of `dataset`.
    dataset : numpy.ndarray
        Array of shape (big_dimension, n_samples).

    Returns
    -------
    numpy.ndarray
        Array of shape (small_dimension, n_samples).

    Raises
    ------
    ValueError
        If small_dimension does not divide big_dimension.
    """
    # raise instead of assert so the check survives `python -O`
    if big_dimension % small_dimension != 0:
        raise ValueError('big_dimension must be a multiple of small_dimension')
    if small_dimension == big_dimension:
        return dataset
    ratio = big_dimension // small_dimension
    # BUG FIX: the mean of each group must divide the sum of `ratio` rows by
    # the group size `ratio`; the original divided by small_dimension, which
    # produced wrongly-scaled values whenever a reduction actually happened.
    # (Also replaces the original per-row Python loop with one vectorized op.)
    return dataset.reshape(small_dimension, ratio, dataset.shape[1]).mean(axis=1)


def calcCov(X, mu):
    """Unbiased sample covariance of a column-sample matrix.

    Parameters
    ----------
    X : numpy.ndarray
        Array of shape (d, m), one sample per column.
    mu : numpy.ndarray
        Mean vector of length d.

    Returns
    -------
    numpy.ndarray
        (d, d) covariance matrix, normalized by (m - 1).
    """
    # numbers of samples in X
    m = X.shape[1]
    # reshape(-1, 1) broadcasts the mean across columns; this generalizes the
    # original hard-coded SMALL_DIM to any feature dimension d.
    centered = X - mu.reshape(-1, 1)
    return centered.dot(centered.T) / (m - 1)


def calcMean(X):
    """Return the per-feature mean of X, whose columns are samples.

    X has shape (d, m); the result is a length-d vector.
    """
    # averaging across columns (axis=1) == summing columns and dividing by m
    return X.mean(axis=1)


def LDF(x, cov, mean, p_omega):
    """Linear discriminant function g(x) = w.T x + w0 for a shared covariance.

    Parameters
    ----------
    x : numpy.ndarray
        Sample vector of length d, or (d, n) matrix of column samples.
    cov : numpy.ndarray
        Shared (d, d) covariance matrix.
    mean : numpy.ndarray
        Class mean vector of length d.
    p_omega : float
        Class prior probability (> 0).

    Returns
    -------
    float or numpy.ndarray
        Discriminant value per sample.
    """
    # hoisted: the original inverted cov twice in one expression
    cov_inv = lg.inv(cov)
    w = cov_inv.dot(mean)
    w0 = -0.5 * mean.T.dot(cov_inv).dot(mean) + math.log(p_omega)
    return w.T.dot(x) + w0


def QDF(x, cov, mean, p_omega):
    """Quadratic discriminant function for column samples under N(mean, cov).

    Parameters
    ----------
    x : numpy.ndarray
        (d, n) matrix of column samples.
    cov : numpy.ndarray
        Class (d, d) covariance matrix.
    mean : numpy.ndarray
        Class mean vector of length d.
    p_omega : float
        Class prior probability (> 0).

    Returns
    -------
    numpy.ndarray
        Length-n array of discriminant values, one per column of x.
    """
    # hoisted: the original inverted cov three times in one expression
    cov_inv = lg.inv(cov)
    # slogdet avoids overflow/underflow of det() for high-dimensional cov
    (_, log_det) = lg.slogdet(cov)
    # per-column quadratic term x_i.T (-0.5 cov_inv) x_i
    quad = np.einsum('ij,jk,ki->i', x.T, -0.5 * cov_inv, x)
    lin = (cov_inv.dot(mean)).T.dot(x)
    const = -0.5 * mean.T.dot(cov_inv).dot(mean) + math.log(p_omega) - 0.5 * log_det
    return quad + lin + const


def LDF_classification(class_num, mu_list, P_list, Sigma_list, Test_X_list):
    """Run LDF classification over the test sets and print the accuracy.

    Parameters
    ----------
    class_num : int
        Number of classes.
    mu_list : list of numpy.ndarray
        Per-class mean vectors.
    P_list : list of float
        Per-class prior probabilities.
    Sigma_list : list of numpy.ndarray
        Per-class covariance matrices (averaged into one shared matrix).
    Test_X_list : list of numpy.ndarray
        Per-class test sample matrices, one sample per column.
    """
    # here we assume the cov matrices are the same, so we calculate the mean of them
    Sigma = sum(Sigma_list) / class_num

    # calculate total sample number
    total_number = sum(Test_X_list[i].shape[1] for i in range(class_num))

    # testing classification using LDF
    LDF_correct = 0
    for i in range(class_num):
        # Discriminant of every candidate class k on class i's test samples.
        # BUG FIX: each candidate must use its OWN prior P_list[k]; the
        # original passed P_list[i], the prior of the true class.
        g = [LDF(Test_X_list[i], Sigma, mu_list[k], P_list[k])
             for k in range(class_num)]
        # argmax over candidates generalizes the original 2-class `g[0] < g[1]`
        # decision (identical result for class_num == 2)
        pred = np.argmax(np.vstack(g), axis=0)
        LDF_correct += np.sum(pred == i)
    LDF_correct_rate = LDF_correct / total_number
    print('LDF Accuracy: {:.2%} out of {} test samples'.format(LDF_correct_rate, total_number))


def QDF_classification(class_num, mu_list, P_list, Sigma_list, Test_X_list):
    """Run QDF classification over the test sets and print the accuracy.

    Parameters
    ----------
    class_num : int
        Number of classes.
    mu_list : list of numpy.ndarray
        Per-class mean vectors.
    P_list : list of float
        Per-class prior probabilities.
    Sigma_list : list of numpy.ndarray
        Per-class covariance matrices.
    Test_X_list : list of numpy.ndarray
        Per-class test sample matrices, one sample per column.
    """
    # calculate total sample number
    total_number = sum(Test_X_list[i].shape[1] for i in range(class_num))

    # testing classification using QDF
    QDF_correct = 0
    for i in range(class_num):
        # Discriminant of every candidate class k on class i's test samples.
        # BUG FIX: candidate class k must be scored with its OWN covariance
        # Sigma_list[k] and prior P_list[k]; the original used Sigma_list[i]
        # and P_list[i] (the true class's), making every candidate share them.
        g = [QDF(Test_X_list[i], Sigma_list[k], mu_list[k], P_list[k])
             for k in range(class_num)]
        # argmax over candidates generalizes the original 2-class `g[0] < g[1]`
        # decision (identical result for class_num == 2)
        pred = np.argmax(np.vstack(g), axis=0)
        QDF_correct += np.sum(pred == i)
    QDF_correct_rate = QDF_correct / total_number
    print('QDF Accuracy: {:.2%} out of {} test samples'.format(QDF_correct_rate, total_number))


if __name__ == '__main__':
    # we do not want magic number
    class_number = 2
    # this program only fits 2-class-classification
    assert (class_number == 2)

    print('**************** BEGIN DATA PROCESSING ****************')
    data_start = time.time()
    print('getting train data')
    train_data = getData(class_number)
    print('getting test data, here time passed %.2f' % (time.time() - data_start), 'seconds')
    test_data = getTestData(class_number)
    print('calculating means and prior probability, here time passed %.2f' % (time.time() - data_start), 'seconds')
    data_mu = [calcMean(train_data[i]) for i in range(class_number)]
    # total number of training samples across all classes
    tot = sum(train_data[i].shape[1] for i in range(class_number))
    # Prior of each class = its sample count / total count.
    # BUG FIX: the original read train_data[i].shape[i], which for i == 0 is
    # the feature dimension, not the sample count, corrupting the priors.
    data_P = [train_data[i].shape[1] / tot for i in range(class_number)]
    print('calculating covariance matrices, here time passed %.2f' % (time.time() - data_start), 'seconds')
    data_Sigma = [calcCov(train_data[i], data_mu[i]) for i in range(class_number)]
    data_end = time.time()
    print('Data processing time %.2f' % (data_end - data_start), 'seconds')

    print('**************** BEGIN LDF ****************')
    LDF_start = time.time()
    LDF_classification(class_number, data_mu, data_P, data_Sigma, test_data)
    LDF_end = time.time()
    print('LDF running time %.2f' % (LDF_end - LDF_start), 'seconds')

    print('**************** BEGIN QDF ****************')
    QDF_start = time.time()
    QDF_classification(class_number, data_mu, data_P, data_Sigma, test_data)
    QDF_end = time.time()
    print('QDF running time %.2f' % (QDF_end - QDF_start), 'seconds')