from view.endSrc.tDataset import tDataset
from complexityMeasure.SampleSize import SampleSize
from view.endSrc.DBConfig import DBConfig
from view.endSrc.MySqlConn import MySqlConn
import numpy as np
import random
import matplotlib.pyplot as plt
from complexityMeasure.CalcComplexity import CalcComplexity
from sklearn.neural_network import MLPClassifier


class DataPartition:
    """Split a dataset into nested random sample blocks plus a validation set.

    The row indices are shuffled once; each requested block is a prefix of
    that single permutation, so smaller blocks are subsets of larger ones.
    """

    def __init__(self, data, label):
        # data: 2-D sample matrix; label: 1-D labels aligned with data's rows.
        self.data = data
        self.rows = data.shape[0]
        self.label = label
        # Populated by Partition(); lists of row-blocks and matching labels.
        self.randomSampleBlocks = None
        self.randomSampleBlocks_labels = None

    def Partition(self, sizeLst):
        """Build one sample block per size in sizeLst and return the rest.

        Rows beyond the largest requested size (sizeLst[-1]) are returned
        as a held-out validation set (data, labels).
        """
        order = list(range(self.rows))
        random.shuffle(order)

        # Each block is a prefix of the shuffled order, so blocks nest.
        self.randomSampleBlocks = [self.data[order[:n], :] for n in sizeLst]
        self.randomSampleBlocks_labels = [self.label[order[:n]] for n in sizeLst]

        validateIdxs = order[sizeLst[-1]:]
        print('return a dataset(size:{}) for validate'.format(len(validateIdxs)))
        return self.data[validateIdxs, :], self.label[validateIdxs]


def normalization(traindata, testdata):
    """Min-max scale both arrays using the TRAINING data's global min/max.

    Test data is deliberately scaled with the training statistics (no leakage),
    so scaled test values may fall outside [0, 1].

    Returns:
        (scaled_train, scaled_test) tuple of arrays.
    """
    maxima = np.max(traindata)
    minima = np.min(traindata)
    span = maxima - minima
    if span == 0:
        # Constant training data: the original formula would divide by zero
        # and yield NaN/inf. Map everything to 0 instead.
        return np.zeros_like(traindata, dtype=float), np.zeros_like(testdata, dtype=float)
    return (traindata - minima) / span, (testdata - minima) / span


def BLBsize(N, lbda=0.6):
    """Return the Bag-of-Little-Bootstraps subsample size N**lbda."""
    return pow(N, lbda)


def CMsize(data, label, e=0.05, z=1.96):
    """Estimate a required sample size from the dataset's complexity measure.

    Args:
        data: 2-D sample matrix; only its column count (feature count) is used.
        label: 1-D integer class labels (non-negative, as required by bincount).
        e: error tolerance E passed to SampleSize.samplesize.
        z: z-score (confidence level) passed to SampleSize.samplesize.

    Returns:
        The sample size computed by SampleSize.samplesize.
    """
    cc = CalcComplexity(data.shape[1])
    # Class prior probabilities: drop empty classes, then normalize.
    # Fixed: the original filtered with `x is not 0`, an identity comparison
    # against an int literal that only happens to work via CPython's small-int
    # caching; use a proper value comparison instead.
    counts = np.bincount(label)
    paiLst = counts[counts != 0] / counts.sum()

    cc.K = len(paiLst)
    cc.paiList = paiLst
    cc.calcEntropy()
    complexity = cc.calc()
    samplesize = SampleSize.samplesize(complexity=complexity, E=e, z=z)
    print('the complexity of this dataset (no contain validate dataset): {}, its sample size is {} (e={}, z={})'.format(
        complexity, samplesize, e, z))
    return samplesize


if __name__ == '__main__':
    # Experiment: train an MLP on increasingly large random sample blocks of
    # one dataset, plot accuracy vs. sample size, and compare against the
    # BLB subsample size and the complexity-measure sample size.
    random.seed(1)
    dsId = 1907  #1907  #2029
    ds = tDataset(MySqlConn(DBConfig('Compare with BLB')))
    ds.readDataset(dsId)
    sizeLst = [100, 200, 300, 500, 700, 800, 900, 1000, 1100, 1200, 1500, 2000, 4000, 5000, 7000, 8000, 10000, 20000, 35000]
    dp = DataPartition(ds.m_dataset, ds.m_labels)
    # Rows beyond the largest block size are held out for evaluation.
    test_data, test_label = dp.Partition(sizeLst)

    accLst = []
    for train_data, train_label in zip(dp.randomSampleBlocks, dp.randomSampleBlocks_labels):
        # Fixed: `(100)` is just the int 100, not a tuple — the parentheses
        # did nothing. One hidden layer of 100 units is what was intended.
        mlp = MLPClassifier(hidden_layer_sizes=(100,), activation="relu")

        # Scale train and test with the training block's min/max statistics.
        train_data_nor, test_data_nor = normalization(train_data, test_data)
        mlp.fit(train_data_nor, train_label)

        predicted = np.asarray(mlp.predict(test_data_nor))
        # Fraction of exact label matches on the held-out set.
        accuracy = float(np.mean(predicted == test_label))
        accLst.append(accuracy)

    print(accLst)

    N = sizeLst[-1]
    bs = BLBsize(N, lbda=0.6)
    cs = CMsize(dp.randomSampleBlocks[-1], dp.randomSampleBlocks_labels[-1])
    print('bs:', bs)
    print('cs:', cs)

    # Accuracy curve plus vertical markers for the two candidate sample sizes.
    plt.plot(sizeLst, accLst, marker='o')
    plt.plot([bs, bs], [min(accLst), max(accLst)*1.1], linestyle='--', label='BLB size')
    plt.text(bs, max(accLst)*1.1, int(bs))
    plt.plot([cs, cs], [min(accLst), max(accLst)*1.1], linestyle='-.', label='Complexity size')
    plt.text(cs, max(accLst) * 1.1, int(cs))
    plt.legend(loc='lower right')
    plt.grid()
    plt.show()
