
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt

import numpy as np
import random
from pandas import DataFrame
import math

from view.endSrc.MyTools import MyTools
from view.endSrc.MixedGaussianDatasetGenerator import MixedGaussianDatasetGenerator
from view.endSrc.MixedGaussianDataset import MixedGaussianDataset
from view.endSrc.DBConfig import DBConfig
from view.endSrc.Dataset import Dataset
from view.endSrc.MySqlConn import MySqlConn


class loadSaveDB:
    """Utilities for generating or loading 2-D datasets, previewing them with
    matplotlib, and registering them in the ``tDataset`` table of a MySQL DB.

    NOTE(review): class name kept as-is because external callers import
    ``loadSaveDB``; PEP 8 would prefer ``LoadSaveDB``.
    """

    def visualization(self, dataset):
        """Project *dataset* to 2-D with PCA and show a scatter plot.

        Parameters
        ----------
        dataset : array-like of shape (n_samples, n_features)
            Data to reduce to two principal components and plot.
        """
        pca = PCA(n_components=2)
        # fit_transform() both fits and projects in one pass; the previous
        # code called pca.fit() first and then fit_transform(), fitting the
        # model twice for no benefit.
        data2d = pca.fit_transform(dataset)
        x = data2d[:, 0]
        y = data2d[:, 1]
        plt.scatter(x, y, marker="+")

        plt.xlabel('dimension 1')
        plt.ylabel('dimension 2')
        plt.title('Principle Component Analysis to 2D data')
        plt.show()

    def visual2D(self, dataset):
        """Scatter-plot the first two columns of *dataset* as x/y (no PCA)."""
        plt.scatter(dataset[:, 0], dataset[:, 1],
                    label='2D origin', marker="+")
        plt.xlabel('x')
        plt.ylabel('y')
        plt.title('2D origin data')
        plt.legend()
        plt.show()

    def GenerateManualData(self, meanVec=None, cov=None, size=300, show=True):
        """Sample ``size`` points from each of several 2-D Gaussians.

        All parameters are optional; the defaults reproduce the original
        hard-coded behavior (three clusters, shared diagonal covariance,
        300 samples per cluster, plotted before returning).

        Parameters
        ----------
        meanVec : list of 2-element sequences, optional
            One (x, y) mean per cluster.
        cov : 2x2 array-like, optional
            Covariance matrix shared by every cluster.
        size : int, optional
            Number of samples drawn per cluster.
        show : bool, optional
            When True, scatter-plot the generated data before returning.

        Returns
        -------
        np.ndarray of shape (len(meanVec) * size, 2)
        """
        if meanVec is None:
            meanVec = [
                [100, 100],
                [25, 50],
                [50, 20],
            ]
        if cov is None:
            cov = [
                [3, 0],
                [0, 3]
            ]

        x = np.array([])
        y = np.array([])

        for m in meanVec:
            xt, yt = np.random.multivariate_normal(m, cov, size).T
            x = np.concatenate((x, xt))
            y = np.concatenate((y, yt))

        # Stack the two 1-D coordinate vectors into an (N, 2) array.
        data = np.concatenate((x[:, np.newaxis], y[:, np.newaxis]), axis=1)

        if show:
            plt.plot(data[:, 0], data[:, 1], 'x')
            plt.xlabel("x_axis")
            plt.ylabel("y_axis")
            plt.show()

        return data

    def createRow(self, sqlconn, des, fileName, nRows, nCols, className, trueCentresFileName, nClusters, name):
        """Insert one row into ``tDataset`` and return its new id.

        Parameters
        ----------
        sqlconn : connection object exposing ``insertRetId(sql)`` and ``logger``.
        des, fileName, className, trueCentresFileName, name : str fields.
        nRows, nCols, nClusters : int fields.

        Returns
        -------
        The new row id, or None when the insert failed (failure is logged).
        """
        des = des.strip()     # remove prefix/suffix white spaces

        # Escape embedded single quotes ('' is the SQL escape) so a value
        # like "it's" neither breaks the statement nor injects SQL.  A
        # parameterized query would be preferable, but insertRetId() only
        # accepts a finished SQL string.
        def _esc(s):
            return str(s).replace("'", "''")

        rtup = [des, fileName,
                nRows, nCols,
                className,
                nClusters, trueCentresFileName,
                name]
        # build a sql statement
        sql = "INSERT INTO tDataset( " \
              "des, fileName, " \
              "nRows, nCols, " \
              "className, " \
              "nClusters, trueCentresFileName, " \
              "name)" \
              "VALUES ( '%s', '%s', " \
              " %d, %d, " \
              " '%s', " \
              " %d, '%s', " \
              " '%s') " % (_esc(des), _esc(fileName),
                           nRows, nCols,
                           _esc(className),
                           nClusters, _esc(trueCentresFileName),
                           _esc(name))

        newId = sqlconn.insertRetId(sql)
        if newId is None:
            sqlconn.logger.write('tDataset.createRow: failed to insert..' + str(rtup))
            return None
        return newId

    def saveDBForDataset(self, data, des, fileName, nClusters, name, haveLabels=False, confirm=False):
        """Preview *data*, then (after confirmation) write it to ``../data/``
        and register it in the DB via :meth:`createRow`.

        Parameters
        ----------
        data : np.ndarray
            When ``haveLabels`` is True, column 0 is the label column.
        des, fileName, name : str metadata stored in ``tDataset``.
        nClusters : int
            Expected cluster count; corrected from the labels when present.
        haveLabels : bool
            Whether ``data`` already carries a leading label column.
        confirm : bool
            When True, skip the interactive "save?" prompt.

        Returns
        -------
        The new DB row id, or None when saving failed or was declined.
        """
        # Prefix a timestamp so repeated imports never collide on file name.
        date = MyTools.getDatetimeFileName()
        fileName = date + '_' + fileName

        if haveLabels:
            # self.visualization(data[:, 1:])
            self.visual2D(data[:, 1:])
        else:
            # self.visualization(data)
            self.visual2D(data)

        if confirm:
            yes = 'y'
        else:
            yes = input('Want to save to DB?: ')

        if yes == 'y':
            sqlConn = MySqlConn(DBConfig("loadSaveDB"))
            nRows = data.shape[0]
            nCols = data.shape[1]

            # create fake labels cols
            if not haveLabels:
                # Fake labels: mostly 1s, with the first nClusters rows set to
                # 0..nClusters-1 so every cluster id appears at least once.
                labels = np.ones(nRows)
                labels[:nClusters] = range(nClusters)
                labels = labels.reshape(-1, 1)
                data = np.concatenate((labels, data), axis=1)
                fileName = 'withfakelabels_' + fileName
            else:
                nCols = nCols - 1  # label column is not a feature column
                labels = data[:, 0]
                trueCluster = len(np.unique(labels))
                if nClusters != trueCluster:
                    nClusters = trueCluster
                    print('nClusters(Number of clusters) corrected based on labels')

            # save to data dir
            np.savetxt('../data/' + fileName, data, delimiter=',')

            # create fake trueCentersFile (first nClusters feature rows),
            # and save to data dir
            tcsfilename = 'fake_tcs_' + fileName
            np.savetxt('../data/' + tcsfilename, data[:nClusters, 1:].tolist(), delimiter=',')

            newId = self.createRow(sqlConn, des, fileName, nRows, nCols, 'Dataset', tcsfilename, nClusters, name)

            if newId is None:
                print('save fail')
                return None
            else:
                print('dataset have been save into DB, its id is ', newId)
                return newId

    def loadDataFromLocal(self, fileName, isNor, skiprows=0, skipcols=0):
        """Load a CSV from ``../externel_data/`` as a float array.

        Parameters
        ----------
        fileName : str
            File name inside ``../externel_data/``.
        isNor : bool
            When True, min-max normalize each column to [0, 1].
        skiprows : int
            Header rows to skip (passed to np.loadtxt).
        skipcols : int or sequence of ints
            Column indices to delete after loading.

        Returns
        -------
        np.ndarray with the requested columns removed.
        """
        # `with` guarantees the handle is closed; the previous version passed
        # a bare open(...) to loadtxt and leaked the file object.
        with open('../externel_data/' + fileName, "rb") as fh:
            data = np.loadtxt(fh, delimiter=",", skiprows=skiprows)
        data = np.delete(data, skipcols, axis=1)

        # normalize
        if isNor:
            minV = np.min(data, axis=0)
            maxV = np.max(data, axis=0)
            dV = maxV - minV
            # Constant columns would give a 0 range -> division by zero;
            # force their divisor to 1 so they map to all-zeros instead.
            idx = np.where(dV == 0)[0]
            dV[idx] = 1
            data = (data - minV) / dV
            # data = np.delete(data, idx, axis=1)

        return data


if __name__ == '__main__':
    # Source CSV (read from ../externel_data/) and its DB metadata.
    fileName = 'Aggregation.csv'
    name = 'Aggregation'
    des = "s"
    nClusters = 7

    # Loading options: no normalization, no label column present,
    # skip no header rows, drop column index 2 after loading.
    isNor = False
    haveLabels = False
    skiprows = 0
    skipcols = [2]

    loader = loadSaveDB()
    loaded = loader.loadDataFromLocal(fileName, isNor, skiprows, skipcols)
    loader.saveDBForDataset(data=loaded, des=des, fileName=fileName,
                            nClusters=nClusters, name=name,
                            haveLabels=haveLabels)
