
# Written by Jianfei Yin.

from view.endSrc.MySqlConn import MySqlConn
from view.endSrc.DBConfig import DBConfig
from view.endSrc.MyLogger import MyLogger
from view.endSrc.MyTools import MyTools

from numpy import genfromtxt    # read trueCentres (List<np.vector>) from csv files
import os

import random, json     # for making tests
import numpy as np      # for making tests


class tDataset:

    def setSqlConn(self, dbConn: MySqlConn):
        """Attach a database connection (and its logger) to this object.

        A None connection is ignored and leaves the current state untouched.
        """
        if dbConn is None:
            return
        self._dbconn = dbConn
        self._logger = dbConn.logger

    def __init__(self, dbconn: MySqlConn= None):
        """Build an empty tDataset record; optionally attach a DB connection."""

        self._dbconn = None
        self._logger = None
        self.setSqlConn(dbconn)

        # mirrors of the DB columns of the tDataset table
        self.id = None
        self.name = None
        self.des = 'a dataset'
        self.fileName = None                # csv file, 1st col is labels
        self.nRows = None
        self.nCols = None
        self.className = None
        self.nClusters = None
        self.trueCentresFileName = None
        # for GaussianDataset only:
        #   meanList: [mean1_vector(numpy), mean2_vector(numpy), ...]
        #   vmList:   [[[1,2],[3,4]](numpy square matrix), ...]
        self.mixedParams = None

        # memory fields, set and read by users of tDataset
        self.m_dataset = None       # loaded from self.fileName
        self.m_labels = None        # loaded from self.fileName
        self.m_trueCentres = None   # loaded from self.trueCentresFileName

    def __str__(self):
        """Human-readable dump of all fields, used by the unit tests."""
        parts = [
            '---------- tDataset----------------',
            f'id: {self.id}',
            f'des: {self.des}',
            f'fileName: {self.fileName}',
            f'm_dataset: {self.m_dataset}',
            f'nRows: {self.nRows}',
            f'nCols: {self.nCols}',
            f'className: {self.className}',
            f'nClusters: {self.nClusters}',
            f'trueCentresFileName: {self.trueCentresFileName}',
            f'trueCentres: {self.m_trueCentres}',
            f'mixedParams: {self.mixedParams}',
            f'name: {self.name}',
        ]
        # one field per line, with a trailing newline like the original format
        return '\n'.join(parts) + '\n'

    def createTable(self):
        """Create the tDataset table if it does not exist yet.

        :return: True when the table already exists or was created,
                 False when creation failed (failure is logged).
        """
        if self._dbconn.tableExists('tDataset'):
            print('exist..tDataset')
            return True

        # NOTE: MySQL table-name case sensitivity is configured via
        # lower_case_table_names=2 under [mysqld] in
        # /etc/mysql/mysql.conf.d/mysqld.cnf, then
        # sudo systemctl restart mysql.service; see
        # http://dev.mysql.com/doc/refman/5.0/en/identifier-case-sensitivity.html

        ddl = """CREATE TABLE tDataset(
                 id int primary key auto_increment not null,
                 des varchar(400),
                 fileName char(200) not null,
                 nRows int not null,
                 nCols int not null,
                 className char(200) NOT NULL,
                 nClusters int NOT NULL,
                 trueCentresFileName char(200) not null,
                 mixedParams JSON,
                 name char(200) not null) """

        if self._dbconn.createTable(ddl):
            return True
        self._logger.write('.....failed..create table: tDataset')
        return False

    def _createRow(self):
        """Insert the current field values as a new row of tDataset.

        mixedParams, when present, must be a dict and is serialized to a
        JSON string via MyTools.getJsonByDict.

        :return: the auto-increment id of the new row, or None on failure
        """
        # check NOT NULL columns up front
        assert self.fileName is not None \
               and self.nRows is not None \
               and self.nCols is not None \
               and self.className is not None \
               and self.nClusters is not None \
               and self.trueCentresFileName is not None \
               and self.name is not None, 'some parameters must be NOT NULL'

        # des is a nullable column; only strip when it actually holds text
        # (the original crashed with AttributeError on a None des)
        if self.des is not None:
            self.des = self.des.strip()     # remove prefix/suffix white spaces

        cols = "des, fileName, nRows, nCols, className, nClusters, trueCentresFileName, name"
        fmts = "'%s', '%s', %d, %d, '%s', %d, '%s', '%s'"
        vals = [self.des, self.fileName,
                self.nRows, self.nCols,
                self.className,
                self.nClusters, self.trueCentresFileName,
                self.name]

        # append the optional JSON column instead of rebuilding the whole
        # statement (the original always built a first, discarded statement)
        if self.mixedParams is not None:
            cols += ", mixedParams"
            fmts += ", '%s'"
            vals.append(MyTools.getJsonByDict(self.mixedParams))

        # NOTE(review): values are spliced in with %-formatting, so a quote
        # inside des/fileName/name would break the statement (and is an SQL
        # injection hole); fixing it needs parameterized queries in MySqlConn.
        sql = ("INSERT INTO tDataset( " + cols + ") VALUES ( " + fmts + ") ") % tuple(vals)

        newId = self._dbconn.insertRetId(sql)
        if newId is not None:
            self.id = newId
        else:
            self._logger.write('tDataset.createRow: failed to insert..' + str(vals))
        return newId


    def _fromRow(self, row):
        """Populate the python fields from one SELECT * row of tDataset.

        :param row: tuple in table-column order (id, des, fileName, nRows,
                    nCols, className, nClusters, trueCentresFileName,
                    mixedParams, name)
        """
        self.id, self.des, self.fileName, \
        self.nRows, self.nCols, self.className, \
        self.nClusters, self.trueCentresFileName, \
        self.mixedParams, self.name = row

        # both des and mixedParams are nullable columns; guard against NULL
        # (the original stripped/decoded unconditionally and crashed on None)
        if self.des is not None:
            self.des = self.des.strip()
        if self.mixedParams is not None:
            # from JSON string to python dict
            self.mixedParams = MyTools.getDicByJson(self.mixedParams)

    def _readRowById(self, id):
        '''
          read one record from the tDataset table, used internally;
          this is where the mixedParams JSON string gets decoded.
        :param id: primary key of the row to read
        :return: True if ok
        '''
        sql = "SELECT * FROM tDataset WHERE id=%d" % (id,)
        rows = self._dbconn.read(sql)
        if not rows:    # None or empty result set
            self._logger.write('tDataset._readRowById failed to read row id= ' + str(id))
            return False

        self._fromRow(rows[0])      # only the first row is used
        return True

    def _checkMakeConsist(self, trueRows, trueCols, trueClustres):
        """Keep nRows/nCols/nClusters consistent with the csv file.

        When the values measured from the file differ from the stored
        columns, the database row is updated first and the in-memory
        fields only afterwards (so a failed UPDATE leaves them untouched).
        """
        unchanged = (trueRows == self.nRows
                     and trueCols == self.nCols
                     and trueClustres == self.nClusters)
        if unchanged:
            return

        # push the corrected values into the database
        sql = "UPDATE tDataset SET nRows=%d, nCols=%d, nClusters=%d " \
              "WHERE id=%d" % (trueRows, trueCols, trueClustres, self.id)

        assert self._dbconn.insDelUpd(sql), "tDataset.checkMakeConsist failed for id= "+str(self.id)

        # only now mirror the corrected values in memory
        self.nRows = trueRows
        self.nCols = trueCols
        self.nClusters = trueClustres

    def readDataset(self, id):
        '''
            load self.m_dataset and self.m_labels from tDataset.fileName,
            self.m_trueCentres from tDataset.trueCentresFileName, and make
            the nRows/nCols/nClusters columns consistent with the file

        :param id: id of the table tDataset
        :return:  True if success
                  False otherwise
        '''

        # pull fileName, trueCentresFileName and mixedParams from the DB row
        if not self._readRowById(id):
            return False

        pathPrefix = DBConfig().dataFilesRelPathPrefix

        #
        # read the dataset from its csv file (1st column holds the labels)
        #
        try:
            dat = genfromtxt(pathPrefix + self.fileName, delimiter=',')
            assert len(dat.shape) == 2, self.fileName + ', datset must have shape like (n,d)'

            labels = dat[:, 0].astype(int)  # np.array of int labels
            dat = dat[:, 1:]                # the data itself starts at the 2nd column
        except Exception as e:
            # include the exception detail so the failure cause is visible
            self._logger.write('tDataset.readDataset..failed to read dataset from the file: '
                               + self.fileName + ', ' + str(e))
            return False

        # check the DB columns against the file; update the DB when they differ
        nRows = dat.shape[0]
        nCols = dat.shape[1]
        nClusters = len(np.unique(labels))
        self._checkMakeConsist(nRows, nCols, nClusters)

        #
        # read trueCentres: list<np.vector>
        #
        try:
            # np.genfromtxt instead of pandas, for performance
            tCenters = genfromtxt(pathPrefix + self.trueCentresFileName, delimiter=',')
        except Exception as e:
            self._logger.write('tDataset.readDataset..failed to read true centres from the file: '
                               + self.trueCentresFileName + ', ' + str(e))
            return False

        # a single centre comes back 1-D; normalise it to shape (1, nCols)
        if tCenters.ndim == 1 and tCenters.shape[0] == self.nCols:
            tCenters = np.array([tCenters])
        for t in tCenters:
            assert t.shape == (self.nCols, ), \
                'tDataset.readDataset..the dim of center=' + str(t) + \
                'must be ' + str(self.nCols) + ', but it is ' + str(t.shape)

        # publish the memory fields only after everything succeeded
        self.m_dataset = dat
        self.m_labels = labels
        self.m_trueCentres = tCenters

        return True

    def _setTrueCentres(self, dataset, labels, nClustres):
        """Compute the per-cluster mean vectors and store them in m_trueCentres.

        labels[i] is the cluster id of dataset[i]; every id in
        [0, nClustres) is expected to occur at least once.
        """
        dim = dataset.shape[1]
        # one independent zero vector per cluster, plus a member counter
        totals = [np.zeros(dim) for _ in range(nClustres)]
        counts = [0] * nClustres

        # accumulate sum and count per cluster id
        for row, lab in zip(dataset, labels):
            totals[lab] = totals[lab] + row
            counts[lab] += 1

        # turn each sum into the cluster mean
        centres = [totals[b] / counts[b] for b in range(nClustres)]

        self.m_trueCentres = centres    # list<np.vector>

    def setBaseInfo(self, datasetMat, labels, trueCentres):
        """Fill nRows/nCols/nClusters and the memory fields from raw inputs.

        trueCentres may be None, in which case the per-cluster means of
        datasetMat are computed instead.
        """
        assert len(datasetMat.shape) == 2, 'dataset must has shape like (n,d)'
        self.nRows, self.nCols = datasetMat.shape
        self.m_dataset = datasetMat

        self.m_labels = np.asarray(labels).astype(int)
        nClusters = len(np.unique(self.m_labels))
        # with nClusters distinct values, any id outside [0, nClusters)
        # would imply a gap in the numbering
        for b in self.m_labels:
            assert 0 <= b < nClusters, 'all labels must be in [0, ' + str(nClusters) + ')'
        self.nClusters = nClusters

        # set true centres if not given
        self.m_trueCentres = trueCentres
        if trueCentres is None:
            self._setTrueCentres(self.m_dataset, self.m_labels, self.nClusters)


    def getDatasetNameList(self):
        """Return the (id, name) rows of every dataset, or None on DB failure."""
        rows = self._dbconn.read("SELECT id, name FROM tDataset")

        if rows is None:
            self._logger.write('tDataset.getDatasetNameList..failed to read records')
            return None
        if len(rows) == 0:
            # an empty table is not an error, but worth a log line
            self._logger.write('tDataset.getDatasetNameList.. read nothing')
        return rows


    def getDatasetDetail(self, id):
        """Load the DB row with the given id into this object's fields.

        :param id: primary key of the tDataset row
        :return: True when the row was read, False otherwise
                 (the original dropped this result; callers that ignored
                 the None return keep working)
        """
        return self._readRowById(id)


    def saveDataset(self, name, className, datasetMat, labels,
                    des=None, trueCentres=None, mixedParams= None):
        '''
            save dataset_n_by_d and labels to csv files, create the DB row,
            automatically calculate self.trueCenters if trueCentres=None, and
            automatically set self.nRows, self.nCols, self.nClusters

        :param name: short dataset name; defaults to 'ds' when None
        :param className: name of the class that produced the dataset
        :param datasetMat: np.ndarray, shape is (n,d), n-records, d-numOfAttributes
        :param labels: list of [0, numOfClusters)
        :param des: free-text description; generated from the other fields when None
        :param trueCentres: list<np.vector>; computed from the data when None
        :param mixedParams: dict of generator parameters, stored as JSON
        :return: id if saved, None otherwise.
        '''

        #######################################################
        #  check several params
        #
        assert isinstance(labels, list) or \
               (isinstance(labels, np.ndarray) and len(labels.shape) == 1), \
               'labels must be list or np.ndarray with shape like (d, )'

        assert mixedParams is None or \
               isinstance(mixedParams, dict), 'mixedParams must be objects of the dict class'  # mixedParameters

        #######################################################
        #  set db fields (nRows, nCols, nClusters and the memory fields)
        #
        self.setBaseInfo(datasetMat, labels, trueCentres)

        self.name = name if name is not None else 'ds'
        self.className = className
        self.mixedParams = mixedParams

        # make a description if not given
        namePrefix = self.name + '_' + self.className + '_n' + str(self.nRows) + '_d' + str(self.nCols)
        self.des = des
        if des is None:
            self.des = namePrefix + '_nc' + str(self.nClusters)

        # timestamped file names keep repeated saves from clobbering each other
        date = MyTools.getDatetimeFileName()
        self.fileName = namePrefix + '_ds_' + date + '.csv'
        self.trueCentresFileName = namePrefix + '_tcs_' + date + '.csv'

        pathPrefix = DBConfig().dataFilesRelPathPrefix

        #################################################
        #   save to files; the labels become the first csv column
        try:
            firstCol = self.m_labels[:, np.newaxis]  # to a column with shape (n, 1)
            ds = np.concatenate((firstCol, self.m_dataset), axis=1)

            np.savetxt(pathPrefix + self.fileName, ds, delimiter=",")
            np.savetxt(pathPrefix + self.trueCentresFileName, self.m_trueCentres, delimiter=',')
        except Exception as e:
            self._logger.write('tDataset.saveDataset..failed to write to files: ' + str(e) + \
                               ', dataset is : ' + self.fileName + \
                               ', or trueCenters: ' + self.trueCentresFileName)
            return None

        ##################################################
        # create the DB record at last
        rowId = self._createRow()
        if rowId is not None:
            return rowId

        # failed to create a row: delete the files again so no orphans remain
        self._logger.write('tDataset.saveDataset..WARNING..failed to create row for the dataset: ' + self.fileName)
        try:
            MyTools.removeFile(pathPrefix + self.fileName)
            MyTools.removeFile(pathPrefix + self.trueCentresFileName)
        except Exception as e:
            # include the exception detail (the original silently dropped it)
            self._logger.write('tDataset.saveDataset..failed to remove files for unsuccessful creating row: ' + \
                               str(e) + ', ' + self.fileName + ', ' + self.trueCentresFileName)
        return None


    @staticmethod
    def testJSON_insert():
        """Manual test: insert a row with a JSON mixedParams column, read it back."""
        conn = MySqlConn(DBConfig())

        data = tDataset(conn)
        data.createTable()

        data.des = 'testJSON'
        data.fileName = 'abc.csv'
        data.nRows = 10
        data.nCols = 5
        data.className = 'tDataset'
        data.nClusters = 2
        data.trueCentresFileName = 'efg.csv'

        # Gaussian mixture parameters for the JSON column
        mean = np.asarray([random.uniform(1, 10) for i in range(5)])
        var = np.arange(25).reshape(5, 5).tolist()  # 5x5 nested lists
        # [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], ..., [20, 21, 22, 23, 24]]
        print('var: ', var)

        data.mixedParams = {
            'meanList': [mean.tolist(), mean.tolist()],
            'varList': [var, var],
            'overlap': 1.5,
        }
        data.name = 'testJSON_name'

        rowId = data._createRow()

        # read the row back through a fresh object and dump it
        dataBack = tDataset(conn)
        dataBack.readDataset(rowId)
        print(dataBack)

    @staticmethod
    def testJSON_read():
        """Manual test: read row id=3 and show the decoded JSON value's type."""
        conn = MySqlConn(DBConfig())

        reader = tDataset(conn)
        reader.readDataset(3)

        print(type(reader.mixedParams['overlap']))


    @staticmethod
    def testSaveDataset():
        """Manual test: save a tiny dataset (with mixedParams) and read it back."""
        dataset = np.asarray([[0.1, 1.2], [1.2, 10.2], [56.1343, 3434.2]])

        cfg = DBConfig()
        conn = MySqlConn(cfg)
        td = tDataset(conn)

        # Gaussian mixture parameters
        mean = np.asarray([random.uniform(1, 10) for i in range(5)])
        var = np.arange(25).reshape(5, 5).tolist()  # 5x5 nested lists
        print('var: ', var)

        mixedParams = {
            'meanList': [mean.tolist(), mean.tolist()],
            'varList': [var, var],
            'overlap': 1.5,
        }

        # BUG FIX: mixedParams was built but never passed to saveDataset
        rid = td.saveDataset('tall', 'Dataset', dataset, [1, 0, 2],
                             mixedParams=mixedParams)
        if rid is None:
            # saveDataset already logged the reason; readDataset(None) would crash
            print('testSaveDataset..saveDataset failed')
            return

        td2 = tDataset(conn)
        td2.readDataset(rid)
        print(td2)


if __name__ == '__main__':

    # other manual tests (run one at a time against a live MySQL instance):
    #   tDataset.testJSON_insert()  # ok
    #   tDataset.testJSON_read()
    tDataset.testSaveDataset()