import sys
import math
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import preprocessing  


from PySide6.QtWidgets import  QDialog, QTabWidget, QVBoxLayout
from PySide6.QtCore import Qt
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans


def dmaxIndex(Mat, K):
    """Return (value, index) of the largest entry among the first K of Mat.

    Ties are broken in favor of the earliest index, matching a strict
    left-to-right scan.
    """
    best_val = Mat[0]
    best_idx = 0
    for pos in range(K):
        current = Mat[pos]
        if current > best_val:
            best_val, best_idx = current, pos
    return best_val, best_idx


def localPCA(X):
    """PCA of X via SVD with a deterministic sign convention.

    Parameters
    ----------
    X : 2-D array of shape (rows, cols), typically standardized data.

    Returns
    -------
    U : left singular vectors from ``np.linalg.svd(X)``.
    sigma : singular values.
    c : ``Vt.T`` with each column flipped in sign so that its
        largest-magnitude entry is positive (removes the arbitrary sign
        ambiguity of SVD so results are reproducible).
    """
    U, sigma, coeff = np.linalg.svd(X)

    # For each row of Vt, locate the largest-magnitude entry and take its
    # sign.  Vectorized replacement for the original per-row scan; also
    # drops the unused vWeights/row/col locals.
    n_rows = coeff.shape[0]
    peak_cols = np.argmax(np.abs(coeff), axis=1)
    colsign = np.sign(coeff[np.arange(n_rows), peak_cols])

    # Broadcasting multiplies column j of coeff.T (= row j of Vt) by its sign.
    c = np.transpose(coeff) * colsign
    return U, sigma, c


def meanX(dataX):
    """Column-wise mean of dataX.

    axis=0 averages down each column; note that for a plain (1-D-like)
    list input the caller may want axis=1 instead.
    """
    return np.asarray(dataX).mean(axis=0)

def dminIndex(dist, K):
    """Return (value, index) of the smallest entry among the first K of dist.

    Ties are broken in favor of the earliest index, matching a strict
    left-to-right scan.
    """
    best_val = dist[0]
    best_idx = 0
    for pos in range(K):
        current = dist[pos]
        if current < best_val:
            best_val, best_idx = current, pos
    return best_val, best_idx


def MyKMeans(data, K, iniCentriods, iterations):
    """Lloyd-style K-means with a fixed iteration budget.

    Parameters
    ----------
    data : (numOfData, numOfAttr) array of samples.
    K : number of clusters.
    iniCentriods : (K, numOfAttr) initial centroids; not modified.
    iterations : maximum number of assignment/update passes.

    Returns
    -------
    Idx : (numOfData,) int array — cluster index of each sample.
    centroids : (K, numOfAttr) float array — final centroids.
    Distance : float — sum of each sample's distance to its centroid.
    """
    numOfData, numOfAttr = np.shape(data)
    # Float copy so integer initial centroids don't truncate the means below.
    centroids = np.array(iniCentriods, dtype=np.float64)

    for _ in range(iterations):
        # Snapshot for the convergence test.  (Original never refreshed
        # pre_centroids, so it always compared against the *initial*
        # centroids and the early-exit was effectively dead.)
        pre_centroids = centroids.copy()

        # Assignment step: one-hot tag each sample with its nearest centroid.
        tags = np.zeros((numOfData, K))
        for i in range(numOfData):
            Dist = np.zeros(K)
            for j in range(K):
                Dist[j] = np.linalg.norm(data[i, :] - centroids[j, :], 2)
            tags[i, int(np.argmin(Dist))] = 1

        # Update step: move each centroid to the mean of its members.
        for i in range(K):
            ti = tags[:, i]
            members = ti.sum()
            if members != 0:
                for j in range(numOfAttr):
                    centroids[i, j] = np.sum(ti * data[:, j]) / members
            else:
                # Empty cluster: re-seed it with one random sample and move
                # only that sample into the cluster.  (Original indexed tags
                # with the whole permutation array, zeroing every row and then
                # assigning ALL samples to cluster i.)
                ridx = np.random.permutation(numOfData)[0]
                centroids[i, :] = data[ridx, :]
                tags[ridx, :] = 0
                tags[ridx, i] = 1

        # Stop once centroids barely move between consecutive iterations.
        if np.linalg.norm((pre_centroids - centroids).T, 2) < 0.001:
            break

    # Final labelling pass with the converged centroids.
    Distance = np.zeros(numOfData)
    Idx = np.zeros(numOfData, dtype=int)
    for i in range(numOfData):
        Dist = np.zeros(K)
        for j in range(K):
            Dist[j] = np.linalg.norm(data[i, :] - centroids[j, :], 2)
        Idx[i] = int(np.argmin(Dist))
        Distance[i] = Dist[Idx[i]]
    return Idx, centroids, np.sum(Distance)


class PCAWidget(QDialog):
    """Dialog that runs PCA and K-means over a DataReader's series and shows
    each result as a matplotlib figure in its own tab.

    Assumes ``dataReader.seriesData`` is a sequence of ``(title, samples)``
    pairs where each sample is indexable, ``sample[1]`` is the length
    coordinate and ``sample[2]`` the measured value — TODO confirm against
    the DataReader implementation.
    """

    def __init__(self, dataReader):
        super().__init__()
        self.setWindowTitle("Data Analysis")
        self.setGeometry(100, 100, 800, 600)

        # Create a tab widget to display the plots
        layout = QVBoxLayout(self)
        self.setLayout(layout)
        self.tab_widget = QTabWidget(self)
        layout.addWidget(self.tab_widget)

        # X axis: take the length coordinate (element [1] of each sample)
        # from the first series; all series are assumed to share this axis.
        sData = dataReader.seriesData[0]
        D0 = sData[1]
        length = []
        for l in D0:
            length.append(l[1])

        # Collect every series' values (element [2] of each sample).  Only
        # every 3rd title is kept, matching the "plot every 3rd series"
        # filter applied to the figures below.
        data = []
        titles = []
        for idx, (title , sd) in enumerate(dataReader.seriesData):
            if idx % 3 == 0:
                titles.append(title)
            dd = []
            for d in sd:
                dd.append(d[2])
            data.append(dd)
        
        # Plot the raw data (every 3rd series, so the legend matches `titles`)
        fig1, ax1 = plt.subplots()
        for index, d in enumerate(data):
            if index % 3 == 0:
                ax1.plot(length, d)
        ax1.legend(titles)
        ax1.set_xlabel('Length')
        ax1.set_ylabel('Value')
            # add fig to tab
        self.tab_widget.addTab(fig1.canvas, 'ori data')
        #fig1.show(False)

        # orgin data normalized: series become columns, standardized per column
        data_process = np.column_stack(data)
        data_std = StandardScaler().fit_transform(data_process)
        x_std = []
        r, c = np.shape(data_std)
        for i in range(c):
            col = data_std[:, i]
            x_std.append(col)

        # Plot the norm data
        fig2, ax2 = plt.subplots()
        for i, d in enumerate(x_std):
            if i % 3 == 0:
                ax2.plot(length, d)
        ax2.legend(titles)
        ax2.set_xlabel('Length')
        ax2.set_ylabel('Value')
        # add fig to tab
        self.tab_widget.addTab(fig2.canvas, 'norm data')
    
        (m, n) = np.shape(data_process)
        (m_std, n_std) = np.shape(data_std)

            #fit_data = pca.fit_transform(data_process)
        # PCA on the standardized data; `coff` columns are the
        # sign-normalized principal directions (see localPCA).
        U, sigma, coff = localPCA(data_std)

            # TODO: the computation block below needs optimization
        # Project the *raw* (unstandardized) data onto the principal
        # directions; rows of fit_data are the principal components.
        T = data_process.T
        C = coff.T
        fit_data = np.matmul(C, T)
        prin1 = fit_data[0].T
        prin2 = fit_data[1].T
        prin3 = fit_data[2].T

        # Plot the principal components of the origin data 
        fig3 = plt.figure()
        ax3 = fig3.add_subplot(111)
        ax3.plot(length, prin1, length, prin2, length, prin3)
        ax3.legend(['prin1', 'prin2', 'prin3'])
        ax3.set_xlabel('Length')
        ax3.set_ylabel('Value')
        self.tab_widget.addTab(fig3.canvas, 'ori pca')

        # Same projection, applied to the standardized data.
        fit_data_std = np.matmul(C , data_std.T)
        prin1_std = fit_data_std[0].T
        prin2_std = fit_data_std[1].T
        prin3_std = fit_data_std[2].T

        # Plot the principal components of the norm data
        fig4 = plt.figure()
        ax4 = fig4.add_subplot(111)
        ax4.plot(length, prin1_std, length, prin2_std, length, prin3_std)
        ax4.legend(['prin1_std', 'prin2_std', 'prin3_std'])
        ax4.set_xlabel('Length')
        ax4.set_ylabel('Value')
        self.tab_widget.addTab(fig4.canvas, 'std pca')

        # k-means on the first three standardized principal components
        K = 5
        prin_mat = np.column_stack((prin1_std, prin2_std, prin3_std))
        numOfData, numOfAttr = np.shape(prin_mat)
        centroids = np.zeros((K, numOfAttr))
        maxAttr = np.zeros(numOfAttr)
        minAttr = np.zeros(numOfAttr)
        for i in range(numOfAttr):
            vmax = np.max(prin_mat[:, i])
            vmin = np.min(prin_mat[:, i])
            maxAttr[i] = vmax
            minAttr[i] = vmin
            for j in range(K):
                # NOTE(review): with the random factor commented out this is
                # always exactly vmin, so all K initial centroids coincide and
                # K-means relies entirely on its empty-cluster re-seeding.
                # Confirm whether `* random.random()` should be re-enabled.
                val = vmax + (vmin - vmax) #* random.random()
                centroids[j][i] = val
        Idx, C, Distance = MyKMeans(prin_mat, K, centroids, 10)

        # Plot each point's k-means cluster index along the length axis
        fig5 = plt.figure()
        ax5 = fig5.add_subplot(111)
        ax5.plot(length, Idx)
        ax5.legend(['Index'])
        ax5.set_xlabel('Lenght')  # NOTE(review): label typo — should be 'Length'
        ax5.set_ylabel('Index')
        self.tab_widget.addTab(fig5.canvas, 'KMeans')
        
            # Perform clustering
            # kmeans = KMeans(n_clusters=6)
            # kmeans.fit(prin1_std.reshape(-1, 1))
            # idx = kmeans.labels_

            # Create a table to display the clustering results
            #table = QTableWidget()
            #table.setColumnCount(2)
            # table.setRowCount(len(idx))
            # table.setHorizontalHeaderLabels(['Length', 'Cluster'])
            #for i, (l, c) in enumerate(zip(length, idx)):
            #    table.setItem(i, 0, QTableWidgetItem(str(l)))
            #    table.setItem(i, 1, QTableWidgetItem(str(c)))
            #table.horizontalHeader().setStretchLastSection(True)
            #table.verticalHeader().setVisible(False)
            #table.setSelectionBehavior(QAbstractItemView.SelectRows)
            #table.setEditTriggers(QAbstractItemView.NoEditTriggers)

            # Add the plots and table to the tab
            #layout = QVBoxLayout()
            #self.tab_widget.addWidget(table)
            #tab.setLayout(layout)
