import numpy as np
import scipy.io as scio
from sklearn.preprocessing import MinMaxScaler
from sklearn.decomposition import FastICA
from sklearn.decomposition import PCA

class load():
    """Load hyperspectral datasets (Indian Pines, Pavia University, KSC,
    Salinas, Salinas-A) and draw random disjoint band subsets for ensembles.
    """

    def load_data(self, flag='indian'):
        """Load one dataset from disk and flatten it to per-pixel rows.

        Parameters
        ----------
        flag : str
            Dataset selector: 'indian', 'pavia', 'ksc', 'sali' or 'sali_a'.

        Returns
        -------
        All_data : ndarray, shape (r*c, 1 + bands + 1)
            Per-pixel rows: [pixel ID, spectral features..., label].
        labeled_data : ndarray
            Rows of All_data whose label is non-zero.
        rows_num : ndarray
            Pixel IDs (column 0) of the labeled rows.
        categories : int
            Number of label values, background class 0 included.
        r, c : int
            Spatial height and width of the scene.
        flag : str
            The dataset flag, echoed back to the caller.

        Raises
        ------
        ValueError
            If ``flag`` is not one of the supported dataset names.
        """
        if flag == 'indian':
            Ind_pines_dict = scio.loadmat('/data/di.wang/ordinary/23DCNN/Indian_pines.mat')
            Ind_pines_gt_dict = scio.loadmat('/data/di.wang/ordinary/23DCNN/Indian_pines_gt.mat')

            print(Ind_pines_dict['indian_pines'].shape)
            print(Ind_pines_gt_dict['indian_pines_gt'].shape)

            # Remove the water-absorption bands.  sorted() guarantees the
            # surviving 200 bands keep their spectral order; a bare
            # list(set(...)) makes no ordering promise.
            no_absorption = sorted(set(np.arange(0, 103)) | set(np.arange(108, 149)) | set(np.arange(163, 219)))

            original = Ind_pines_dict['indian_pines'][:, :, no_absorption].reshape(145 * 145, 200)

            print(original.shape)
            print('Remove water absorption bands successfully!')

            gt = Ind_pines_gt_dict['indian_pines_gt'].reshape(145 * 145, 1)

            r = Ind_pines_dict['indian_pines'].shape[0]
            c = Ind_pines_dict['indian_pines'].shape[1]
            categories = 17
        elif flag == 'pavia':
            pav_univ_dict = scio.loadmat('/data/di.wang/ordinary/23DCNN/PaviaU.mat')
            pav_univ_gt_dict = scio.loadmat('/data/di.wang/ordinary/23DCNN/PaviaU_gt.mat')

            print(pav_univ_dict['paviaU'].shape)
            print(pav_univ_gt_dict['paviaU_gt'].shape)

            original = pav_univ_dict['paviaU'].reshape(610 * 340, 103)
            gt = pav_univ_gt_dict['paviaU_gt'].reshape(610 * 340, 1)

            r = pav_univ_dict['paviaU'].shape[0]
            c = pav_univ_dict['paviaU'].shape[1]
            categories = 10
        elif flag == 'ksc':
            ksc_dict = scio.loadmat('/data/di.wang/ordinary/23DCNN/KSC.mat')
            ksc_gt_dict = scio.loadmat('/data/di.wang/ordinary/23DCNN/KSC_gt.mat')

            print(ksc_dict['KSC'].shape)
            print(ksc_gt_dict['KSC_gt'].shape)

            original = ksc_dict['KSC'].reshape(512 * 614, 176)
            # Values above 400 are zeroed — presumably sensor artifacts;
            # TODO(review): confirm the origin of this threshold.
            original[original > 400] = 0
            gt = ksc_gt_dict['KSC_gt'].reshape(512 * 614, 1)

            r = ksc_dict['KSC'].shape[0]
            c = ksc_dict['KSC'].shape[1]
            categories = 14
        elif flag == 'sali':
            salinas_dict = scio.loadmat('Salinas.mat')
            salinas_gt_dict = scio.loadmat('Salinas_gt.mat')

            print(salinas_dict['salinas'].shape)
            print(salinas_gt_dict['salinas_gt'].shape)

            original = salinas_dict['salinas'].reshape(512 * 217, 224)
            gt = salinas_gt_dict['salinas_gt'].reshape(512 * 217, 1)

            r = salinas_dict['salinas'].shape[0]
            c = salinas_dict['salinas'].shape[1]
            categories = 17
        elif flag == 'sali_a':  # special! labels are 1, 10, 11, 12, 13, 14
            salinas_a_dict = scio.loadmat('SalinasA.mat')
            salinas_a_gt_dict = scio.loadmat('SalinasA_gt.mat')

            print(salinas_a_dict['salinasA'].shape)
            print(salinas_a_gt_dict['salinasA_gt'].shape)

            original = salinas_a_dict['salinasA'].reshape(83 * 86, 224)
            gt = salinas_a_gt_dict['salinasA_gt'].reshape(83 * 86, 1)

            r = salinas_a_dict['salinasA'].shape[0]
            c = salinas_a_dict['salinasA'].shape[1]
            categories = 7
        else:
            # Previously an unknown flag fell through and crashed later with
            # UnboundLocalError; fail fast with a clear message instead.
            raise ValueError(f"unknown dataset flag: {flag!r}")

        rows = np.arange(gt.shape[0])  # pixel IDs, starting at 0
        # Columns: pixel ID, spectral features, class label.
        All_data = np.c_[rows, original, gt]

        # Drop the zero (background) class to obtain all labeled pixels.
        labeled_data = All_data[All_data[:, -1] != 0, :]
        rows_num = labeled_data[:, 0]  # IDs of all labeled pixels

        return All_data, labeled_data, rows_num, categories, r, c, flag

    def sampling(self, All_data, categories):
        """Randomly split the spectral bands into K disjoint groups
        (sampling without replacement), e.g. for an ICA ensemble.

        Parameters
        ----------
        All_data : ndarray
            Per-pixel table; column 0 is the ID, the last column the label,
            everything in between are spectral bands.
        categories : int
            Number of classes (including background); each group holds
            ``categories - 1`` bands.

        Returns
        -------
        ensumb_feature_set : dict[str, ndarray]
            Group id ('1'..'K') -> feature columns for that band group.
        K : int
            Number of groups (fixed at 10).
        M : int
            Bands per group.
        """
        K = 10               # number of band groups
        M = categories - 1   # bands per group
        origin = All_data[:, 1:-1]
        bands = np.arange(origin.shape[1])
        ensumb_num = {}          # group id -> selected band indices
        ensumb_feature_set = {}  # group id -> feature columns
        for i in range(K):
            ensumb_num[str(i + 1)] = np.random.choice(bands, M, replace=False)
            # Remove the chosen bands so later groups cannot reuse them.
            bands = bands[~np.isin(bands, ensumb_num[str(i + 1)])]
        for i in range(K):
            ensumb_feature_set[str(i + 1)] = origin[:, ensumb_num[str(i + 1)]]
        return ensumb_feature_set, K, M

class product():
    """Produce training/testing pixel-ID splits and min-max normalization."""

    # Fixed per-class training-sample counts (class label -> count),
    # taken verbatim from the original per-class branch chains.
    _INDIAN_TRAIN = {1: 33, 7: 20, 9: 14, 16: 75}  # every other class: 100
    _KSC_TRAIN = {1: 33, 2: 23, 3: 24, 4: 24, 5: 15, 6: 22, 7: 9,
                  8: 38, 9: 51, 10: 39, 11: 41, 12: 49, 13: 91}

    def __init__(self, flag):
        # flag: dataset name ('indian', 'pavia' or 'ksc') selecting the
        # per-class training counts used by generation_num.
        self.flag = flag

    def generation_num(self, labeled_data, rows_num, All_data):
        """Split pixel IDs into training, testing and prediction sets.

        Parameters
        ----------
        labeled_data : ndarray
            Labeled rows of All_data ([ID, features..., label]).
        rows_num : array-like
            IDs of all labeled pixels.
        All_data : ndarray
            Full per-pixel table; only its row count is used here.

        Returns
        -------
        rows_num : array-like
            The input IDs, returned unchanged.
        trn_num : list
            Randomly sampled training pixel IDs.
        tes_num : list
            Labeled pixel IDs not chosen for training.
        pre_num : list
            Every pixel ID (labeled or not) outside the training set.
        """
        train_num = []

        for label in np.unique(labeled_data[:, -1]):
            cls_rows = labeled_data[labeled_data[:, -1] == label, :]
            cls_ids = cls_rows[:, 0]   # all pixel IDs of this class
            np.random.shuffle(cls_ids)  # random subset via shuffle + prefix
            if self.flag == 'indian':
                count = self._INDIAN_TRAIN.get(int(label), 100)
                train_num.append(cls_ids[0:count])
            elif self.flag == 'pavia':
                train_num.append(cls_ids[0:100])
            elif self.flag == 'ksc':
                count = self._KSC_TRAIN.get(int(label))
                # Classes outside the table contribute no training samples
                # (matches the original elif chain, which had no else).
                if count is not None:
                    train_num.append(cls_ids[0:count])

        trn_num = [x for grp in train_num for x in grp]  # flatten per-class lists
        tes_num = list(set(rows_num) - set(trn_num))
        pre_num = list(set(range(0, All_data.shape[0])) - set(trn_num))
        print('number of training sample', len(trn_num))
        return rows_num, trn_num, tes_num, pre_num

    def normlization(self, data_spat, mi, ma):
        """Min-max scale features into [mi, ma].

        data_spat is flattened to 2-D (last axis = features) for the
        scaler, then restored to its original shape.  The misspelled
        method name is kept for backward compatibility with callers.
        """
        scaler = MinMaxScaler(feature_range=(mi, ma))

        spat_data = data_spat.reshape(-1, data_spat.shape[-1])
        data_spat_new = scaler.fit_transform(spat_data).reshape(data_spat.shape)

        print('Dataset normalization Finished!')
        return data_spat_new


class preprocess():
    """Optional dimensionality reduction of the spectral features."""

    def __init__(self, t):
        # t: transform name; 'pca' applies PCA, anything else is a no-op.
        # (A FastICA branch existed here as commented-out dead code and
        # was removed; 'ica' therefore also falls through unchanged.)
        self.transform = t

    def Dim_reduction(self, All_data):
        """Reduce the feature columns of All_data.

        Parameters
        ----------
        All_data : ndarray
            Per-pixel table; column 0 is the pixel ID, the last column
            the label, everything in between are spectral features.

        Returns
        -------
        ndarray
            The 1-component PCA projection of the feature columns when
            ``self.transform == 'pca'``; otherwise All_data unchanged.
        """
        Alldata_DR = All_data

        if self.transform == 'pca':
            pca_data_pre = All_data[:, 1:-1]
            print(pca_data_pre.shape)
            pca_transformer = PCA(n_components=1)
            # Reuse the already-computed slice instead of re-slicing All_data.
            pca_data = pca_transformer.fit_transform(pca_data_pre)
            print(pca_data.shape)

            Alldata_DR = pca_data

            print('PCA Finished!')

        return Alldata_DR