# -*- coding:utf-8 -*-
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import Dataset,DataLoader

import utils
import  math
import numpy as np
# import data_loader
import argparse
from sklearn import preprocessing
from GlobalNet import GlobalNet
import scipy.io as sio
import os
import time
from sklearn import metrics
from torch.utils.data.sampler import SubsetRandomSampler

import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import copy

class BasicBlock(nn.Module):
    """Residual block whose main branch is a depthwise-separable convolution.

    conv_type == 1 builds a dilated separable conv (DilConv, dilation=2);
    conv_type == 2 builds a plain separable conv (SepConv). The padding is
    chosen so the spatial size is preserved when stride == 1.

    Args:
        in_planes: number of input channels.
        planes: number of output channels.
        conv_type: 1 for DilConv, 2 for SepConv.
        kernel_size: depthwise kernel size, 3 or 5.
        stride: stride of the depthwise convolution (default 1).

    Raises:
        ValueError: if conv_type is not 1/2 or kernel_size is not 3/5.
            (Previously an invalid value left self.Conv/self.padding unset
            and failed later with an opaque AttributeError.)
    """

    def __init__(self, in_planes, planes, conv_type, kernel_size, stride=1):
        super(BasicBlock, self).__init__()
        self.expansion = 1

        # Same-size padding: DilConv needs (kernel_size - 1) with dilation=2,
        # SepConv needs (kernel_size - 1) // 2.
        if kernel_size == 3:
            self.padding = 2 if conv_type == 1 else 1
        elif kernel_size == 5:
            self.padding = 4 if conv_type == 1 else 2
        else:
            raise ValueError('kernel_size must be 3 or 5, got %r' % (kernel_size,))

        if conv_type == 1:
            # DilConv: dilated depthwise conv followed by a 1x1 pointwise conv.
            self.Conv = nn.Sequential(
                nn.ReLU(inplace=False),  # stride is not necessarily 1 here
                nn.Conv2d(in_planes, in_planes, kernel_size=kernel_size, stride=stride, padding=self.padding,
                          dilation=2,
                          groups=in_planes, bias=False),
                nn.Conv2d(in_planes, planes, kernel_size=1, padding=0, bias=False),
                nn.BatchNorm2d(planes, affine=False),
            )
        elif conv_type == 2:
            # SepConv: depthwise conv followed by a 1x1 pointwise conv.
            self.Conv = nn.Sequential(
                nn.ReLU(inplace=False),
                nn.Conv2d(in_planes, in_planes, kernel_size=kernel_size, stride=stride, padding=self.padding,
                          groups=in_planes, bias=False),
                nn.Conv2d(in_planes, planes, kernel_size=1, padding=0, bias=False),
                nn.BatchNorm2d(planes, affine=False),
            )
        else:
            raise ValueError('conv_type must be 1 (DilConv) or 2 (SepConv), got %r' % (conv_type,))

        # Projection shortcut when the residual path changes shape.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.ReLU(inplace=False),
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes)
            )

    def forward(self, x):
        """Return Conv(x) + shortcut(x); no trailing activation."""
        out = self.Conv(x)
        out += self.shortcut(x)
        return out

class BasicUnit(nn.Module):
    """A stack of `amount` BasicBlocks mapping in_channel -> out_channel.

    The block count controls the depth; the channel counts are derived from
    the unit's input and output widths.
    """

    def __init__(self, in_channel, out_channel, amount, conv_type, kernel_size):
        super(BasicUnit, self).__init__()
        self.type = 1  # unit-type tag; set to 1 here, not currently used downstream
        self.inchannel = in_channel
        self.conv_type_ = conv_type
        self.kernel_size_ = kernel_size
        # Only the output channel count matters for the layers that follow.
        self.layer = self.make_layer(BasicBlock, out_channel, amount, conv_type, kernel_size, stride=1)

    def make_layer(self, block, channels, num_blocks, _conv_type_, _kernel_size_, stride):
        """Build `num_blocks` blocks; `stride` applies to the first block only."""
        blocks = []
        for position in range(num_blocks):
            step = stride if position == 0 else 1
            blocks.append(block(self.inchannel, channels, _conv_type_, _kernel_size_, step))
            # Every block after the first sees `channels` input channels.
            self.inchannel = channels
        return nn.Sequential(*blocks)

    def forward(self, x):
        # e.g. (128, 3, 32, 32) -> (128, 64, 32, 32)
        return self.layer(x)

class PoolUnit(nn.Module):
    """Downsampling unit that halves the spatial resolution of its input.

    pool_type == 1 selects 3x3 average pooling; pool_type == 2 selects
    3x3 max pooling. Both use stride 2 and padding 1.
    """

    def __init__(self, pool_type):
        super(PoolUnit, self).__init__()
        self.type = 2  # unit-type tag; value 2 marks pooling, not necessarily used
        self.pool_type = pool_type

        if pool_type == 1:
            # Average pooling: feature map is halved.
            self.select_pooltype = nn.AvgPool2d(3, stride=2, padding=1)
        elif pool_type == 2:
            # Max pooling: feature map is halved.
            self.select_pooltype = nn.MaxPool2d(3, stride=2, padding=1)

    def forward(self, x):
        pooled = self.select_pooltype(x)
        return pooled

class Matcifar(Dataset):
    """Dataset wrapper around a .mat-style dict.

    Expects `datasets['data']` as a 4-D array whose last axis indexes samples
    (it is transposed to sample-first order) and `datasets['Labels']` as the
    per-sample label vector.
    """

    def __init__(self, datasets):
        self.y_data = datasets['Labels']
        self.len = len(self.y_data)
        # (H, W, B, N) -> (N, B, W, H): sample index moves to the front.
        self.x_data = datasets['data'].transpose(3, 2, 1, 0)

    def __getitem__(self, index):
        sample = self.x_data[index]
        target = self.y_data[index]
        return sample, target

    def __len__(self):
        return self.len

class FitnessEvaluate(object):
    """Evaluates a searched network architecture on the Indian Pines dataset.

    `test_end` rebuilds a hard-coded best architecture, loads saved weights,
    computes OA / AA / Kappa over every labelled pixel, and renders a colour
    classification map. `train` and `infer` are generic one-epoch helpers.
    """

    def __init__(self):
        # Use the first GPU when available, otherwise fall back to CPU.
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

        self.Best_acc = -1.0
        self.pre_epoch = 0

        self.cutout = False  # TODO: move into a parameter/config object
        self.cutout_length = 2
        self.num_cut = 10
        self.EPOCH = 200
        self.BATCH_SIZE = 32
        self.num_class = 16

        # Loss function: cross entropy, the standard choice for multi-class problems.
        criterion = nn.CrossEntropyLoss()
        self.criterion = criterion
        self.nTrain = 200  # TODO: move into params
        self.nValid = 100  # TODO: move into params
        self.grad_clip = 5
        self.lr = 0.025  #0.016
        self.weight_decay = 3e-4
        # File initialisation would go here.

    def zeroPadding_2D(self, old_matrix, roof, floor, left, right):
        """Zero-pad a 2-D array by the given margin on each side."""
        new_matrix = np.pad(old_matrix, ((roof, floor), (left, right)), 'constant', constant_values=(0, 0))
        return new_matrix

    def zeroPadding_3D(self , old_matrix, roof, floor, left, right , pad_depth=0):
        """Zero-pad a 3-D array spatially; depth padding defaults to none."""
        new_matrix = np.lib.pad(old_matrix,((roof, floor), (left, right), (pad_depth, pad_depth)),
                                'constant', constant_values=0)
        return new_matrix
    def test_end(self):
        """Run the final evaluation of the saved best model on Indian Pines.

        Loads the raw data cube and ground truth from hard-coded paths,
        rebuilds the searched architecture, extracts a 32x32 patch around
        every labelled pixel, reports OA / AA / Kappa, and writes a colour
        classification map to disk.
        """
        image_file = '/home/server/Pycharmproject/LSY/Design_YG/IN/Indian_pines_corrected.mat'
        label_file = '/home/server/Pycharmproject/LSY/Design_YG/IN/Indian_pines_gt.mat'
        image = sio.loadmat(image_file)  # shape=(145,145,200)
        Indian = image['indian_pines_corrected']  # shape=(145,145,200) ndarray
        label = sio.loadmat(label_file)
        GroundTruth = label['indian_pines_gt']  # shape=(145,145) ndarray
        Indian = (Indian - np.min(Indian)) / (np.max(Indian) - np.min(Indian))  # min-max normalisation to [0, 1]
        [nRow, nColumn, nBand] = Indian.shape  # 145 145 200

        num_class = int(np.max(GroundTruth))  # 16
        HSI_CLASSES = num_class  # 16

        HalfWidth = 16
        Wid = 2 * HalfWidth  # 32

        [row, col] = GroundTruth.shape  # 145 145

        # NotZeroMask = np.zeros([row, col])
        # NotZeroMask[HalfWidth + 1: -1 - HalfWidth + 1, HalfWidth + 1: -1 - HalfWidth + 1] = 1#17:-16,17:-16
        # G = GroundTruth * NotZeroMask
        # Pad the cube and the ground truth so a full HalfWidth patch exists
        # around every pixel (asymmetric: HalfWidth+1 on top/left, HalfWidth on bottom/right).
        Indian = self.zeroPadding_3D(Indian , HalfWidth + 1, HalfWidth, HalfWidth + 1, HalfWidth )
        G = self.zeroPadding_2D(GroundTruth, HalfWidth + 1, HalfWidth, HalfWidth + 1, HalfWidth)
        [Row, Column] = np.nonzero(G)  # shape 7128 7128
        nSample = np.size(Row)  # 7128

        # inchannels=[200, 64, 64, 64, 64, 64]
        # outchannels=[64, 64, 64, 64, 64, 64]
        # pool_type=[1, 2, 1, 1, 1, 2]
        # BasicBlock_amount=[5, 6, 6, 6, 5, 5]
        # BU_outchannels=[64, 128, 128, 128, 256, 256]
        # BU_conv_type=[1, 1, 2, 2, 1, 1]
        # BU_kernel_size=[5, 5, 3, 3, 3, 3]
        # order_type=[1, 2, 1, 2, 2, 2]
        #---------------------------------IN-----------------------------------------------------------------
        # Hard-coded best architecture found for Indian Pines (IN).
        inchannels=[200, 64, 64, 64, 256]
        outchannels=[64, 64, 64, 256, 256]
        pool_type=[2, 2, 2, 2, 2]
        BasicBlock_amount=[4, 7, 7, 5, 10]
        order_type=[1, 2, 2, 1, 2]
        BU_outchannels=[64, 64, 64, 256, 128]
        BU_conv_type=[2, 2, 2, 1, 2]
        BU_kernel_size=[3, 3, 3, 3, 3]
        encode_layers = []
        size = [None for _ in range(20)]
        size[0] = 32
        i = 0  # control size
        # ========================================================================================================
        # order_type 1 builds a BasicUnit (size preserved); 2 builds a PoolUnit (size halved).
        for type in order_type:
            if type == 1:
                encode_layers.append(
                    BasicUnit(int(inchannels[i]), int(outchannels[i]), int(BasicBlock_amount[i]), int(BU_conv_type[i]),
                              int(BU_kernel_size[i])))
                i = i + 1
                size[i] = size[i - 1]
            elif type == 2:
                encode_layers.append(PoolUnit(int(pool_type[i])))
                i = i + 1
                size[i] = int(size[i - 1] / 2)
        encode_information = encode_layers
        End_out_channels = outchannels
        size = size
        net = GlobalNet(encode_information, End_out_channels, size, nBand, num_class).to(self.device)

        print("Waiting Test!"+ '\n')
        utils.load(net, './test_classification._model/Best_net.pth')
        predict_test = np.array([], dtype=np.int64)
        labels_test = np.array([], dtype=np.int64)
        # Build the full test set: one 32x32xnBand patch per labelled pixel.
        imdb = {}
        imdb['data'] = np.zeros([2 * HalfWidth, 2 * HalfWidth, nBand, nSample], dtype=np.float32)
        imdb['Labels'] = np.zeros([nSample], dtype=np.int64)
        imdb['set'] = 3 * np.ones([nSample], dtype=np.int64)
        for iSample in range(nSample):
            imdb['data'][:, :, :, iSample] = Indian[Row[iSample] - HalfWidth: Row[iSample]
            + HalfWidth, Column[iSample] - HalfWidth:Column[iSample] + HalfWidth, :] #Indian[Row[63] - 16:Row[63]+16,Colum[63]-16:Column[63]+16,:]
            imdb['Labels'][iSample] = G[Row[iSample], Column[iSample]].astype(np.int64)
        # Shift labels to 0-based class indices.
        imdb['Labels'] = imdb['Labels'] - 1
        for i in range(1):
            test_dataset = Matcifar(imdb)
            test_queue = torch.utils.data.DataLoader(test_dataset, batch_size=self.BATCH_SIZE, shuffle=False,
                                                     num_workers=0)  # batch_size = 50

            top1_test, objs_test, tar_test, pre_test = self.infer(test_queue, net)

            labels_test = np.append(labels_test, tar_test)
            predict_test = np.append(predict_test, pre_test)

        OA_ = metrics.accuracy_score(predict_test, labels_test)
        matrix_ = metrics.confusion_matrix(predict_test, labels_test)
        AA_, AA_mean_ = utils.AA_andEachClassAccuracy(matrix_)
        Kappa_ = metrics.cohen_kappa_score(predict_test, labels_test)

        print('The Best individual |test_datasets | top1.avg:%f, objs.avg:%f' % (top1_test, objs_test))
        print('OA：%f, AA_mean：%f, Kappa：%f ,Acuracy_per_class: %s' % (OA_, AA_mean_, Kappa_, AA_))

        # Back to 1-based labels so 0 can mark the unlabelled background.
        labels_test = labels_test + 1
        predict_test = predict_test + 1
        # Crop the padded ground truth back to the original frame.
        MASK = G[HalfWidth + 1: -1 - HalfWidth + 1, HalfWidth + 1: -1 - HalfWidth + 1]
        NEW_MASK = copy.deepcopy(MASK)
        NEW_MASK = np.hstack(NEW_MASK)
        i = 0
        # Write each 1-based prediction into the flattened mask, in the same
        # row-major order that np.nonzero produced the sample coordinates.
        for index , element in enumerate(NEW_MASK):
            if element!=0:
                NEW_MASK[index] = predict_test[i]  # predictions; use labels_test here to plot ground truth
                i = i + 1
            else:
                continue

        # predict_test = predict_test + 1
        # # prediction_ = copy.deepcopy(predict_test)
        # c = np.hstack(G)
        # k = 0
        # for index , element in enumerate(c):
        #     if element!=0:
        #         c[index] = predict_test[k]
        #         k = k + 1
        #     else:
        #         continue
        # c = np.vstack(c).reshape(145, 145)
        # c = c - 1
        # m = copy.deepcopy(GroundTruth)
        # m = m - 1
        # m[HalfWidth + 1: -1 - HalfWidth + 1, HalfWidth + 1: -1 - HalfWidth + 1] = c[HalfWidth + 1: -1 - HalfWidth + 1, HalfWidth + 1: -1 - HalfWidth + 1]
        # m = np.hstack(k)

        # x = np.ravel(int(k))  # （21025）
        # print x
        y = np.zeros((NEW_MASK.shape[0], 3))  # one RGB triple per pixel

        # Map each class id (0 = background) to a fixed RGB colour, scaled to [0, 1].
        for index, item in enumerate(NEW_MASK):
            if item == 0:
                y[index] = np.array([25, 25, 112]) / 255.  # background: midnight blue (alternatives: 8,46,84 / 124,252,0 indigo)
            if item == 1:
                y[index] = np.array([0, 0, 205]) / 255.
            if item == 2:
                y[index] = np.array([0, 0, 255]) / 255.
            if item == 3:
                y[index] = np.array([65, 105, 225]) / 255.
            if item == 4:
                y[index] = np.array([30, 144, 255]) / 255.
            if item == 5:
                y[index] = np.array([0, 191, 255]) / 255.
            if item == 6:
                y[index] = np.array([127, 255, 212]) / 255.
            if item == 7:
                y[index] = np.array([152, 251, 152]) / 255.
            if item == 8:
                y[index] = np.array([0, 255, 127]) / 255.
            if item == 9:
                y[index] = np.array([173, 255, 47]) / 255.
            if item == 10:
                y[index] = np.array([255, 255, 0]) / 255.
            if item == 11:
                y[index] = np.array([255, 153, 18]) / 255.
            if item == 12:
                y[index] = np.array([255, 97, 0]) / 255.
            if item == 13:
                y[index] = np.array([255, 69, 0]) / 255.
            if item == 14:
                y[index] = np.array([255, 0, 0]) / 255.
            if item == 15:
                y[index] = np.array([178, 34, 34]) / 255.
            if item == 16:
                y[index] = np.array([176, 23, 31]) / 255.

        y_re = np.reshape(y, (MASK.shape[0], MASK.shape[1], 3))
        # y_re = np.reshape(NEW_MASK, (GroundTruth.shape[0], GroundTruth.shape[1], 3))

        self.classification_map(y_re, MASK, 24,
                                "/home/server/Pycharmproject/LSY/Design_YG/Control_/run_Indian/ClassificationMap/test1.eps")

    def train(self , train_queue, model, optimizer):
        """Train `model` for one epoch over `train_queue`.

        Returns (top-1 accuracy avg, loss avg, target array, prediction array).
        """
        objs = utils.AvgrageMeter()
        top1 = utils.AvgrageMeter()
        tar = np.array([])
        pre = np.array([])
        model.train()
        for i, data in enumerate(train_queue):
            # scheduler.step()
            image_, label_ = data  # image torch.size([64,32,32,200]) label  32
            image_ = Variable(image_).to(self.device)
            label_ = Variable(label_).to(self.device)
            optimizer.zero_grad()
            n_ = image_.size(0)
            outputs_ = model(image_)
            loss_ = self.criterion(outputs_, label_)
            loss_.backward()
            # Clip gradients to stabilise training.
            nn.utils.clip_grad_norm_(model.parameters(), self.grad_clip)
            optimizer.step()
            prec_, t_, p_ = utils.accuracy(outputs_, label_, topk=(1,))
            objs.update(loss_.item(), n_)
            top1.update(prec_[0].item(), n_)
            tar = np.append(tar, t_.data.cpu().numpy())
            pre = np.append(pre, p_.data.cpu().numpy())
        return top1.avg, objs.avg, tar, pre

    def infer(self , valid_queue, model):
        """Evaluate `model` on `valid_queue` without gradient tracking.

        Returns (top-1 accuracy avg, loss avg, target array, prediction array).
        """
        objs = utils.AvgrageMeter()
        top1 = utils.AvgrageMeter()
        model.eval()
        tar = np.array([])
        pre = np.array([])
        with torch.no_grad():
            for i, data in enumerate(valid_queue):
                image, label = data
                image = Variable(image).to(self.device)
                label = Variable(label).to(self.device)
                n = image.size(0)
                outputs =model(image)
                loss = self.criterion(outputs, label)
                prec1_, t_, p_ = utils.accuracy(outputs, label, topk=(1,))
                objs.update(loss.item(), n)
                top1.update(prec1_[0].item(), n)
                tar = np.append(tar, t_.data.cpu().numpy())
                pre = np.append(pre, p_.data.cpu().numpy())

        return top1.avg, objs.avg, tar, pre

    def classification_map(self, map, groundTruth, dpi, savePath):
        """Save `map` (H x W x 3 RGB array) as a borderless figure at `savePath`.

        `groundTruth` supplies only the figure dimensions. Returns 0.
        NOTE(review): the parameter `map` shadows the builtin; kept as-is
        for interface compatibility.
        """

        fig = plt.figure(frameon=False)
        fig.set_size_inches(groundTruth.shape[1] * 2.0 / dpi, groundTruth.shape[0] * 2.0 / dpi)

        # Axes spanning the whole figure, with all decorations removed.
        ax = plt.Axes(fig, [0., 0., 1., 1.])
        ax.set_axis_off()
        ax.xaxis.set_visible(False)
        ax.yaxis.set_visible(False)
        fig.add_axes(ax)

        ax.imshow(map, aspect='auto')
        fig.savefig(savePath, dpi=dpi)

        return 0

    # def cal_results(self , matrix):
    #     shape = np.shape(matrix)
    #     number = 0
    #     sum = 0
    #     AA = np.zeros([shape[0]], dtype=np.float) #AA[0] = 0,
    #     for i in range(shape[0]):
    #         number += matrix[i, i] #number=0,672,
    #         if np.sum(matrix[i, :]) == 0:
    #             AA[i]= 0
    #         elif np.sum(matrix[i, :]) != 0:
    #             AA[i] = matrix[i, i] / np.sum(matrix[i, :])
    #         sum += np.sum(matrix[i, :]) * np.sum(matrix[:, i])#sum = 0
    #     OA = number / np.sum(matrix)
    #     AA_mean = np.mean(AA)
    #     pe = sum / (np.sum(matrix) ** 2)
    #     Kappa = (OA - pe) / (1 - pe)
    #     return OA, AA_mean, Kappa, AA

if __name__ == '__main__':
    # Script entry point: rebuild the searched network and run the final test.
    evaluator = FitnessEvaluate()
    evaluator.test_end()
