import os
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import scipy.io as scio
import torch
import torch.nn as nn
import cv2
import time
from torch.utils.data import TensorDataset
from torch.utils.data import DataLoader
from torch.utils.data import SubsetRandomSampler
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import cohen_kappa_score
from thop import profile
from func import load,product,preprocess
from network import ss_cnn, operate

## Select the compute device (pin the visible GPU before any CUDA context is created)

USE_GPU = True
if USE_GPU:
    # Restrict CUDA to physical GPU 2; inside this process it appears as cuda:0.
    os.environ["CUDA_VISIBLE_DEVICES"] = "2"
    device = torch.device('cuda')
else:
    # Fix: original only defined `device` on this branch, leaving it undefined
    # whenever USE_GPU was True.
    device = torch.device('cpu')

# Fix: original printed the hard-coded string '0' regardless of the actual device.
print('using device:', device)

#################################### load dataset (indian_pines & pavia_univ & sali & sali_a.) ######################

# Load the hyperspectral cube and its metadata for the chosen scene.
a = load()
(All_data, labeled_data, rows_num,
 categories, r, c, FLAG) = a.load_data(flag='pavia')

# np.save('All_data.npy', All_data)

print('Data has been loaded successfully!')

#################################### Dimensionality reduction (skipped for 3D-CNN), normalisation ######################
# Target range for min-max scaling
norm_lo, norm_hi = -0.5, 0.5
# a = preprocess('pca')  # choose ICA or PCA
# Alldata_DR = a.Dim_reduction(All_data)
a = product(c, FLAG)
# Strip the leading index column and trailing label column before scaling.
All_data_norm = a.normlization(All_data[:, 1:-1], norm_lo, norm_hi)
image_data3D = All_data_norm.reshape(r, c, -1)

print(image_data3D.shape)

print('Dimension reduction successfully!')

#################################### Data preparation ###################

half_s = 13  # half of (patch size - 1), set manually

# Mirror-pad the cube by half_s pixels on each spatial border so that patches
# centred on boundary pixels stay inside the image.  np.pad with
# mode='symmetric' (edge pixel repeated) is exactly equivalent to the
# flip-tile-and-crop construction it replaces.
image_3d_mat_origin = np.pad(
    image_data3D,
    ((half_s, half_s), (half_s, half_s), (0, 0)),
    mode='symmetric',
)

print(image_3d_mat_origin.shape)

# plt.imshow(image_3d_mat_origin[:,:,30])
# plt.show()

print('image edge enhanced Finished!')

#################################### Spatial data: train / validate / test / predict ###########################

# Result table — rows: OA, AA, kappa, train time, test time, then per-class
# accuracies; columns: one per run plus trailing mean and std.
Experiment_result = np.zeros((categories + 4, 12))

# Number of repeated experiments
Experiment_num = 10

for count in range(0,Experiment_num):
    # One independent run: resample the train/test split, train a fresh
    # network, then evaluate it on the held-out test pixels.

    a = product(c, FLAG)
    # Draw a fresh random split of the labelled samples into train/test/predict indices.
    rows_num,trn_num,tes_num,pre_num=a.generation_num(labeled_data,rows_num,All_data)

    #################################### Training #####################################

    ############ label #############

    y_trn = All_data[trn_num, -1]
    trn_YY = torch.from_numpy(y_trn - 1)  # shift labels to start at 0 for NLLLoss

    ############ data  #############
    # Cut spatial patches of side 2*half_s+1 around each training pixel.
    trn_spat,trn_num,_=a.production_data_trn(rows_num,trn_num,half_s,image_3d_mat_origin)

    # Append a singleton channel axis for the 3D convolution input.
    trn_spat = trn_spat[:,:,:,:,np.newaxis]

    print('第{}次实验，Training spatial dataset preparation Finished!'.format(count))

    trn_XX_spat = torch.from_numpy(trn_spat.transpose(0, 4, 3, 1, 2))  # (N,C,Depth,H,W)

    del trn_spat

    torch.cuda.empty_cache()#GPU memory released

    trn_dataset=TensorDataset(trn_XX_spat,trn_YY)
    trn_loader=DataLoader(trn_dataset,batch_size=100,sampler=SubsetRandomSampler(range(trn_XX_spat.shape[0])))

    #config lr & epoch
    lr = 3e-3
    epoch = 400

    net = ss_cnn(FLAG,trn_XX_spat.shape[1],trn_XX_spat.shape[2], categories-1, init_weights=True)
    if torch.cuda.device_count() > 1:
        net = nn.DataParallel(net)
    net = net.cuda()
    # net=nn.DataParallel(net,device_ids=[0,1])
    criterion = torch.nn.NLLLoss()  # negative log-likelihood loss (use cross-entropy directly if the net does not apply log_softmax)
    optimizer = torch.optim.SGD(net.parameters(), lr=lr, weight_decay=1e-3)
    # NOTE(review): milestone 402 exceeds epoch (400), so the LR decay never fires — confirm intended.
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[402], gamma=0.5)
    # optimizer=torch.optim.Adam(net.parameters(), lr=lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-2, amsgrad=False)
    loss_trn = []

    trn_time1 = time.time()
    for i in range(1, epoch):
        b=operate()
        loss_trn = b.train(i, loss_trn, net, optimizer, scheduler, trn_loader, criterion)
    trn_time2 = time.time()

    #print(type(loss_trn))  ######CPU
    # plt.figure(1)
    # plt.plot(np.array(loss_trn), label='Training')
    # plt.legend()
    # plt.show()

    ##save training model
    torch.save(net,'3dcnn_'+str(FLAG)+'.pkl')

    print('第{}次实验，模型训练阶段完成！！'.format(count))

    ######################################### Testing ####################################

    ############ label #############

    y_tes = All_data[tes_num, -1]#label
    tes_YY = torch.from_numpy(y_tes - 1)

    ############ data  #############

    a = product(c, FLAG)

    tes_spat, tes_num = a.production_data_valtespre(tes_num, half_s, image_3d_mat_origin, flag='Tes')

    tes_spat=tes_spat[:,:,:,:,np.newaxis]


    print('第{}次实验，Testing spatial dataset preparation Finished!'.format(count))

    tes_XX_spat = torch.from_numpy(tes_spat.transpose(0, 4, 3, 1, 2))

    del tes_spat

    ################### Inference on the test set ################

    tes_dataset=TensorDataset(tes_XX_spat,tes_YY)
    tes_loader=DataLoader(tes_dataset,batch_size=100)

    # Reload the model just saved; map_location='cpu' avoids device-pinning issues on load.
    net=torch.load('3dcnn_'+str(FLAG)+'.pkl',map_location='cpu')
    #net=net.module#if use DataParallel

    net=net.cuda()
    #net=nn.DataParallel(net,device_ids=[0])
    #net=net.cpu()
    a=operate()

    tes_time1=time.time()
    y_pred_tes=a.inference(net,tes_loader,criterion,FLAG='TEST')
    tes_time2=time.time()

    ####################################### Assess on the test set ###########################################

    print('==================Test set=====================')
    y_tes = tes_YY.numpy() + 1  # back to 1-based labels for scoring
    print('第{}次实验，测试集OA={}'.format(count,np.mean(y_tes==y_pred_tes)))
    print('第{}次实验，测试集Kappa={}'.format(count,cohen_kappa_score(y_tes,y_pred_tes)))

    ########## Per-class accuracy

    num_tes=np.zeros([categories-1])
    num_tes_pred=np.zeros([categories-1])
    for k in y_tes:
        num_tes[k-1]=num_tes[k-1]+1  # test-sample count per class
    for j in range(y_tes.shape[0]):
        if y_tes[j]==y_pred_tes[j]:
            num_tes_pred[y_tes[j]-1]=num_tes_pred[y_tes[j]-1]+1  # correct predictions per class

    Acc=num_tes_pred/num_tes*100

    Experiment_result[0,count]=np.mean(y_tes==y_pred_tes)*100#OA
    Experiment_result[1,count]=np.mean(Acc)#AA
    Experiment_result[2,count]=cohen_kappa_score(y_tes,y_pred_tes)*100#Kappa
    Experiment_result[3, count] = trn_time2 - trn_time1
    Experiment_result[4, count] = tes_time2 - tes_time1
    Experiment_result[5:,count]=Acc

    print('第{}次实验，模型评估阶段完成！！!'.format(count))

########## Mean and standard deviation over the repeated runs, then persist #############

# Per-run columns are 0..-3; the last two columns hold their mean and std.
run_scores = Experiment_result[:, 0:-2]
Experiment_result[:, -2] = run_scores.mean(axis=1)
Experiment_result[:, -1] = run_scores.std(axis=1)

scio.savemat('3dcnn_result_'+str(FLAG)+'.mat',{'data':Experiment_result})

np.save('trn_num_'+str(FLAG)+'.npy',trn_num)
np.save('pre_num_'+str(FLAG)+'.npy',pre_num)
np.save('y_trn_'+str(FLAG)+'.npy',y_trn)
np.save('image_3d_mat_origin_'+str(FLAG)+'.npy',image_3d_mat_origin)

#############  Display (visualization, commented out) ##############

# y_disp=np.zeros([All_data.shape[0]])
#
# y_disp[trn_num]=y_trn
# y_disp[tes_num]=y_pred_tes
#
# y_disp_gt=y_disp.copy()
# y_disp_gt[tes_num]=y_tes
#
# plt.subplots(figsize=[10,10])
# ax1=plt.subplot(1,2,1)
# plt.xlabel('TEST')
# a1=plt.imshow(y_disp.reshape(r,c),cmap='jet')
# plt.xticks([])
# plt.yticks([])
#
# ax2=plt.subplot(1,2,2)
# plt.xlabel('gt')
# a2=plt.imshow(y_disp_gt.reshape(r,c),cmap='jet')
# plt.xticks([])
# plt.yticks([])