# -*- coding: utf-8 -*-
"""
Created on Thu Sep  7 14:07:34 2023

@author: LiHao
"""

import torch
import torch.nn as nn
import torch.optim as optim

import loss as loss_func
import numpy as np
import network
import math
import scipy.io as scio
import torch.utils.data as Data


# ---- Experiment configuration ----
batch_size = 32        # mini-batch size for every DataLoader below
lr = 0.01              # base SGD learning rate (annealed per epoch in train())
momentum = 0.9         # SGD momentum
l2_decay = 5e-4        # SGD weight decay (L2 regularisation)
epochs = 20            # number of training epochs
epochs2 = 20           # NOTE(review): not used in this file — confirm before removing
log_interval = 40      # print training progress every N iterations
class_num = 8          # number of classes for the label-smoothing loss
cuda = True if torch.cuda.is_available() else False
method = 'DANN'        # adaptation-method tag passed to train()
ps = 0.2               # NOTE(review): not used in this file — confirm before removing

# Test-set metrics accumulated across epochs by test()/test1().
res_test_err = []
res_test_acc = []

''' 导入数据 '''
# ---- Load data ("导入数据" = import data) ----
# Source domain - training set (dataset 1#)
dataFile = r'..\data_target\1#\X_train.mat'
data = scio.loadmat(dataFile)
x_train_s = data['X_train']
x_train_s = np.transpose(x_train_s)
x_train_s = x_train_s.reshape(-1, 1, 8192)  # (N, 1 channel, 8192-point signal)

dataFile = r'..\data_target\1#\train_Y.mat'
data = scio.loadmat(dataFile)
y_train_s = data['train_Y']
y_train_s = np.squeeze(y_train_s)
y_train_s = y_train_s - 1  # labels in the .mat files are 1-based; shift to 0-based

# Target domain - training set (dataset 2#; labels are never used by train())
dataFile = r'..\data_target\2#\X_train.mat'
data = scio.loadmat(dataFile)
x_train_t = data['X_train']
x_train_t = np.transpose(x_train_t)
x_train_t = x_train_t.reshape(-1, 1, 8192)       # reshape to (N, 1, 8192)

dataFile = r'..\data_target\2#\train_Y.mat'
data = scio.loadmat(dataFile)
y_train_t = data['train_Y']
y_train_t = np.squeeze(y_train_t)
y_train_t = y_train_t - 1  # 1-based -> 0-based

# Target domain - test set (dataset 2#)
dataFile = r'..\data_target\2#\X_test.mat'
data = scio.loadmat(dataFile)
x_test = data['X_test']
x_test = np.transpose(x_test)
x_test = x_test.reshape(-1, 1, 8192)       # reshape to (N, 1, 8192)

dataFile = r'..\data_target\2#\test_Y.mat'
data = scio.loadmat(dataFile)
y_test = data['test_Y']
y_test = np.squeeze(y_test)
y_test = y_test - 1  # 1-based -> 0-based

# Convert numpy arrays to float32 inputs / int64 labels for PyTorch.
x_train_s1 = torch.from_numpy(x_train_s).type(torch.FloatTensor)
y_train_s1 = torch.from_numpy(y_train_s).type(torch.LongTensor)

x_train_t1 = torch.from_numpy(x_train_t).type(torch.FloatTensor)
y_train_t1 = torch.from_numpy(y_train_t).type(torch.LongTensor)

x_test = torch.from_numpy(x_test).type(torch.FloatTensor)
y_test = torch.from_numpy(y_test).type(torch.LongTensor)

sourcedata = Data.TensorDataset(x_train_s1, y_train_s1)
test_traindata = Data.TensorDataset(x_train_t1, y_train_t1)
testdata = Data.TensorDataset(x_test, y_test)

# NOTE(review): drop_last=True on the *test* loader means an incomplete final
# batch is silently discarded, so some test samples may never be scored — confirm.
source_loader = Data.DataLoader(sourcedata, batch_size=batch_size, shuffle=True, drop_last=True)
target_loader = Data.DataLoader(test_traindata, batch_size=batch_size, shuffle=True, drop_last=True)
target_test_loader = Data.DataLoader(testdata, batch_size=batch_size, shuffle=True, drop_last=True)

max_iters = len(source_loader) * epochs  # total iteration budget (passed to AdversarialNetwork)

def data_loader(src_x, src_y, tar_x, tar_y, batch_size=None):
    """Wrap source/target arrays in shuffled, drop-last DataLoaders.

    Args:
        src_x, src_y: source-domain samples and integer labels (array-like).
        tar_x, tar_y: target-domain samples and integer labels (array-like).
        batch_size: mini-batch size; when None (the default) falls back to
            the module-level ``batch_size``, preserving the old behaviour.

    Returns:
        Tuple ``(src_loader, tar_loader)`` of ``torch.utils.data.DataLoader``
        with ``shuffle=True`` and ``drop_last=True`` (matching the
        module-level loaders).
    """
    if batch_size is None:
        # Backward compatible: use the module-wide configuration value.
        batch_size = globals()['batch_size']
    # float32 inputs / int64 labels, same convention as the module-level code.
    src_x = torch.tensor(src_x).type(torch.FloatTensor)
    src_y = torch.tensor(src_y).type(torch.LongTensor)
    tar_x = torch.tensor(tar_x).type(torch.FloatTensor)
    tar_y = torch.tensor(tar_y).type(torch.LongTensor)
    src_data = Data.TensorDataset(src_x, src_y)
    tar_data = Data.TensorDataset(tar_x, tar_y)
    src_loader = Data.DataLoader(src_data, batch_size=batch_size, shuffle=True, drop_last=True)
    tar_loader = Data.DataLoader(tar_data, batch_size=batch_size, shuffle=True, drop_last=True)
    return src_loader, tar_loader

def train(model, ad_net, random_layer, epoch, method):
    """Train ``model`` (and the domain discriminator ``ad_net``) for one epoch.

    Relies on the module-level hyper-parameters and the global
    ``source_loader`` / ``target_loader``.

    Args:
        model: network returning ``(features, logits)`` for a batch.
        ad_net: adversarial domain discriminator, optimised alongside model.
        random_layer: unused here; kept for signature compatibility.
        epoch: 1-based epoch index; drives the LR annealing schedule.
        method: adaptation-method tag; unused in this loop.
    """
    # Annealed LR schedule: lr / (1 + 10 * progress)^0.75, progress in [0, 1).
    learning_rate = lr / math.pow((1 + 10 * (epoch - 1) / epochs), 0.75)
    optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=momentum,
                          weight_decay=l2_decay, nesterov=True)
    optimizer_ad = optim.SGD(ad_net.parameters(), lr=learning_rate, momentum=momentum,
                             weight_decay=l2_decay, nesterov=True)
    model.train()
    iter_source = iter(source_loader)
    iter_target = iter(target_loader)
    num_iter = len(source_loader)
    # BUG FIX: the old `range(1, num_iter)` silently skipped one source batch
    # every epoch; iterate over all `num_iter` batches.
    for i in range(1, num_iter + 1):
        # BUG FIX: `iterator.next()` does not exist on Python-3 iterators
        # (and was removed from DataLoader iterators); use builtin next().
        data_source, label_source = next(iter_source)
        data_target, _ = next(iter_target)  # target labels are never used
        if i % len(target_loader) == 0:
            # The target loader may be shorter than the source loader:
            # restart its iterator once it has been fully consumed.
            iter_target = iter(target_loader)
        if cuda:
            data_source, label_source = data_source.cuda(), label_source.cuda()
            data_target = data_target.cuda()

        optimizer.zero_grad()
        optimizer_ad.zero_grad()

        # Forward source and target through the model as a single batch.
        inputs = torch.cat((data_source, data_target), dim=0)
        features, outputs = model(inputs)
        output_s = outputs[:batch_size]  # source logits (first half of batch)
        softmax_out = nn.Softmax(dim=1)(outputs)

        # Adversarial domain-confusion loss over both domains.
        transfer_loss = loss_func.DANN([features, softmax_out], ad_net, None, None)

        # Label-smoothed cross entropy on the labelled source half.
        classifier_loss = loss_func.CrossEntropyLabelSmooth(num_classes=class_num)(output_s, label_source)
        # NOTE(review): the adversarial term is weighted by 0, i.e. domain
        # adaptation is effectively disabled and this is source-only training
        # — confirm whether this weight is intentional.
        loss = classifier_loss + 0*transfer_loss

        loss.backward()
        optimizer.step()
        optimizer_ad.step()
        if i % log_interval == 0:
            print('Train Epoch: {}/[{}/{} ({:.0f}%)] Loss: {:.4f} cls_loss: {:.4f} Adv_loss: {:.4f}'.format(
                    epoch, i*len(data_source), len(source_loader.dataset), 100.*i/len(source_loader),
                    loss.item(), classifier_loss.item(), transfer_loss.item()))

            
def test(model):
    """Evaluate ``model`` on the global ``target_test_loader``.

    Appends the per-sample mean loss to ``res_test_err`` and the accuracy
    percentage to ``res_test_acc``, then prints both.

    Args:
        model: network returning ``(features, logits)`` for a batch.
    """
    model.eval()
    test_loss = 0.0
    correct = 0
    criterion = nn.CrossEntropyLoss()
    n_eval = 0  # samples actually scored (drop_last=True may skip a tail batch)

    with torch.no_grad():
        for data, target in target_test_loader:
            if cuda:
                data, target = data.cuda(), target.cuda()
            _, label_pred = model(data)

            # BUG FIX: accumulate a Python float weighted by the batch size so
            # the final division yields a true per-sample mean (the old code
            # summed per-batch mean *tensors* and stored a tensor in
            # res_test_err). Also removed the dead fixed-size y_pre/y_target
            # buffers whose index `i` was never incremented.
            test_loss += criterion(label_pred, target).item() * data.size(0)
            n_eval += data.size(0)
            pred = label_pred.data.max(1)[1]
            correct += pred.eq(target.data).cpu().sum().item()

        test_loss /= max(n_eval, 1)
        # NOTE(review): with drop_last=True, samples in an incomplete final
        # batch are never scored yet still count in this denominator — confirm.
        acc = float(correct) * 100. / len(target_test_loader.dataset)
        # Record metrics for later inspection.
        res_test_err.append(test_loss)
        res_test_acc.append(acc)

        print('Test set: Ave loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
                test_loss, correct, len(target_test_loader.dataset),
            acc))
            
def test1(model):
    """Final evaluation on ``target_test_loader`` plus a confusion-matrix plot.

    Computes the same metrics as ``test`` but additionally collects every
    prediction/label pair and renders a seaborn heatmap of the confusion
    matrix with Chinese class labels.

    Args:
        model: network returning ``(features, logits)`` for a batch.
    """
    model.eval()
    test_loss = 0.0
    correct = 0
    criterion = nn.CrossEntropyLoss()

    # BUG FIX: collect per-batch results in lists instead of fixed-size
    # 720-element buffers sliced with a hard-coded stride of 32 — that code
    # broke for any other test-set size or batch size.
    preds = []
    targets = []

    with torch.no_grad():
        for data, target in target_test_loader:
            if cuda:
                data, target = data.cuda(), target.cuda()
            _, label_pred = model(data)

            # BUG FIX: accumulate a float weighted by batch size so the final
            # division yields a per-sample mean (see test()).
            test_loss += criterion(label_pred, target).item() * data.size(0)
            pred = label_pred.data.max(1)[1]
            correct += pred.eq(target.data).cpu().sum().item()

            preds.append(pred.data.cpu())
            targets.append(target.data.cpu())

        y_pre = torch.cat(preds)
        y_target = torch.cat(targets)

        import pandas as pd
        from sklearn.metrics import confusion_matrix
        import matplotlib.pyplot as plt
        import seaborn as sns

        class_label = ['太阳轮点蚀', '太阳轮裂纹', '行星轮点蚀', '行星轮裂纹']
        # NOTE(review): 4 labels here but class_num = 8 at module level; the
        # DataFrame construction fails if more than 4 classes appear — confirm.

        # BUG FIX: the CJK-font rcParams were previously set *after*
        # plt.show(), too late to affect the rendered figure; set them first.
        plt.rcParams['font.sans-serif'] = ['SimHei']
        plt.rcParams['axes.unicode_minus'] = False

        # Confusion matrix, visualised as an annotated heatmap.
        plt.figure(figsize=(10, 8))
        conf_mat = confusion_matrix(y_target, y_pre)
        df_cm = pd.DataFrame(conf_mat, index=class_label,
                             columns=class_label)
        sns.set(font_scale=1.5)
        heatmap = sns.heatmap(df_cm, annot=True, fmt="d", cmap="YlGnBu")
        heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), rotation=0, ha='right')
        heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation=20, ha='right')
        font2 = {'family': 'SimHei', 'weight': 'normal', 'size': 24}
        plt.ylabel('真实标签', font2)
        plt.xlabel('预测标签', font2)
        plt.show()

        n_eval = y_target.numel()  # samples actually scored (drop_last=True)
        test_loss /= max(n_eval, 1)
        # NOTE(review): denominator includes samples dropped by drop_last — confirm.
        acc = float(correct) * 100. / len(target_test_loader.dataset)
        # Record metrics for later inspection.
        res_test_err.append(test_loss)
        res_test_acc.append(acc)

        print('Test set: Ave loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
                test_loss, correct, len(target_test_loader.dataset),
            acc))
        
if __name__ == '__main__':
    # Backbone feature extractor + classifier.
    model = network.BasedCNN()

    # Main and auxiliary domain discriminators, scheduled over max_iters.
    ad_net = network.AdversarialNetwork(512, 256, max_iters)
    ad_net_au = network.AdversarialNetwork(512, 256, max_iters)

    if cuda:
        model, ad_net, ad_net_au = model.cuda(), ad_net.cuda(), ad_net_au.cuda()

    # Train for the configured number of epochs, evaluating after each one.
    for ep in range(1, epochs + 1):
        train(model, ad_net, None, ep, method)
        test(model)

    # Final pass: metrics plus the confusion-matrix figure.
    test1(model)
        
            