# -*- coding:utf-8 -*-#
# @Time:2023/6/1 14:32
# @Author:Adong
# @Software:PyCharm

"""
重要文件，谨慎修改！
重要文件，谨慎修改！
重要文件，谨慎修改！
重要文件，谨慎修改！
重要文件，谨慎修改！
"""


import torch
from net import MyLeNet5,MyLeNet5_NCL,ResNet50_NCL,ResNet50
from torch.autograd import Variable
from torchvision import transforms
import os
from PIL import Image
import dataset
from torch.utils import data
from tqdm import tqdm
from CNN_train import Trainer


class verification:
    """Evaluate a trained CNN checkpoint.

    'test' mode measures accuracy over the labelled verification
    DataLoader; 'use' mode prints a predicted class for every image
    found in ``veri_set``.
    """

    def __init__(self, model_root, veri_set):
        """
        :param model_root: path of the saved state_dict (.pth) to evaluate
        :param veri_set: image directory consumed by start_verifing(mode='use')
        """
        self.model_root = model_root
        # Bug fix: this assignment was commented out, but start_verifing(mode='use')
        # reads self.veri_set and crashed with AttributeError.
        self.veri_set = veri_set
        self.data_transform = transforms.Compose([transforms.ToTensor()])
        # Move everything to the GPU when one is available.
        self.device = "cuda" if torch.cuda.is_available() else 'cpu'
        print("running by " + self.device + "!")
        self.model = MyLeNet5(5).to(self.device)  # architecture defined in net.py
        # map_location lets a checkpoint saved on a GPU load on a CPU-only host.
        self.model.load_state_dict(torch.load(self.model_root, map_location=self.device))
        # Build the verification dataset and wrap it in a DataLoader.
        self.veri_dataset = dataset.makeDataset('veri', 'wavV3_to_RGB')
        self.batch_size = 32
        self.veri_dataloader = data.DataLoader(dataset=self.veri_dataset, batch_size=self.batch_size, shuffle=True)

    def start_verifing(self, mode='test'):
        '''
        Run the verification pass.

        :param mode: 'test' — report average accuracy over the labelled
                     DataLoader; 'use' — print a prediction for every
                     image file in self.veri_set.
        :return: None (results are printed to stdout)
        '''
        if mode == 'test':
            model = self.model
            device = self.device
            dataloader = self.veri_dataloader
            model.eval()  # inference mode: freeze dropout/batch-norm behaviour
            cumulative_acc = 0.0  # running sum of per-batch accuracies
            n = 0  # number of batches processed so far

            with torch.no_grad():
                t = tqdm(dataloader, desc='[verification]')
                for X, y in t:
                    X, y = X.to(device), y.to(device)
                    output = model(X)
                    # NOTE(review): the NCL ensemble variants (ResNet50_NCL /
                    # proposed ResNet50_NCL) return five base-learner outputs;
                    # for those, average the five heads (optionally weighted by
                    # Trainer.significance) instead of the single forward above.

                    _, pred = torch.max(output, dim=1)  # index of the highest score per row
                    cur_acc = torch.sum(y.eq(pred)) / output.shape[0]
                    cumulative_acc += cur_acc.item()
                    n += 1
                    t.set_postfix({'veri_Average_acc': cumulative_acc / n})
                # max(n, 1) guards against ZeroDivisionError on an empty dataloader.
                print("veri_Average_acc:" + str(cumulative_acc / max(n, 1)))
        elif mode == 'use':
            # Class labels are the training sub-directory names.
            faulttype = os.listdir('./data/FFTdenoising_wavV4_to_gray/train')
            device = self.device
            model = self.model
            model.eval()  # bug fix: 'use' mode previously ran the net in training mode
            for file in os.listdir(self.veri_set):
                img = Image.open(self.veri_set + '/' + file)
                # Add a batch dimension; torch.autograd.Variable is deprecated,
                # a plain tensor inside torch.no_grad() is equivalent.
                X = torch.unsqueeze(self.data_transform(img), dim=0).float().to(device)
                with torch.no_grad():
                    pred = model(X)  # per-class scores, shape (1, num_classes)
                    predicted, actual = faulttype[torch.argmax(pred[0])], 'unknown'
                    print(f'predicted:"{predicted}",actual:"{actual}",file:"{file}"')


if __name__ == '__main__':
    # Evaluate the saved LeNet checkpoint against the labelled verification set.
    verifier = verification(model_root='./save_model/LeNet_VMyLeNet5.pth', veri_set='./data/wavV3_to_RGB')
    verifier.start_verifing(mode='test')
    # To classify unlabelled images instead, point veri_set at the image
    # directory and call start_verifing(mode='use').

