import glob
import os
import random
from datetime import datetime
from multiprocessing import Process

import numpy as np
import scipy.io as sio
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import yaml
from sklearn.utils import shuffle
from sshkeyboard import listen_keyboard
from tensorboardX import SummaryWriter
from torch.autograd import Variable
from torch.nn import init
from torch.utils.data import Dataset as BaseDataset
from tqdm import tqdm

from utils import DataResult, MyDataset, myplot

# class PlotProcess(Process):  # subclass of Process
#     def __init__(self, processeddata, IMAGE, imagepath, T.test_result):
#         super().__init__()
#         pass
        

#     def run(self):
#         print('Start plotting')
#         myplot()




def setup_seed(seed):
    """Seed every RNG used by this script for reproducible runs.

    Args:
        seed: integer seed applied to Python's `random`, the hash seed,
            NumPy, and PyTorch (CPU and all CUDA devices).
    """
    random.seed(seed)  # (was called twice in the original — once is enough)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # NOTE(review): benchmark=True lets cuDNN autotune and pick potentially
    # non-deterministic kernels, which defeats exact reproducibility despite
    # the seeding above; switch to torch.backends.cudnn.deterministic = True
    # (see the commented options below) if bit-exact repeats are required.
    torch.backends.cudnn.benchmark = True
#    torch.backends.cudnn.deterministic = True    
#    torch.backends.cuda.matmul.allow_tf32 = True
#    torch.backends.cudnn.allow_tf32 = True

        
class TrainProcess():
    """AC-GAN style training driver for a generator/discriminator pair.

    The discriminator is assumed to return two heads per forward pass:
    ``(reality_logits, class_logits)`` — one judging real vs. generated
    patches and one classifying the patch label. ``evaluate`` reports the
    accuracy of the classification head.
    """

    def __init__(self, model_dict, processeddata, train_config, pipesend, writerpath) -> None:
        """
        Args:
            model_dict: dict holding the two networks under keys 'Gen' and 'Dis'.
            processeddata: dict of splits ('train'/'valid'/'test'); each split
                exposes ``.patch`` and ``.gt`` arrays (assumed — confirm against caller).
            train_config: path to a YAML file with learning rates, batch sizes,
                epoch count and optimizer choices.
            pipesend: pipe end used to notify a separate plotting process.
            writerpath: base directory for (currently disabled) TensorBoard logs.
        """
        super().__init__()
        self.processeddata = processeddata
        self.train_config = train_config
        self.pipesend = pipesend
        self.writerpath = writerpath
        self.criterion = nn.CrossEntropyLoss()
        self.train_result = DataResult()
        self.valid_result = DataResult()
        self.test_result = DataResult()
        self.gen_model = model_dict['Gen']
        self.dis_model = model_dict['Dis']

    def training_start(self):
        """Run the adversarial training loop configured by the YAML file."""
        print('--------------------------训练----------------------------')
        # Load hyper-parameters from the YAML config.
        with open(self.train_config) as file:
            raw_cfg = file.read()  # renamed: the original shadowed builtin `dict`
            config = yaml.load(raw_cfg, Loader=yaml.FullLoader)
        G_learning_rate = config['G_learning_rate']
        D_learning_rate = config['D_learning_rate']
        EPOCH = config['epoch']
        TRAIN_BATCHSIZE = config['train_batchsize']
        TEST_BATCHSIZE = config['test_batchsize']
        VALID_BATCHSIZE = config['valid_batchsize']
        G_OPTIMIZER = config['G_optimization']
        D_OPTIMIZER = config['D_optimization']
        # One optimizer per network; 'Adam' and 'SGD' are the supported choices.
        if G_OPTIMIZER == 'Adam':
            gen_optimizer = optim.Adam(
                self.gen_model.parameters(),
                lr=G_learning_rate,
                betas=(0.9, 0.999),
                eps=1e-8,
                weight_decay=0)
        if D_OPTIMIZER == 'Adam':
            dis_optimizer = optim.Adam(
                self.dis_model.parameters(),
                lr=D_learning_rate,
                betas=(0.9, 0.999),
                eps=1e-8,
                weight_decay=0)
        if G_OPTIMIZER == 'SGD':
            gen_optimizer = optim.SGD(self.gen_model.parameters(), lr=G_learning_rate)
        if D_OPTIMIZER == 'SGD':
            dis_optimizer = optim.SGD(self.dis_model.parameters(), lr=D_learning_rate)

        training_dataset = MyDataset(self.processeddata['train'].patch, self.processeddata['train'].gt)

        # Distributed sampler splits the data across processes; the per-process
        # batch size is TRAIN_BATCHSIZE // 4.
        # NOTE(review): the // 4 hard-codes a world size of 4 — confirm.
        train_sampler = torch.utils.data.distributed.DistributedSampler(training_dataset, shuffle=True)

        self.train_loader = torch.utils.data.DataLoader(
            sampler=train_sampler,
            dataset=training_dataset,
            batch_size=TRAIN_BATCHSIZE//4,
        )
        test_dataset = MyDataset(self.processeddata['test'].patch, self.processeddata['test'].gt)
        self.test_loader = torch.utils.data.DataLoader(
            dataset=test_dataset,
            batch_size=TEST_BATCHSIZE,
            shuffle=False
        )

        # The validation split may be empty; skip validation entirely then.
        if self.processeddata['valid'].gt.shape[0] != 0:
            valid_dataset = MyDataset(self.processeddata['valid'].patch, self.processeddata['valid'].gt)
            self.valid_loader = torch.utils.data.DataLoader(
                dataset=valid_dataset,
                batch_size=VALID_BATCHSIZE,
                shuffle=True
            )
        else:
            valid_dataset = None

        self.gen_model = self.gen_model.double().to('cuda')
        self.dis_model = self.dis_model.double().to('cuda')
        best_validacc = 0
        Tensor = torch.cuda.DoubleTensor
        for epoch in range(EPOCH):
            # Reshuffle the distributed sampler each epoch so every epoch sees
            # a different ordering.
            self.train_loader.sampler.set_epoch(epoch)
            trainloss_sigma = 0.0  # accumulated real-batch classification loss for this epoch
            for batch_idx, data in enumerate(self.train_loader):
                for idx, item in enumerate(data):
                    data[idx] = item.to('cuda')
                imgs, gt = data

                # One-hot labels for the (binary) class-conditional generator.
                one_hot_labels = torch.DoubleTensor(gt.shape[0], 2).cuda()
                # Adversarial ground truths: 1 = real, 0 = fake.
                valid = Variable(Tensor(imgs.size(0), ).fill_(1.0), requires_grad=False)
                fake = Variable(Tensor(imgs.size(0), ).fill_(0.0), requires_grad=False)

                # Configure input
                real_imgs = Variable(imgs.type(Tensor))

                # -----------------
                #  Train Generator
                # -----------------
                gen_optimizer.zero_grad()

                # Sample latent noise (dim 40) as generator input.
                z = torch.cuda.DoubleTensor(np.random.normal(0, 1, (imgs.shape[0], 40))).cuda()
                one_hot_labels.zero_()
                one_hot_labels.scatter_(1, gt.to('cuda').view(gt.shape[0],1), 1)

                self.gen_model.train()
                self.dis_model.train()
                # Generate a batch of images
                gen_imgs = self.gen_model(z, one_hot_labels)

                # Generator loss: fool the real/fake head AND satisfy the class head.
                reality_gen, gt_gen = self.dis_model(gen_imgs)
                g1_loss = self.criterion(reality_gen, valid.long())
                g2_loss = self.criterion(gt_gen, torch.argmax(one_hot_labels, -1))
                c_loss = (g1_loss + g2_loss)/2
                # retain_graph: part of this forward graph is reused by the
                # discriminator update below.
                c_loss.backward(retain_graph=True)
                gen_optimizer.step()

                # ---------------------
                #  Train Discriminator
                # ---------------------
                dis_optimizer.zero_grad()

                # Real batch: real/fake head should say "real"; class head should match gt.
                reality_real, cls_real = self.dis_model(real_imgs)
                reality_real_loss = self.criterion(reality_real, valid.long())
                cls_real_loss = self.criterion(cls_real, gt.long())
                trainloss_sigma += cls_real_loss.item()
                d1_loss = (reality_real_loss + cls_real_loss)/2

                d1_loss.backward(retain_graph=True)

                # Copy the generated batch into a fresh tensor so the
                # discriminator's fake-batch loss does not backprop into the
                # generator.
                gen_imgs1 = torch.rand(gen_imgs.shape).double().cuda()
                with torch.no_grad():
                    gen_imgs1.set_(gen_imgs)

                # Fake batch: real/fake head should say "fake"; class head should
                # still predict the conditioning label.
                reality_fake, cls_fake = self.dis_model(gen_imgs1)
                reality_fake_loss = self.criterion(reality_fake, fake.long())
                cls_fake_loss = self.criterion(cls_fake, torch.argmax(one_hot_labels, -1))

                d2_loss = (reality_fake_loss+ cls_fake_loss) / 2

                d2_loss.backward()
                dis_optimizer.step()

            # Approximate mean per-sample classification loss for the epoch.
            loss_avg = trainloss_sigma * TRAIN_BATCHSIZE / len(self.processeddata['train'].gt)
            train_acc = self.evaluate(self.train_loader, self.train_result)
            print("Training: Epoch[{:03}/{:0>3}] Loss: {:.8f} Acc:{:.2%} Lr:{:.2}".format(
            epoch + 1, EPOCH,  loss_avg, train_acc, dis_optimizer.state_dict()['param_groups'][0]['lr']))
            # ------------------ observe model performance on the validation set ------------------
            if valid_dataset:
                valid_acc = self.evaluate(self.valid_loader, self.valid_result)
                print('{} set Accuracy:{:.2%}'.format('Valid', valid_acc))
                if valid_acc > best_validacc:
                    print("Higher Valid Accuracy:{:.2%}, Old Valid Accuracy:{:.2%}".format(valid_acc, best_validacc))
                    best_validacc = valid_acc
                    # Tell the plotting process that a new best was reached.
                    self.pipesend.send('plot')
                print('best valacc:{:.2%}'.format(best_validacc))
        print('===================Training Finished ======================')
        if valid_dataset:
            print('Best {} set Accuracy:{:.2%}'.format('Valid', best_validacc)) 

    def evaluate(self, test_loader, data_result: DataResult):
        '''
        Score the discriminator's classification head on *test_loader*.

        Fills *data_result* with predictions and ground truth, then returns
        the overall accuracy (trace of the confusion matrix over its sum).
        '''
        data_result.refresh()
        loss_sigma = 0.0
        with torch.no_grad():
            for batch_idx, data in enumerate(test_loader):
                for idx, item in enumerate(data):
                    data[idx] = item.to('cuda')
                images, labels = data
                # BUG FIX: the original referenced `self.model`, which is never
                # defined on this class (only gen_model/dis_model exist) and
                # would raise AttributeError. Use the discriminator and take
                # its classification head (second output).
                self.dis_model.eval()
                _, outputs = self.dis_model(images)  # forward; discard real/fake head
                loss = self.criterion(outputs, labels.long())
                loss_sigma += loss.item()
                _, predicted = torch.max(outputs.data, 1)
                # Accumulate for the confusion matrix.
                data_result.y_pre += list(predicted.cpu().numpy())
                data_result.y_true += list(labels.cpu().numpy())
        data_result.get_confmat()
        return data_result.conf_mat.trace() / data_result.conf_mat.sum()

    @staticmethod
    def __init__weight(model):
        # Initialize all parameters from N(0, 0.01) and print them.
        # NOTE: the double leading underscore name-mangles this to
        # _TrainProcess__init__weight for callers outside the class.
        for name, param in model.named_parameters():
            init.normal_(param, mean=0, std=0.01)
            print(name, param.data)

 
if __name__ == '__main__':
    # Smoke check: construct an empty result container, then exit.
    result = DataResult()
    print('end')
