import os
import pickle
import random
import time
from copy import deepcopy

import numpy as np
import scipy.io as sio
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import yaml
from sklearn import metrics
from sklearn.utils import shuffle
from sshkeyboard import listen_keyboard
from torch.autograd import Variable
from torch.nn import init
from torch.utils.data import Dataset as BaseDataset
from tqdm import tqdm

from utils import DataResult, MyDataset, myplot

# Dead code kept for reference: a Process subclass intended to run plotting
# in a separate process.
# class PlotProcess(Process):  # inherits from Process
#     def __init__(self, processeddata, IMAGE, imagepath, test_result):
#         super().__init__()
#         pass
#
#     def run(self):
#         print('开始画图')  # "start plotting"
#         myplot()




def setup_seed(seed):
    """Seed every RNG source (Python, NumPy, PyTorch CPU/CUDA) for reproducibility.

    Args:
        seed: integer seed applied to all random-number generators.
    """
    random.seed(seed)  # was called twice in the original; once is enough
    os.environ['PYTHONHASHSEED'] = str(seed)  # only affects newly spawned interpreters
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # NOTE(review): benchmark=True lets cuDNN auto-tune and pick potentially
    # non-deterministic kernels, which undermines the reproducibility this
    # function aims for — confirm this is intentional (the deterministic
    # alternative is commented out below).
    torch.backends.cudnn.benchmark = True
#    torch.backends.cudnn.deterministic = True
#    torch.backends.cuda.matmul.allow_tf32 = True
#    torch.backends.cudnn.allow_tf32 = True

        
class TrainProcess():
    """Trains an auxiliary-classifier GAN (generator + discriminator).

    The discriminator returns two heads per forward pass: a real/fake score
    and an auxiliary class prediction; both are optimized with BCE loss.
    Training progress and best checkpoints can be reported through an
    optional multiprocessing pipe (``pipesend``).
    """

    def __init__(self, model_dict, processeddata=None, plotwrapper=None, train_config=None, resultpath=None, pipesend=None, writerpath=None) -> None:
        super().__init__()
        self.processeddata = processeddata  # dict with 'train'/'test'/'valid' splits (each has .patch and .gt)
        self.train_config = train_config    # path to the YAML hyper-parameter file
        self.pipesend = pipesend            # optional pipe end for plot/progress messages
        self.modelname = 'TransGan'
        self.resultpath = resultpath        # path prefix for checkpoints and pickled results
        self.criterion = nn.BCELoss().cuda()
        self.train_result = DataResult()
        self.valid_result = DataResult()
        self.test_result = DataResult()
        self.gen_model = model_dict['Gen']
        self.dis_model = model_dict['Dis']
        self.plotwrapper = plotwrapper
        self.plot_flag = False              # set when a new best validation accuracy is found
        setup_seed(1993)

    def training_start(self):
        """Run the adversarial training loop configured by the YAML file."""
        print('--------------------------训练----------------------------')
        # Load hyper-parameters. NOTE(review): yaml.load with FullLoader is
        # acceptable for trusted local configs; prefer yaml.safe_load if the
        # file could ever come from an untrusted source.
        with open(self.train_config) as file:
            yaml_text = file.read()  # renamed: original shadowed the builtin `dict`
            config = yaml.load(yaml_text, Loader=yaml.FullLoader)
        G_learning_rate = config['G_learning_rate']
        D_learning_rate = config['D_learning_rate']
        EPOCH = config['epoch']
        TRAIN_BATCHSIZE = config['train_batchsize']
        TEST_BATCHSIZE = config['test_batchsize']
        VALID_BATCHSIZE = config['valid_batchsize']
        G_OPTIMIZER = config['G_optimization']
        D_OPTIMIZER = config['D_optimization']
        # Build one optimizer per network. NOTE(review): an unrecognized
        # optimizer name leaves gen_optimizer/dis_optimizer unbound and fails
        # later with NameError — confirm the config only uses 'Adam'/'SGD'.
        if G_OPTIMIZER == 'Adam':
            gen_optimizer = optim.Adam(
                self.gen_model.parameters(),
                lr=G_learning_rate,
                betas=(0.9, 0.999),
                eps=1e-8,
                weight_decay=0)
        if D_OPTIMIZER == 'Adam':
            dis_optimizer = optim.Adam(
                self.dis_model.parameters(),
                lr=D_learning_rate,
                betas=(0.9, 0.999),
                eps=1e-8,
                weight_decay=0)
        if G_OPTIMIZER == 'SGD':
            gen_optimizer = optim.SGD(self.gen_model.parameters(), lr=G_learning_rate)
        if D_OPTIMIZER == 'SGD':
            dis_optimizer = optim.SGD(self.dis_model.parameters(), lr=D_learning_rate)

        # Data loaders. cuda=True datasets already hold their tensors on the
        # GPU; cuda=False ones are moved batch-by-batch at evaluation time.
        training_dataset = MyDataset(self.processeddata['train'].patch, self.processeddata['train'].gt, cuda=True)
        self.train_loader = torch.utils.data.DataLoader(
            dataset=training_dataset,
            batch_size=TRAIN_BATCHSIZE,
            shuffle=True
        )
        test_dataset = MyDataset(self.processeddata['test'].patch, self.processeddata['test'].gt, cuda=False)
        self.test_loader = torch.utils.data.DataLoader(
            dataset=test_dataset,
            batch_size=TEST_BATCHSIZE,
            shuffle=False
        )

        # The validation loader is only built when the split is non-empty.
        if self.processeddata['valid'].gt.shape[0] != 0:
            valid_dataset = MyDataset(self.processeddata['valid'].patch, self.processeddata['valid'].gt, cuda=True)
            self.valid_loader = torch.utils.data.DataLoader(
                dataset=valid_dataset,
                batch_size=VALID_BATCHSIZE,
                shuffle=True
            )
        else:
            valid_dataset = None

        self.gen_model = self.gen_model.to('cuda')
        self.dis_model = self.dis_model.to('cuda')

        # Resume the best validation accuracy from a previous run, if any.
        try:
            with open(self.resultpath + 'result.pkl', 'rb') as f:
                pklfile = pickle.load(f)
                self.best_validacc = pklfile.accuracy_score
                print('目前最佳精度{}'.format(self.best_validacc))
        except Exception:
            # Narrowed from a bare `except:`; a missing or unreadable result
            # file simply means we start from zero.
            self.best_validacc = 0

        # Target values for the discriminator's real/fake head.
        real_label = 1
        fake_label = 0

        for epoch in range(1, EPOCH + 1):
            start = time.time()
            trainloss_sigma = 0.0  # accumulated generator auxiliary loss for this epoch
            self.gen_model.train()
            self.dis_model.train()
            for batch_idx, data in enumerate(tqdm(self.train_loader)):
                for idx, item in enumerate(data):
                    data[idx] = item.to('cuda')

                ############################
                # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
                ############################
                # --- train with real samples ---
                self.dis_model.zero_grad()
                real_cpu, label = data
                batch_size = real_cpu.size(0)

                noise = torch.FloatTensor(batch_size, 50).to('cuda')
                dis_label = torch.FloatTensor(batch_size).to('cuda')
                aux_label = torch.FloatTensor(batch_size).to('cuda')
                dis_label.data.resize_(batch_size).fill_(real_label)
                aux_label.data.resize_(batch_size).copy_(label)
                dis_output, aux_output = self.dis_model(real_cpu)

                dis_errD_real = self.criterion(dis_output, dis_label)
                aux_errD_real = self.criterion(aux_output, aux_label)
                errD_real = dis_errD_real + aux_errD_real
                errD_real.backward()

                # --- train with fake samples (class-conditioned noise) ---
                noise.data.resize_(batch_size, 50).normal_(0, 1)
                class_onehot = torch.ones((batch_size, 5)).to('cuda')
                # The first 5 noise dimensions carry the class condition
                # (label broadcast over a block of ones).
                noise[np.arange(batch_size), :5] = class_onehot * label.unsqueeze(1)

                fake = self.gen_model(noise)
                dis_label.data.fill_(fake_label)
                # detach(): the discriminator update must not backprop into G.
                dis_output, aux_output = self.dis_model(fake.detach())
                dis_errD_fake = self.criterion(dis_output, dis_label)
                aux_errD_fake = self.criterion(aux_output, aux_label)
                errD_fake = dis_errD_fake + aux_errD_fake
                errD_fake.backward()

                dis_optimizer.step()

                ############################
                # (2) Update G network: maximize log(D(G(z)))
                ############################
                self.gen_model.zero_grad()
                dis_label.data.fill_(real_label)  # fake labels are real for generator cost
                dis_output, aux_output = self.dis_model(fake)
                dis_errG = self.criterion(dis_output, dis_label)
                aux_errG = self.criterion(aux_output, aux_label)
                trainloss_sigma = aux_errG.item() + trainloss_sigma
                errG = dis_errG + aux_errG
                errG.backward()
                gen_optimizer.step()

            # Periodic plot notification over the pipe.
            # NOTE(review): this dereferences self.pipesend without a None
            # check (unlike evaluateOnvalid) — confirm a pipe is always given.
            if epoch % 20 == 0 or epoch == 1:
                self.pipesend.send(['plotEpoch', epoch])
            loss_avg = trainloss_sigma * TRAIN_BATCHSIZE / len(self.processeddata['train'].gt)
            train_acc = self.evaluate(self.train_loader, self.train_result, cuda=True)
            print("Training: Epoch[{:03}/{:0>3}] Loss: {:.8f} Acc:{:.2%} Lr:{:.2}".format(
                epoch, EPOCH,  loss_avg, train_acc, dis_optimizer.state_dict()['param_groups'][0]['lr']))
            # ---- monitor the model on the validation set ----
            if valid_dataset:
                self.evaluateOnvalid(epoch)

            end = time.time()
            print('模型{}的这轮训练{:.2f}分钟'.format(self.modelname, (end - start) / 60))
        print('===================Training Finished ======================')

    def evaluateOnvalid(self, epoch):
        """Evaluate on the validation split; checkpoint both models on a new best.

        A new best accuracy marks ``plot_flag``; every 50 epochs with the flag
        set, both best state dicts are saved and a plot request is sent over
        the pipe. Failures are logged and swallowed so training keeps going.
        """
        try:
            valid_acc = self.evaluate(self.valid_loader, self.valid_result, cuda=True)
            print('{} set Accuracy:{:.2%}'.format('Valid', valid_acc))
            print('best valacc:{:.2%}'.format(self.best_validacc))
            if valid_acc > self.best_validacc:
                self.plot_flag = True
                print("Higher Valid Accuracy:%{:.2%}, Old Valid Accuracy:%{:.2%}".format(valid_acc, self.best_validacc))
                self.best_validacc = valid_acc
                # deepcopy so later training steps cannot mutate the saved weights
                self.bestmodel = deepcopy(self.dis_model.state_dict())
                self.bestgenmodel = deepcopy(self.gen_model.state_dict())
            if self.pipesend and self.plot_flag and epoch % 50 == 0:
                print('='*20 + 'saving all model' + '='*20)
                torch.save(self.bestgenmodel, self.resultpath + 'newgenmodel.pth')
                torch.save(self.bestmodel, self.resultpath + 'newbestmodel.pth')
                self.pipesend.send(['plotTest', self.best_validacc])
                self.plot_flag = False
        except Exception as e:
            # Best-effort: a validation failure must not abort training.
            print(e)

    def evaluate(self, test_loader, data_result: DataResult, cuda=False):
        """Score the discriminator's auxiliary head over a loader; return accuracy.

        Args:
            test_loader: DataLoader yielding (images, labels) batches.
            data_result: accumulator; refreshed here, then filled with the
                auxiliary scores and true labels before computing metrics.
            cuda: True when the loader's tensors already live on the GPU;
                when False, each batch is moved to CUDA here.

        Returns:
            ``data_result.accuracy_score`` after ``data_result.get_metric()``.
        """
        data_result.refresh()
        self.dis_model.eval()  # hoisted out of the batch loop (was re-set every batch)
        with torch.no_grad():
            for batch_idx, data in enumerate(test_loader):
                if not cuda:
                    for idx, item in enumerate(data):
                        data[idx] = item.to('cuda')
                images, labels = data
                outputs = self.dis_model(images)  # forward -> (dis_output, aux_output)
                predicted = outputs[1]            # auxiliary class scores
                data_result.y_score += list(predicted.cpu().numpy())
                data_result.y_true += list(labels.cpu().numpy())
        data_result.get_metric()
        return data_result.accuracy_score

 
if __name__ == '__main__':
    # Smoke check: confirm a DataResult can be constructed when run directly.
    smoke = DataResult()
    print('end')
