import os
import pickle
import random
import time
from copy import deepcopy

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import yaml
from sshkeyboard import listen_keyboard
from torch.autograd import Variable
from torch.nn import init
from torch.utils.data import Dataset as BaseDataset
from tqdm import tqdm

from TransGan_train import TrainProcess as GanTrainProcess
from utils import DataPreProcess, DataResult, MyDataset, myplot

# (dead code, kept for reference) A Process subclass that would run plotting
# in a separate process; note the broken parameter name `T.test_result`.
# class PlotProcess(Process):
#     def __init__(self, processeddata, IMAGE, imagepath, T.test_result):
#         super().__init__()
#         pass


#     def run(self):
#         print('Start plotting')
#         myplot()



def setup_seed(seed):
    """Seed every source of randomness (Python, hash seed, NumPy, torch CPU/CUDA).

    Args:
        seed: integer seed applied to all random generators.

    Note:
        ``cudnn.benchmark = True`` lets cuDNN auto-tune kernels for speed but
        sacrifices bit-exact run-to-run reproducibility; set it to ``False``
        and uncomment ``cudnn.deterministic = True`` below when exact
        repeatability is required.
    """
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)      # safe no-op when CUDA is unavailable
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.benchmark = True
#    torch.backends.cudnn.deterministic = True
#    torch.backends.cuda.matmul.allow_tf32 = True
#    torch.backends.cudnn.allow_tf32 = True

        
class TrainProcess():
    """Train / validate / test driver for a CUDA-resident classification model.

    Builds DataLoaders from pre-processed patch data, runs the epoch loop,
    tracks the best validation accuracy, persists the best weights under
    ``resultpath``, and can ask a separate plotting process to redraw via
    ``pipesend``.

    NOTE(review): the whole pipeline assumes a CUDA device is available
    (``.cuda()`` / ``.to('cuda')`` throughout) — confirm before running on CPU.
    """
    def __init__(self, 
                 model, 
                 modelname='',
                 processeddata=[None, None, {}], 
                 train_config=None, 
                 resultpath=None,
                 plotwrapper=None,
                 pipesend=None,
                 savingthread=None) -> None:
        # model:         torch.nn.Module; moved to CUDA below.
        # modelname:     label used only in progress printouts.
        # processeddata: [patchdata, groundtruth, pos_label] triple; pos_label
        #                is parsed into per-split positions by
        #                DataPreProcess.parsespdata.
        #                WARNING: mutable default argument — the same list/dict
        #                objects are shared across calls that omit it.
        # train_config:  path to a YAML file (batch sizes, lr, epochs, optimizer).
        # resultpath:    prefix where 'result.pkl' and 'bestmodel.pth' live.
        # pipesend:      optional pipe end used to request plotting elsewhere.
        # savingthread:  stored but not used in this file.
        
        self.modelname = modelname
        self.patchdata, self.groudtruth, self.pos_label = processeddata
        self.pos_dict = DataPreProcess.parsespdata(self.pos_label)
        self.train_config = train_config
        self.model = model
        self.pipesend = pipesend
        self.savingthread = savingthread
        self.resultpath = resultpath
        self.plotwrapper = plotwrapper
        self.bceloss = nn.BCELoss().cuda()            # used for 1-D sigmoid outputs
        self.crossloss= nn.CrossEntropyLoss().cuda()  # used for 2-D logit outputs
        self.train_result = DataResult()
        self.valid_result = DataResult()
        self.test_result = DataResult()
        self.model.to('cuda')
        self.before_train()
        # NOTE(review): seeding after the model is built does not make the
        # weight initialisation reproducible — consider seeding earlier.
        setup_seed(1993)


    def before_train(self):
        """Build the train/valid/test DataLoaders for whichever splits appear
        in ``self.pos_label``; absent splits set the loader to ``None``.

        Does nothing when ``train_config`` is falsy — the loader attributes
        are then never created at all.
        """
        if self.train_config:
            with open(self.train_config) as file:
                dict = file.read()  # NOTE(review): shadows the builtin `dict`
            config = yaml.load(dict, Loader=yaml.FullLoader)
            TRAIN_BATCHSIZE = config['train_batchsize'] 
            TEST_BATCHSIZE = config['test_batchsize']
            VALID_BATCHSIZE = config['valid_batchsize']
        
            if 'train' in self.pos_label.keys():
                training_dataset = MyDataset(self.patchdata, self.groudtruth, self.pos_dict['train'])
                self.train_loader = torch.utils.data.DataLoader(
                    dataset=training_dataset,
                    batch_size=TRAIN_BATCHSIZE,
                    shuffle=True
                    )
            else:
                self.train_loader = None

            if 'test' in self.pos_label.keys():
                test_dataset = MyDataset(self.patchdata, self.groudtruth, self.pos_dict['test'])
                self.test_loader = torch.utils.data.DataLoader(
                    dataset=test_dataset,
                    batch_size=TEST_BATCHSIZE,
                    shuffle=False
                )
            else:
                self.test_loader = None

        
            if 'valid' in self.pos_label.keys():
                valid_dataset = MyDataset(self.patchdata, self.groudtruth, self.pos_dict['valid'])
                self.valid_loader = torch.utils.data.DataLoader(
                    dataset=valid_dataset,
                    batch_size=VALID_BATCHSIZE,
                    shuffle=False
                )
            else:
                self.valid_loader = None

    def training_start(self):
        """Run the training loop for ``epoch`` epochs (per the YAML config).

        Each epoch: forward/backward over the train split, evaluate on the
        validation split, deep-copy the weights whenever validation accuracy
        improves, and every 50 epochs persist those weights to
        ``bestmodel.pth`` and send a 'plot' message through ``pipesend``.
        """
        print('--------------------------训练----------------------------')
        # training runs on the GPU
        with open(self.train_config) as file:
            dict = file.read()  # NOTE(review): shadows the builtin `dict`
        config = yaml.load(dict, Loader=yaml.FullLoader)
        learning_rate = config['learning_rate']
        TRAIN_BATCHSIZE = config['train_batchsize'] 
        EPOCH = config['epoch']    
        OPTIMIZER = config['optimization'] 
        if OPTIMIZER == 'Adam':
            # optimizer = optim.Adam(self.model.parameters(), lr=learning_rate)  
            self.optimizer = optim.Adam(
            self.model.parameters(),
            lr=learning_rate,
            betas=(0.9, 0.999),
            eps=1e-8,
            weight_decay=0)
        elif OPTIMIZER == 'SGD':
            self.optimizer = optim.SGD(self.model.parameters(), lr=learning_rate) 
        # loss functions (created in __init__)
        # scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        #     optimizer, mode='min', factor=0.5, patience=10
        # )
        # # scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        # #     optimizer, 15, eta_min=0.0, last_epoch=-1)
        # Resume the best validation accuracy from a previous run, if a
        # result.pkl exists; otherwise start from 0.
        try:
            with open(self.resultpath + 'result.pkl', 'rb') as f:
                pklfile = pickle.load(f)
            best_validacc = pklfile.accuracy_score 
            print('目前最佳精度{}'.format(best_validacc))

        except Exception as e:
            print(e)
            best_validacc = 0

        plot_flag = False
        for epoch in range(1, EPOCH+1):
            start = time.time()
            trainloss_sigma = 0.0  # sum of batch losses over this epoch
            for batch_idx, data in enumerate(tqdm(self.train_loader)):
                for idx, item in enumerate(data):
                    data[idx] = item.to('cuda')    # move every tensor of the batch to the GPU
           
                inputs, labels = data
                # onehot_target = torch.eye(2)[labels.long(), :].to('cuda')
                # labels = labels.unsqueeze(1)
                self.optimizer.zero_grad()
                # clear accumulated gradients before this batch
                self.model.train()
                outputs = self.model(inputs)
                # print(outputs)
                # outputs.retain_grad()
                # 1-D tensor outputs are treated as sigmoid scores (BCE);
                # 2-D as logits (cross-entropy). Non-tensor outputs are
                # assumed to be pairs whose second element is the prediction.
                if isinstance(outputs, torch.Tensor):
                    if len(outputs.shape) == 1:
                        loss = self.bceloss(outputs, labels.float()) 
                    else:
                        loss = self.crossloss(outputs, labels.long())
                else:
                    if len(outputs[1].shape) == 1:
                        loss = self.bceloss(outputs[1], labels.float()) 
                    else:
                        loss = self.crossloss(outputs[1], labels.long())
                # loss = self.criterion(outputs, onehot_target) 
                # print(self.evaluate(self.train_loader, self.train_result))
                loss.backward()  # back-propagate
                self.optimizer.step()  # update the weights
                # accumulate statistics
                trainloss_sigma += loss.item()
            # approximate per-sample epoch loss (last batch may be smaller)
            loss_avg = trainloss_sigma * TRAIN_BATCHSIZE / self.pos_dict['train'].shape[0] 
            train_acc = self.evaluate(self.train_loader, self.train_result)
            print("Training: Epoch[{:03}/{:0>3}] Loss: {:.8f} Acc:{:.2%} Lr:{:.2}".format(
            epoch, EPOCH,  loss_avg, train_acc, self.optimizer.state_dict()['param_groups'][0]['lr']))
            print(self.plotwrapper[2])
            # scheduler.step(loss_avg)  # update the learning rate
        # ---------------------- validation-set performance ----------------------
            if self.valid_loader:
                try:
                    valid_acc= self.evaluate(self.valid_loader, self.valid_result)
                    print('{} set Accuracy:{:.2%}'.format('Valid', valid_acc))
                    print('best valacc:{:.2%}'.format(best_validacc))
                    if valid_acc > best_validacc:
                        plot_flag = True
                        print("Higher Valid Accuracy:%{:.2%}, Old Valid Accuracy:%{:.2%}".format(valid_acc, best_validacc))
                        best_validacc = valid_acc
                        self.bestmodel = deepcopy(self.model.state_dict())
                        
                    # Persist + notify at most once per 50 epochs, and only
                    # after a new best has been seen since the last save.
                    if self.pipesend and plot_flag and epoch%50==0:
                        print('='*20 + 'saving model' + '='*20)
                        torch.save(self.bestmodel, self.resultpath + 'bestmodel.pth') 
                        plot_flag = False
                        self.pipesend.send(['plot', best_validacc])
                
                except Exception as e:
                    # NOTE(review): swallows every validation/save error
                    # (including a possibly missing self.bestmodel) — the
                    # exception is only printed, never re-raised.
                    print(e)
                end = time.time()       
                print('模型{}的这轮训练{:.2f}分钟'.format(self.modelname, (end - start) / 60))
        print('===================Training Finished ======================')
         
        

    def evaluate(self, test_loader, data_result: DataResult):
        '''
        Run inference over ``test_loader``, fill ``data_result`` with the
        per-sample positive-class scores and true labels, and return the
        accuracy computed by ``data_result.get_metric()``.
        '''
        data_result.refresh()
        loss_sigma = 0.0  # accumulator is never used below; kept as-is
        with torch.no_grad():
            for batch_idx, data in enumerate(test_loader):
               
                for idx, item in enumerate(data):
                    data[idx] = item.to('cuda')
                images, labels = data
                # onehot_target = torch.eye(2)[labels.long(), :].to('cuda')
                # labels = labels.unsqueeze(1)
                self.model.eval()
                outputs = self.model(images)  # forward
       
                if isinstance(outputs, torch.Tensor):
                    predicted = outputs.data 
                    # _, predicted = torch.max(outputs, 1)  # 统计
                 
                    
                else:
                    # _, predicted = torch.max(outputs[1].data[:,:2], 1)  # 统计 
                    predicted = outputs[1].data 
                   
                    # _, predicted = torch.max(outputs[1], 1)  # 统计
                # 2-D logits -> softmax probability of the positive class.
                # NOTE(review): nn.Softmax() without dim= relies on torch's
                # deprecated implicit-dimension behaviour — confirm intent.
                if len(predicted.shape) == 2:
                    predicted = nn.Softmax()(predicted)
                    predicted = predicted[:,1]

                data_result.y_score += list(predicted.cpu().numpy())   
                data_result.y_true += list(labels.cpu().numpy())
        data_result.get_metric()
        return data_result.accuracy_score


if __name__ == '__main__':
    # Smoke check: confirm a DataResult can be constructed stand-alone.
    result = DataResult()
    print('end')
