import os
from utils.log import MyLog
from config import Config
from model import PromptBert, compute_loss, simloss, infoloss
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.dataloader import GetDataloader
import warnings
import random
import pynvml
from scipy.stats import spearmanr

# GPU index used throughout this script (also hard-coded as `use_ids` in FrameWork.load).
use_device_id = 3
# Silence library deprecation/user warnings in the training logs.
warnings.filterwarnings("ignore")
# Make this device the default target for subsequent CUDA allocations.
torch.cuda.set_device(use_device_id)


class FrameWork:
    """Training/evaluation harness for the PromptBert sentence-embedding model.

    Wires together model construction, checkpoint (re)loading, the contrastive
    training loop, and periodic spearman-based validation on a single GPU.
    """

    def __init__(self, config):
        # config: project Config object (device, lr, save_path, num_epochs, ...)
        self.config = config
        self.log = MyLog(config.log_dir, __file__).getlog()
        pynvml.nvmlInit()
        # Keep the NVML handle so memory stats can be refreshed at log time;
        # the original code froze a single snapshot here and logged stale numbers.
        self._nvml_handle = pynvml.nvmlDeviceGetHandleByIndex(use_device_id)
        self.memory = pynvml.nvmlDeviceGetMemoryInfo(self._nvml_handle)
        self.load(True)

    def load(self, continue_train=False):
        """Build model, optimizer and scheduler; optionally resume weights.

        Args:
            continue_train: when True, restore model weights from
                ``<save_path>/train_loss_best.pth.tar``.
        """
        initial_usage = int(torch.cuda.memory_allocated() / 1024 / 1024)
        self.log.info(f'initial GPU memory use {initial_usage} MB')
        self.model = PromptBert(self.config).to(self.config.device)
        # eps=1e-3 is unusually large for AdamW; presumably chosen for numeric
        # stability with this model -- TODO confirm.
        self.optimizer = torch.optim.AdamW(self.model.parameters(), lr=self.config.lr, eps=1e-3)
        use_ids = [3]
        if torch.cuda.device_count() > 1 and len(use_ids) > 1:
            self.log.info(f"Using device_ids:{use_ids} GPUS for BERT")
            self.model = nn.DataParallel(self.model, device_ids=use_ids)
            self.model = self.model.cuda()
        if continue_train:
            checkpoint = torch.load(self.config.save_path + '/train_loss_best.pth.tar')
            self.model.load_state_dict(checkpoint['state_dict'])
            # NOTE(review): optimizer state is deliberately NOT restored (it was
            # commented out upstream), so resumed runs restart AdamW moments.
            self.log.info('load model/optimizer from checkpoint')
        self.model.train()
        self.scheduler = torch.optim.lr_scheduler.StepLR(optimizer=self.optimizer, step_size=1, gamma=0.8)
        model_usage = int(torch.cuda.memory_allocated() / 1024 / 1024)
        self.log.info(f'total GPU memory use {model_usage} MB, model initial use {model_usage - initial_usage} MB')

    def _save_checkpoint(self, epoch, filename):
        """Persist model + optimizer state to ``config.save_path``/``filename``."""
        if not os.path.exists(self.config.save_path):
            os.makedirs(self.config.save_path)
        state = {
            'epoch': epoch + 1,  # fixed key typo: was saved as 'eopch'
            'state_dict': self.model.state_dict(),
            'optimizer': self.optimizer.state_dict()}
        torch.save(state, self.config.save_path + '/' + filename)

    def train(self, train_iter, dev_iter):
        """Run the contrastive training loop with periodic validation.

        Every 2000 batches the model is validated on a ~10% random sample of
        ``dev_iter``; every 20000 batches on the full dev set, at which point
        the three "best" checkpoints (dev loss / train loss / spearman) may be
        refreshed.

        Args:
            train_iter: training dataloader yielding dicts of token tensors.
            dev_iter: dev dataloader with 'ori'/'ori_tmp'/'sim'/'sim_tmp' keys.
        """
        config = self.config
        train_best_loss = float('inf')
        dev_best_loss = float('inf')  # best (lowest) full-set validation loss
        spear_best = 0                # best (highest) spearman correlation
        last_improve = 0
        for epoch in range(1, config.num_epochs):
            self.log.info('-' * 60 + f'Epoch [{epoch}/{config.num_epochs}] start' + '-' * 60)
            sum_loss = 0
            for batch, input_ids in enumerate(train_iter, start=1):
                self.optimizer.zero_grad()
                ori, ori_tmp, ori1, ori1_tmp, sim, sim_tmp, sim1, sim1_tmp = \
                    [input_ids.get(i).to(self.config.device) for i in ['ori', 'ori_tmp', 'ori1', 'ori1_tmp',
                                                                       'sim', 'sim_tmp', 'sim1', 'sim1_tmp']]
                ori = self.model(ori, ori_tmp)
                ori1 = self.model(ori1, ori1_tmp)
                sim = self.model(sim, sim_tmp)
                sim1 = self.model(sim1, sim1_tmp)
                assert ori.size(0) == sim.size(0), 'ori != sim size'
                loss = infoloss(self.config, [ori, ori1, sim, sim1])
                if torch.isnan(loss):
                    # Drop NaN batches entirely so they cannot poison the weights.
                    del loss
                    if hasattr(torch.cuda, 'empty_cache'):
                        torch.cuda.empty_cache()
                    self.log.warn(f'训练:第{batch}个batch计算出现NaN')
                    continue
                sum_loss += loss.item()
                loss.backward()
                self.optimizer.step()
                del loss
                if hasattr(torch.cuda, 'empty_cache'):
                    torch.cuda.empty_cache()
                if batch % 2000 == 0:
                    # ~10% dev sample normally; the full dev set every 20000 batches.
                    thred = 1 if batch % 20000 == 0 else 0.1
                    # NOTE: NaN batches are skipped above, so this average can
                    # run slightly low when NaNs occurred in the window.
                    train_loss = sum_loss / 2000
                    dev_loss, score = self.evalute(config, dev_iter, thred=thred)
                    improve = ''
                    if thred == 1 and dev_loss < dev_best_loss:
                        dev_best_loss = dev_loss
                        self._save_checkpoint(epoch, 'dev_loss_best.pth.tar')
                        improve = '--improve'
                        last_improve = epoch
                    # Fixed: the original compared the never-updated `sum_cos`
                    # (always 0) against train_best_loss, so this checkpoint was
                    # rewritten on every full evaluation regardless of progress.
                    if thred == 1 and sum_loss < train_best_loss:
                        train_best_loss = sum_loss
                        self._save_checkpoint(epoch, 'train_loss_best.pth.tar')
                        improve = '**improve'
                    if thred == 1 and score > spear_best:
                        spear_best = score
                        self._save_checkpoint(epoch, 'spear_best.pth.tar')
                        improve = '##improve'
                    # Refresh NVML stats so the log shows current, not startup, usage.
                    self.memory = pynvml.nvmlDeviceGetMemoryInfo(self._nvml_handle)
                    self.log.info(
                        f'Iter:{batch},Device {use_device_id}[total:{int(self.memory.total / 1024 ** 2)}MB,'
                        f'use:{int(self.memory.used / 1024 ** 2)}MB,'
                        f'free:{int(self.memory.free / 1024 ** 2)}MB],'
                        f'Train Loss:{train_loss:.6f},'
                        f'use val_data:{thred:.0%} '
                        f'Val Loss:{dev_loss:.5f} spearman_socre:{score:.5f} {improve}')
                    sum_loss = 0
                    self.model.train()
            if epoch - last_improve > config.require_improvement:
                self.log.info("No optimization for a long time , auto-stopping...")
                break
            self.scheduler.step()

    def evalute(self, config, data_iter, thred=0.1):
        """Evaluate on a random subset of ``data_iter``.

        (Name kept as-is -- "evalute" is a historical typo relied on by callers.)

        Args:
            config: Config providing ``device``.
            data_iter: dev dataloader yielding dicts of token tensors.
            thred: probability of keeping each batch (1 == full dev set).

        Returns:
            Tuple of (mean validation loss, spearman correlation between cosine
            similarities and positive/negative pair labels).
        """
        DEVICE = config.device
        self.model.eval()
        valid_sum_loss = 0
        sim_tensor = torch.tensor([], device=DEVICE)
        label_array = []
        count = 0  # fixed off-by-one: previously initialised to 1, biasing the mean low
        with torch.no_grad():
            for batch, input_ids in enumerate(data_iter, start=1):
                # Random sub-sampling of the dev set.
                if random.random() > thred:
                    continue
                count += 1
                ori, ori_tmp, sim, sim_tmp = [input_ids.get(i).to(self.config.device) for i in
                                              ['ori', 'ori_tmp', 'sim', 'sim_tmp']]
                ori = self.model(ori, ori_tmp)
                sim = self.model(sim, sim_tmp)
                assert ori.size(0) == sim.size(0), 'ori size != sim size'
                devloss = simloss(self.config, ori, sim)
                if torch.isnan(devloss):
                    del devloss
                    if hasattr(torch.cuda, 'empty_cache'):
                        torch.cuda.empty_cache()
                    self.log.warn(f'测试:第{batch}个batch计算出现NaN')
                    continue
                valid_sum_loss += devloss.item()
                batch_len = len(ori)
                # Negatives: pair each anchor with the positive rolled 2 rows away
                # (assumes batch size > 2, otherwise negatives equal positives --
                # TODO confirm against the dataloader's batch size).
                neg = torch.cat([sim[2:], sim[:2]], dim=0)
                unsim = F.cosine_similarity(ori, neg, dim=-1)
                sim = F.cosine_similarity(ori, sim, dim=-1)
                if torch.isnan(sim).any() or torch.isnan(unsim).any():
                    continue
                sim_tensor = torch.cat((sim_tensor, unsim), dim=0)
                sim_tensor = torch.cat((sim_tensor, sim), dim=0)
                label_array.extend([0] * int(batch_len) + [1] * int(batch_len))
                torch.cuda.empty_cache()
        # Guard the degenerate case where every batch was skipped: spearmanr on
        # empty input would return NaN.
        if label_array:
            spearman_socre = spearmanr(label_array, sim_tensor.cpu().numpy()).correlation
        else:
            spearman_socre = 0.0
        self.model.train()
        return valid_sum_loss / max(count, 1), float(spearman_socre)


if __name__ == '__main__':
    # Script entry point: build the config and framework, fetch the
    # dataloaders, then run the training loop.
    cfg = Config()
    framework = FrameWork(cfg)
    train_loader, dev_loader = GetDataloader(cfg)
    framework.train(train_loader, dev_loader)
