import os
from utils.log import MyLog
from config import Config
from model import SimCSE
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.dataloader import GetDataloader
import warnings
from scipy.stats import spearmanr
import random

# Silence all library warnings (torch / transformers emit a lot of deprecation noise).
warnings.filterwarnings("ignore")
# Default CUDA device for tensors allocated without an explicit device.
# NOTE(review): FrameWork.load() later wraps the model with DataParallel(device_ids=[3]),
# which conflicts with device 1 here — confirm the intended GPU layout.
torch.cuda.set_device(1)


class FrameWork:
    """Training / evaluation harness for the SimCSE sentence-embedding model.

    Builds the model and a two-group AdamW optimizer (separate learning rates
    for the BERT backbone and the task head), runs a multi-loss training loop,
    and periodically evaluates on a dev set by computing the Spearman
    correlation between cosine similarities and positive/negative labels.
    """

    def __init__(self, config):
        self.config = config
        self.log = MyLog(config.log_dir, __file__).getlog()
        # Original behavior: always resume from an existing checkpoint.
        self.load(True)

    @staticmethod
    def _unwrap(model):
        """Return the underlying module whether or not it is wrapped in DataParallel.

        Fix: the original code accessed ``self.model.module`` unconditionally,
        which crashes on single-GPU machines where DataParallel is never applied.
        """
        return model.module if hasattr(model, 'module') else model

    def load(self, continue_train=False):
        """Create model, optimizer and LR scheduler; optionally restore state.

        Args:
            continue_train: when True, load model and optimizer state from
                ``<save_path>/checkpoint.pth.tar``.
        """
        self.model = SimCSE(self.config).to(self.config.device)
        named_params = list(self.model.named_parameters())
        # Two parameter groups: the BERT backbone trains with bert_lr, every
        # other (task-specific) parameter with fc_lr.
        param_groups = [
            {'params': [p for n, p in named_params if 'bert' in n], 'lr': self.config.bert_lr},
            {'params': [p for n, p in named_params if 'bert' not in n], 'lr': self.config.fc_lr},
        ]
        self.optimizer = torch.optim.AdamW(param_groups)
        if continue_train:
            checkpoint = torch.load(self.config.save_path + '/checkpoint.pth.tar')  # fix: typo 'chechpoint'
            self.model.load_state_dict(checkpoint['state_dict'])
            self.optimizer.load_state_dict(checkpoint['optimizer'])
            self.log.info('load model/optimizer from checkpoint')
        if torch.cuda.device_count() > 1:
            # NOTE(review): device_ids=[3] is hard-coded and conflicts with the
            # module-level torch.cuda.set_device(1) — confirm intended GPUs.
            use_ids = [3]
            self.log.info(f"Using device_ids:{use_ids} GPUS for BERT")
            self.model = nn.DataParallel(self.model, device_ids=use_ids)
            self.model = self.model.cuda()
        self.model.train()
        # Multiplicative LR decay of 0.9 once per epoch (scheduler.step() is
        # called at the end of each epoch in train()).
        self.scheduler = torch.optim.lr_scheduler.StepLR(optimizer=self.optimizer, step_size=1, gamma=0.9)

    def train(self, train_iter, dev_iter):
        """Run the training loop with periodic evaluation and checkpointing.

        Every 1000 batches the running losses are averaged and logged, the
        model is evaluated on `dev_iter`, and the checkpoint / best-model files
        are written when the dev loss / cosine loss improve. Training stops
        early when no dev improvement is seen for `require_improvement` epochs.

        Args:
            train_iter: iterable yielding batches of input ids for training.
            dev_iter: iterable yielding batches of input ids for evaluation.
        """
        config = self.config  # fix: the original read the module-level global `config`
        train_best_loss = float('inf')  # best (lowest) running cosine loss
        dev_best_loss = float('inf')    # best (lowest) dev loss
        last_improve = 0                # epoch of the last dev-loss improvement
        sum_len, sum_cos, sum_sen = 0, 0, 0
        # fix: range(1, n) ran only n-1 epochs while the log reads "Epoch [e/n]".
        for epoch in range(1, config.num_epochs + 1):
            self.log.info(f'Epoch [{epoch}/{config.num_epochs}]')
            sum_loss = 0
            for batch, input_ids in enumerate(train_iter, start=1):
                self.optimizer.zero_grad()
                outputs = self.model(input_ids)
                # (removed a no-op: `outputs.to(device)` discarded its result)
                lenloss, cos_loss, sen_loss, std_loss = self._unwrap(self.model).multi_loss(outputs)
                sum_len += lenloss.item()
                sum_cos += cos_loss.item()
                sum_sen += sen_loss.item()
                # Weighted multi-task objective; 0.2 down-weights the sentence loss.
                loss = lenloss + cos_loss + 0.2 * sen_loss + std_loss
                loss.backward()
                self.optimizer.step()
                sum_loss += loss.item()
                # Free the graph eagerly; empty_cache keeps fragmentation down
                # at some speed cost (kept from the original).
                del loss
                if hasattr(torch.cuda, 'empty_cache'):
                    torch.cuda.empty_cache()
                if batch % 1000 == 0:  # `batch` starts at 1, so no extra truthiness check needed
                    thred = 1  # evaluate on the full dev set at these milestones
                    train_loss = round(sum_loss / 1000, 5)
                    sum_len = round(sum_len / 1000, 5)
                    sum_cos = round(sum_cos / 1000, 5)
                    sum_sen = round(sum_sen / 1000, 5)
                    dev_loss, score = self.evalute(config, dev_iter, thred=thred)
                    if thred == 1 and dev_loss < dev_best_loss:
                        dev_best_loss = dev_loss
                        os.makedirs(config.save_path, exist_ok=True)  # fix: race-free vs. exists()+makedirs()
                        state = {
                            'epoch': epoch + 1,  # fix: typo key 'eopch'
                            'state_dict': self._unwrap(self.model).state_dict(),
                            'optimizer': self.optimizer.state_dict()}
                        torch.save(state, config.save_path + '/checkpoint.pth.tar')
                        improve = '--improve'
                        last_improve = epoch
                    else:
                        improve = ''
                    if thred == 1 and sum_cos < train_best_loss:
                        train_best_loss = sum_cos
                        torch.save(self._unwrap(self.model).state_dict(),
                                   config.save_path + f'/new_{epoch}_{round(train_loss, 5)}.pt')
                        improve = '**improve'
                    # fix: the original f-string had no separator between the
                    # "Std Loss" value and "use val_data", fusing them in the log.
                    self.log.info(f'Iter:{batch},Train Loss:{train_loss:.5f},L2 Loss:{sum_len:.5f},'
                                  f'Cosine Loss:{sum_cos:.5f},Sentence Loss:{sum_sen:.5f}, Std Loss:{std_loss:.5f} '
                                  f'use val_data:{thred:.0%} '
                                  f'Val Loss:{dev_loss:.5f} spearman_socre:{score:.5f} {improve}')
                    # Reset the running sums for the next 1000-batch window.
                    sum_loss = 0
                    sum_len = 0
                    sum_cos = 0
                    sum_sen = 0
                    self.model.train()  # evalute() switched to eval mode
            if epoch - last_improve > config.require_improvement:
                self.log.info("No optimization for a long time , auto-stopping...")
                break
            self.scheduler.step()

    def evalute(self, config, data_iter, thred=0.1):
        """Evaluate on (a sample of) `data_iter`; return (avg loss, spearman).

        Each batch is included with probability `thred`; batches are assumed to
        contain rows in (anchor, positive, negative) triples. Name kept as-is
        ('evalute') for caller compatibility.

        Args:
            config: config object providing `device`.
            data_iter: iterable of evaluation batches.
            thred: per-batch sampling probability in (0, 1].

        Returns:
            Tuple of (mean cosine loss, Spearman correlation between cosine
            similarities and the pos/neg labels).
        """
        device = config.device
        self.model.eval()
        valid_sum_loss = 0
        sim_tensor = torch.tensor([], device=device)
        label_array = []
        with torch.no_grad():
            for batch, input_ids in enumerate(data_iter, start=1):
                # Randomly subsample the dev set to keep evaluation cheap.
                if random.random() > thred:
                    continue
                outputs = self.model(input_ids)
                # Only the cosine loss contributes to the reported dev loss.
                _lenloss, cos_loss, _sen_loss, _std_loss = self._unwrap(self.model).multi_loss(outputs)
                valid_sum_loss += cos_loss.item()
                # Truncate to a multiple of 3: rows come as (anchor, pos, neg) triples.
                batch_len = int(outputs.size(0) / 3) * 3
                ori = outputs[0:batch_len - 2:3]
                pos = outputs[1:batch_len - 1:3]
                neg = outputs[2:batch_len:3]
                # 0 for anchor/negative pairs, 1 for anchor/positive pairs —
                # matches the concatenation order (unsim first, then sim).
                label = [0] * int(batch_len / 3) + [1] * int(batch_len / 3)
                unsim = F.cosine_similarity(ori, neg, dim=-1)
                sim = F.cosine_similarity(ori, pos, dim=-1)
                sim_tensor = torch.cat((sim_tensor, unsim, sim), dim=0)
                label_array.extend(label)
        spearman_score = spearmanr(label_array, sim_tensor.cpu().numpy()).correlation
        self.model.train()
        # NOTE(review): the loss is averaged over ALL batches even though only
        # ~thred of them were sampled; the scale is consistent across calls with
        # the same thred, so best-loss comparisons remain valid — confirm intent.
        return valid_sum_loss / len(data_iter), float(spearman_score)


if __name__ == '__main__':
    # Build the shared config first; the framework and both loaders read it.
    # (The name `config` is module-level state that FrameWork.train relies on.)
    config = Config()
    trainer = FrameWork(config)
    train_loader, dev_loader = GetDataloader(config)
    trainer.train(train_loader, dev_loader)
