from tqdm import tqdm
import os
import time
import torch
import logging
from utils.log import Log
from utils.data_loader import get_dataloader, DataPrefetcher
from transformers.optimization import AdamW, get_cosine_schedule_with_warmup


class FrameWork:
    """Training/validation driver for a BERT(+CRF) token-classification model.

    Accumulates token-level confusion counts (TP/FP/TN/FN), where label 0 is
    treated as the negative ("O") class, and reports per-epoch precision/F1.
    """

    def __init__(self, con):
        # `con` is a config object; attributes read here include: device, eps,
        # full_finetuning, learning_rate, weight_decay, crf_lr, num_epochs,
        # log_dir, checkpoint_dir.
        self.config = con

    @staticmethod
    def _count_confusion(predicted, gold, tp, fp, tn, fn):
        """Accumulate token-level confusion counts over one sequence.

        Label 0 is the negative class: a non-zero prediction that matches gold
        is a TP, a non-zero mismatch is an FP; a zero prediction is a TN on
        match and an FN on mismatch.  Returns the updated (tp, fp, tn, fn).
        """
        for p, g in zip(predicted, gold):
            if p != 0:
                if p == g:
                    tp += 1
                else:
                    fp += 1
            elif p == g:
                tn += 1
            else:
                fn += 1
        return tp, fp, tn, fn

    def _metrics(self, tp, fp, tn, fn):
        """Return (accuracy, precision, recall, f1).

        `config.eps` is added to each denominator to avoid division by zero
        when a count bucket is empty.
        """
        eps = self.config.eps
        acc = (tp + tn) / (tp + tn + fp + fn + eps)
        precision = tp / (tp + fp + eps)
        recall = tp / (tp + fn + eps)
        f1 = (2 * precision * recall) / (precision + recall + eps)
        return acc, precision, recall, f1

    def train(self, train_prefetcher, model, optimizer, scheduler, epoch_steps):
        """Run one training epoch.

        Returns (mean_loss, precision, f1) computed over the epoch.

        NOTE(review): metrics are accumulated only over the first sequence of
        each batch (index [0]) — confirm batch_size == 1, or widen the loop to
        cover the whole batch.
        """
        epoch_loss = 0.0
        tp = fp = tn = fn = 0
        model.train()
        model.to(self.config.device)
        pbar = tqdm(total=epoch_steps)
        batch_data = train_prefetcher.next()
        while batch_data is not None:
            # Relies on dict insertion order being exactly:
            # input_ids, token_type_ids, attention_mask, labels.
            input_ids, token_type_ids, attention_mask, labels = [
                v.to(self.config.device) for v in batch_data.values()
            ]
            # optimizer covers all trainable params, so the previous extra
            # model.zero_grad() call was redundant.
            optimizer.zero_grad()
            predict_labels, loss = model.get_loss(input_ids, token_type_ids, attention_mask, labels)
            assert len(predict_labels[0]) == len(labels[0])
            epoch_loss += loss.item()
            loss.backward()
            optimizer.step()
            scheduler.step()  # per-step LR schedule (cosine with warmup)
            tp, fp, tn, fn = self._count_confusion(predict_labels[0], labels[0], tp, fp, tn, fn)
            batch_data = train_prefetcher.next()
            pbar.update(1)
        pbar.close()
        _, precision, _, f1 = self._metrics(tp, fp, tn, fn)
        return epoch_loss / epoch_steps, precision, f1

    def valid(self, val_prefetcher, model, epoch_steps):
        """Run one validation epoch (no gradient updates).

        Returns (mean_loss, precision, f1).

        NOTE(review): like train(), metrics only cover batch element [0].
        """
        epoch_loss = 0.0
        tp = fp = tn = fn = 0
        model.eval()
        pbar = tqdm(total=epoch_steps)
        batch_data = val_prefetcher.next()
        with torch.no_grad():
            while batch_data is not None:
                input_ids, token_type_ids, attention_mask, labels = [
                    v.to(self.config.device) for v in batch_data.values()
                ]
                predict_labels, loss = model.get_loss(input_ids, token_type_ids, attention_mask, labels)
                assert len(predict_labels[0]) == len(labels[0])
                epoch_loss += loss.item()
                tp, fp, tn, fn = self._count_confusion(predict_labels[0], labels[0], tp, fp, tn, fn)
                batch_data = val_prefetcher.next()
                pbar.update(1)
        pbar.close()
        _, precision, _, f1 = self._metrics(tp, fp, tn, fn)
        return epoch_loss / epoch_steps, precision, f1

    def main(self, model):
        """Build optimizer/scheduler and run the full train/valid loop.

        Expects `model` to expose .bert, .classifier and .crf submodules
        (see the parameter grouping below).
        """
        if self.config.full_finetuning:
            # model.named_parameters(): [bert, classifier, crf]
            bert_params = list(model.bert.named_parameters())
            classifier_params = list(model.classifier.named_parameters())
            # No weight decay on biases / LayerNorm weights.
            no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
            optimizer_grouped_parameters = [
                {'params': [p for n, p in bert_params if not any(nd in n for nd in no_decay)],
                 'lr': self.config.learning_rate,
                 'weight_decay': self.config.weight_decay},
                {'params': [p for n, p in bert_params if any(nd in n for nd in no_decay)],
                 'lr': self.config.learning_rate,
                 'weight_decay': 0.0},
                {'params': [p for n, p in classifier_params if not any(nd in n for nd in no_decay)],
                 'lr': self.config.learning_rate, 'weight_decay': self.config.weight_decay},
                {'params': [p for n, p in classifier_params if any(nd in n for nd in no_decay)],
                 'lr': self.config.crf_lr, 'weight_decay': 0.0},
                {'params': model.crf.parameters(), 'lr': self.config.crf_lr}
            ]
        else:
            # Only fine-tune the head classifier and the CRF.
            classifier_params = list(model.classifier.named_parameters())
            optimizer_grouped_parameters = [
                {'params': [p for n, p in classifier_params], 'lr': self.config.crf_lr},
                {'params': model.crf.parameters(), 'lr': self.config.crf_lr}
            ]

        optimizer = AdamW(optimizer_grouped_parameters, correct_bias=True)
        # NOTE(review): hard-coded steps-per-epoch; should come from the
        # dataloader length (see get_dataloader below) — TODO confirm and wire up.
        train_steps_per_epoch = 2025
        scheduler = get_cosine_schedule_with_warmup(
            optimizer,
            num_warmup_steps=(self.config.num_epochs // 10) * train_steps_per_epoch,
            num_training_steps=self.config.num_epochs * train_steps_per_epoch)

        Log(self.config.log_dir)
        logging.info('当前训练模式fullfinetuning:{}'.format(self.config.full_finetuning))
        for epoch in range(self.config.num_epochs):
            trainloader, train_epoch_steps = get_dataloader(self.config, 'train', num_workers=0)
            validloader, valid_epoch_steps = get_dataloader(self.config, 'valid', num_workers=0)
            train_prefetcher = DataPrefetcher(trainloader)
            val_prefetcher = DataPrefetcher(validloader)
            logging.info('开始训练')
            start_time = time.time()
            train_loss, train_prec, train_f1 = self.train(
                train_prefetcher, model, optimizer, scheduler, train_epoch_steps)
            logging.info('开始验证')
            valid_loss, valid_prec, valid_f1 = self.valid(val_prefetcher, model, valid_epoch_steps)
            use_time = time.time() - start_time
            logging.info('训练集精确率:{},训练集f1:{}'.format(train_prec, train_f1))
            logging.info('验证集精确率:{},验证集f1:{}'.format(valid_prec, valid_f1))
            logging.info('训练用时:{}'.format(use_time))
            print('训练集损失:', train_loss, '||', '训练集精确率:', train_prec, '||', '训练f1:', train_f1)
            print('验证集损失:', valid_loss, '||', '验证集精确率:', valid_prec, '||', '验证f1:', valid_f1)
            print(f'训练用时:{use_time}')

    def save_model(self, model):
        """Save the model's state_dict to <checkpoint_dir>/model.bin."""
        if not os.path.exists(self.config.checkpoint_dir):
            os.makedirs(self.config.checkpoint_dir)
        path = os.path.join(self.config.checkpoint_dir, 'model.bin')
        torch.save(model.state_dict(), path)

