import torch
import time
from importlib import import_module
import torch.optim as optim
from ModelConfig import ModelArgs
import torch.nn.functional as F
from fast_test import getdataset, getfold
from train_eval import test, evaluate, init_network
from dataprocessor import build_dataset, get_fold
from bertAdam import BertAdam
from utils_fasttext import get_time_dif
import utils_fasttext
from transformers import get_linear_schedule_with_warmup
import numpy as np
from sklearn.metrics import precision_score, recall_score, f1_score
from transformers import AdamW

def train(config, model, train_iter, dev_iter, test_iter, test_dataset):
    """Standard train/dev/test loop with a class-weighted cross-entropy loss.

    Trains for ``config.num_epochs`` epochs, evaluates on the dev set after
    every epoch, checkpoints the weights with the lowest dev loss to
    ``config.save_path``, and finally runs the test set once.

    Args:
        config: experiment config (reads learning_rate, num_epochs, device,
            save_path).
        model: torch.nn.Module to train (assumed already on config.device).
        train_iter: training batch iterator yielding (inputs, labels).
        dev_iter: dev batch iterator passed to ``evaluate``.
        test_iter: test batch iterator passed to ``test``.
        test_dataset: raw test data forwarded to ``test`` for reporting.
    """
    optimizer = optim.Adam(model.parameters(), lr=config.learning_rate)
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
    # Class weights: down-weight class 0 relative to class 1 (imbalance handling).
    weight = torch.tensor([0.5, 1], dtype=torch.float32, device=config.device)
    loss_fn = torch.nn.CrossEntropyLoss(weight=weight)
    dev_best_loss = float('inf')

    for epoch in range(config.num_epochs):
        model.train()
        print('Epoch [{}/{}]'.format(epoch + 1, config.num_epochs))
        for trains, labels in train_iter:
            optimizer.zero_grad()
            outputs = model(trains)
            loss = loss_fn(outputs, labels)
            loss.backward()
            optimizer.step()
        if epoch > 5:
            scheduler.step()  # learning-rate decay, only after the first epochs

        dev_acc, dev_loss = evaluate(config, model, dev_iter)
        print(f'{epoch} Loss: {dev_loss}; AUC: {dev_acc}')
        if dev_loss < dev_best_loss:
            dev_best_loss = dev_loss
            torch.save(model.state_dict(), config.save_path)
            print('is_stored')
    test(config, model, test_iter, test_dataset)
    
    
def n_fold_train(config, model, train_iter, test_iter, test_dataset):
    """K-fold cross-validated training with a class-weighted cross-entropy loss.

    The third argument is the FULL (un-split) training data; each fold it is
    re-split into a train/dev pair via ``getfold``. The model keeps training
    across folds with one shared optimizer/scheduler. The checkpoint with the
    lowest dev loss (skipping the first few epochs) is saved to
    ``config.save_path``; the test set is evaluated once at the end.

    Args:
        config: experiment config (reads fold, num_epochs, learning_rate,
            device, save_path).
        model: torch.nn.Module to train (assumed already on config.device).
        train_iter: the full training dataset to be split into folds.
        test_iter: test batch iterator passed to ``test``.
        test_dataset: raw test data forwarded to ``test`` for reporting.
    """
    optimizer = optim.Adam(model.parameters(), lr=config.learning_rate)
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
    # Class weights: down-weight class 0 relative to class 1 (imbalance handling).
    weight = torch.tensor([0.5, 1], dtype=torch.float32, device=config.device)
    loss_fn = torch.nn.CrossEntropyLoss(weight=weight)
    dev_best_loss = float('inf')
    total_epoch = 0
    # BUG FIX: the original read the module-level global ``train_data`` when
    # splitting folds, so the function only worked when launched from this
    # script's __main__. Use the argument instead, kept under a stable name.
    train_data = train_iter
    for idx in range(config.fold):
        fold_train_iter, dev_iter = getfold(config, train_data, idx)
        for epoch in range(config.num_epochs):
            model.train()
            print('Epoch [{}/{}]'.format(total_epoch + 1, config.num_epochs*config.fold))
            for trains, labels in fold_train_iter:
                optimizer.zero_grad()
                outputs = model(trains)
                loss = loss_fn(outputs, labels)
                loss.backward()
                optimizer.step()
            if total_epoch > 5:
                scheduler.step()  # learning-rate decay, only after the first epochs
            total_epoch += 1
            dev_acc, dev_loss = evaluate(config, model, dev_iter)
            print(f'{epoch} Loss: {dev_loss}; AUC: {dev_acc}')
            # Do not checkpoint during the first, unstable epochs.
            if total_epoch >= 5 and dev_loss < dev_best_loss:
                dev_best_loss = dev_loss
                torch.save(model.state_dict(), config.save_path)
                print('is_stored')
    test(config, model, test_iter, test_dataset)
    

def bertTrain(config, model, train_data, test_iter, test_data):
    """Fine-tune a BERT-style model over K folds with AdamW and a linear
    warm-up/decay schedule.

    For each of ``config.fold`` folds the training data is split into a
    train/dev pair via ``get_fold``. Precision/recall/F1 are computed from
    predictions collected on the TRAINING batches during the epoch, while
    loss/AUC come from ``evaluate`` on the dev split. The checkpoint with the
    lowest dev loss is saved to ``config.save_path``; the test set is
    evaluated once at the end.

    Args:
        config: experiment config (reads fold, num_epochs, batch_size,
            device, save_path).
        model: torch.nn.Module to fine-tune (assumed already on config.device).
        train_data: the full training dataset to be split into folds.
        test_iter: test batch iterator passed to ``test``.
        test_data: raw test data forwarded to ``test`` for reporting.
    """
    # NOTE: training uses unweighted F.cross_entropy, unlike train()/
    # n_fold_train(); the original also built an unused weighted loss_fn here
    # that referenced the *global* ``device`` (NameError outside __main__) —
    # that dead code is removed.
    optimizer = AdamW(model.parameters(), lr=5e-5, no_deprecation_warning=True)
    total_steps = len(train_data) // config.batch_size * config.num_epochs
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=0, num_training_steps=total_steps)
    dev_best_loss = float('inf')
    for idx in range(config.fold):
        print(f'Fold: {idx}')
        train_iter, dev_iter = get_fold(config, train_data, idx)
        for epoch in range(config.num_epochs):
            model.train()
            # Predictions gathered DURING training (not a separate val pass).
            train_preds, train_true = [], []
            print('Epoch [{}/{}]'.format(epoch + 1, config.num_epochs))
            for trains, labels in train_iter:
                model.zero_grad()
                outputs = model(trains)
                loss = F.cross_entropy(outputs, labels)
                loss.backward()
                optimizer.step()
                scheduler.step()  # linear LR schedule advances once per step
                train_preds.append(outputs.argmax(dim=1).cpu().numpy())
                train_true.append(labels.cpu().numpy())

            train_preds = np.concatenate(train_preds)
            train_true = np.concatenate(train_true)
            # sklearn defaults (average='binary') — assumes binary labels {0, 1}.
            precision = precision_score(train_true, train_preds)
            recall = recall_score(train_true, train_preds)
            f1 = f1_score(train_true, train_preds)

            dev_acc, dev_loss = evaluate(config, model, dev_iter)
            print(f'{epoch} Loss: {dev_loss}; AUC: {dev_acc}; Precision: {precision}; Recall: {recall}; f1: {f1}')
            if dev_loss < dev_best_loss:
                dev_best_loss = dev_loss
                torch.save(model.state_dict(), config.save_path)
                print('is_stored')
    test(config, model, test_iter, test_data)
    

if __name__ == '__main__':
    # Entry point: select a model by name, build the matching dataset, build
    # the model, then dispatch to the appropriate training routine.
    model_name= 'BERT_DPCNN'
    gpu_id = 0
    device = torch.device(f'cuda:{gpu_id}' if torch.cuda.is_available() else 'cpu')
    # Dynamically import models/<model_name>.py, which must expose a Config class.
    x = import_module(f'models.{model_name}')
    if model_name == 'ERNIE' or model_name == 'BERT_DPCNN':
        # BERT-family models: transformer data pipeline (dataprocessor.build_dataset).
        config = x.Config('healthy')
        config.device = device
        train_data, test_iter, test_data = build_dataset(config)
    else:
        config = x.Config('healthy', 'random')
        config.device = device
        if config.is_fold:
            # K-fold mode: keep the raw training data so folds can be re-split later.
            vocab_size, train_data, test_iter, test_dataset = utils_fasttext.build_dataset(config, False, True)
        else:
            # Fixed split: train/dev/test iterators come pre-built.
            train_iter, dev_iter, test_iter, test_data, vocab_size = getdataset(config)
        config.n_vocab = vocab_size
    config.gpu_id = gpu_id
    model_args = ModelArgs()
    # Static import table: map config.model_name to its Model class.
    # NOTE(review): the 'ERNIE' entry imports models.BERT_LSTM.Model — confirm
    # this aliasing is intentional and not a copy-paste slip.
    if config.model_name == 'FastText':
        from models.FastText import Model
    elif config.model_name == 'Transformer':
        from models.Transformer import Model
    elif config.model_name == 'TextRNN_Att':
        from models.TextRNN_Att import Model
    elif config.model_name == 'ERNIE':
        from models.BERT_LSTM import Model
    elif config.model_name == 'DPCNN':
        from models.DPCNN import Model
    elif config.model_name == 'TextRCNN':
        from models.TextRCNN import Model
    elif config.model_name == 'BERT_DPCNN':
        from models.BERT_DPCNN import Model
    model = Model(config)
    model.to(device)

    # Non-pretrained models get custom weight init; BERT-family models keep
    # their pretrained weights.
    if model_name != 'Transformer' and model_name != 'ERNIE' and model_name != 'BERT_DPCNN':
        init_network(model)
    elif model_name == 'BERT_DPCNN':
        # Grow the embedding matrix to cover tokens added to the tokenizer,
        # then give the custom '[SPE]' token a freshly initialized vector.
        model.bert.resize_token_embeddings(len(config.tokenizer)) 
        new_embedding = torch.randn(model.bert.config.hidden_size)  # create the new embedding
        # detach() shares storage with the original weight; the in-place row
        # write below mutates it outside autograd before re-wrapping.
        embedding_weight = model.bert.embeddings.word_embeddings.weight.detach()
        embedding_weight[config.tokenizer.convert_tokens_to_ids('[SPE]')] = new_embedding
        embedding_weight = torch.nn.Parameter(embedding_weight)
        embedding_weight.requires_grad = True
        model.bert.embeddings.word_embeddings.weight = embedding_weight

    # Dispatch: BERT-family → bertTrain; otherwise K-fold or plain training.
    if config.model_name == 'ERNIE' or model_name == 'BERT_DPCNN':
        bertTrain(config, model, train_data, test_iter, test_data)
    else:
        start_time = time.time()
        if config.is_fold:
            n_fold_train(config, model, train_data, test_iter, test_dataset)
        else:
            train(config, model, train_iter, dev_iter, test_iter, test_data)
        print('Time cost: ', get_time_dif(start_time))