import os
import sys
import argparse
import configparser
import shutil
import copy
from utils.set_seed import set_seed
import torch
import warnings
import numpy as np
from utils import seed
from transformers import AutoTokenizer
from transformers import AdamW, get_linear_schedule_with_warmup
from torch.utils.tensorboard import SummaryWriter
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data.distributed import DistributedSampler
from torch import distributed
from torch.utils.data import DataLoader
from model_utils.build_dataset import BuildDataset, label2id, id2label, PadBatch
from model_utils.build_model import AssembleModel
from utils.logger import Logger
from utils.timer import Timer
from sklearn.metrics import accuracy_score, recall_score, f1_score, precision_score


# Project root = directory containing this file.
# BUGFIX: os.path.dirname instead of splitting on '/' (the manual split
# breaks on Windows-style paths and duplicates stdlib functionality).
root_path = os.path.dirname(os.path.abspath(__file__))
config = configparser.ConfigParser()
# NOTE: ConfigParser.read silently ignores a missing file; config.ini is
# expected to live next to this script.
config.read(os.path.join(root_path, 'config.ini'), encoding='utf-8')


class Main:
    """Driver for a DistributedDataParallel token-classification (NER-style)
    fine-tuning run.

    All hyper-parameters come from the module-level ``config`` (config.ini)
    at construction time; the methods build the model and data iterators and
    run one epoch of training / evaluation, plus checkpoint saving.
    """

    def __init__(self):
        # Read all settings from config.ini (loaded at module import time).
        self.model_name = config.get('hugging_model', 'model_name')
        self.train_dataset_path = config.get('datasets', 'train_dataset_path')
        # BUGFIX: dev_dataset_path / seq_length / epoch_size / fine_tuning
        # are read by prepare_dataset()/load_model() and by the __main__
        # script but were never initialised, which raised AttributeError at
        # runtime.  Section/option names are assumed to follow the existing
        # config layout — TODO confirm against config.ini.
        self.dev_dataset_path = config.get('datasets', 'dev_dataset_path')

        self.warmup_rate = config.getfloat('implementation', 'warmup_rate')
        self.dropout_rate = config.getfloat('implementation', 'dropout_rate')
        self.batch_size = config.getint('implementation', 'batch_size')
        self.lr = config.getfloat('implementation', 'lr')
        self.seq_length = config.getint('implementation', 'seq_length')
        self.epoch_size = config.getint('implementation', 'epoch_size')
        self.fine_tuning = config.getboolean('implementation', 'fine_tuning')

        self.output_result_path = config.get('result', 'output_result_path')
        self.output_model_path = config.get('result', 'output_model_path')
        self.logs_path = config.get('result', 'logs_path')
        self.pretain_checkpoint_path = config.get('result', 'pretain_checkpoint_path')

    def load_model(self, labels_num):
        """Build the task model.

        :param labels_num: number of output labels.
        :return: an ``AssembleModel`` instance.
        """
        model = AssembleModel(model_name=self.model_name,
                              labels_num=labels_num,
                              dropout_rate=self.dropout_rate,
                              fine_tuning=self.fine_tuning)
        '''
        # Optionally restore pretrained weights; the DDP 'module.' prefix is
        # stripped so the checkpoint loads into a plain (non-DDP) model.
        if self.pretain_checkpoint_path:
            weights = torch.load(self.pretain_checkpoint_path)
            weights_dict = {}
            for k, v in weights.items():
                new_k = k.replace('module.', '') if 'module' in k else k
                weights_dict[new_k] = v
            model.load_state_dict(weights_dict)
        '''
        return model

    def prepare_dataset(self, f_path, tokenizer):
        """Build a distributed DataLoader over the dataset at *f_path*.

        :param f_path: path to the raw dataset file.
        :param tokenizer: HuggingFace tokenizer forwarded to BuildDataset.
        :return: (data iterator, dataset size, DistributedSampler) — the
                 sampler is returned so callers can call ``set_epoch()``.
        """
        dataset = BuildDataset(f_path=f_path, seq_length=self.seq_length, tokenizer=tokenizer)
        sampler_distribute = DistributedSampler(dataset)
        _iter = DataLoader(dataset=dataset,
                           batch_size=self.batch_size,
                           # shuffle=True is mutually exclusive with sampler
                           sampler=sampler_distribute,
                           num_workers=2,              # data-loading subprocesses
                           collate_fn=PadBatch)        # pads samples into one mini-batch
        return _iter, len(dataset), sampler_distribute

    def train_precess(self, e, model, optimizer, scheduler, dataset, device, local_rank=0):
        """Run one training epoch.

        :param e: 1-based epoch number (for logging only).
        :return: (mean loss, model, optimizer, scheduler).

        (Method name kept as-is — 'precess' is a historical typo callers rely on.)
        """
        model.train()  # enable Dropout / BatchNorm training behaviour
        step = 0
        total_loss = 0.
        for batch_sample in dataset:
            step += 1
            token_ids_tensor, mask_tensor, labels_tensor = batch_sample
            token_ids_tensor = token_ids_tensor.to(device)
            labels_tensor = labels_tensor.to(device)
            mask_tensor = mask_tensor.to(device)

            loss, _ = model(token_ids_tensor, labels_tensor, mask_tensor)
            total_loss += loss.item()
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()  # clear gradients for the next step
            scheduler.step()       # advance the warmup/decay LR schedule

        # BUGFIX: guard against ZeroDivisionError on an empty iterator.
        avg_loss = total_loss / step if step else 0.0
        print("Epoch: {}, Loss:{:.4f}".format(e, avg_loss))
        return avg_loss, model, optimizer, scheduler

    def evaluate_precess(self, e, model, dataset, device):
        """Evaluate on *dataset*; print and return macro (P, R, F1) in percent.

        Predictions and labels are collected token-by-token up to the first
        zero in each attention mask — assumes the mask is contiguous
        (real tokens first, padding after); TODO confirm with PadBatch.
        """
        model.eval()
        Y_true, Y_hat = [], []

        # NER-style evaluation loop.
        with torch.no_grad():
            for batch_sample in dataset:
                token_ids_tensor, mask_tensor, labels_tensor = batch_sample
                token_ids_tensor = token_ids_tensor.to(device)
                mask_tensor_dev = mask_tensor.to(device)
                _, y_hat = model(token_ids_tensor, None, mask_tensor_dev)
                # PERF: labels/mask are only read on CPU here, so the
                # original device round-trip (to(device) then .cpu()) is dropped.
                for y_h, y_t, mask in zip(y_hat, labels_tensor, mask_tensor):
                    for index, ele in enumerate(mask):
                        if ele > 0:
                            Y_hat.append(y_h[index])
                            Y_true.append(y_t[index])
                        else:
                            break  # first 0 marks the start of padding
        Y_true = np.array(Y_true)
        Y_hat = np.array(Y_hat)
        p = precision_score(y_true=Y_true, y_pred=Y_hat, average='macro', zero_division=1) * 100
        r = recall_score(Y_true, Y_hat, average='macro', zero_division=1) * 100
        # BUGFIX: avoid ZeroDivisionError when both precision and recall are 0.
        f1 = 2 * p * r / (p + r) if (p + r) > 0 else 0.0
        print("Epoch {}, P:{:.3f}, R:{:.3f}, F1:{:.3f}".format(e, p, r, f1))
        return p, r, f1

    def save_model(self, model, fine_tuning=False):
        """Persist *model*'s state_dict to ``self.output_model_path``.

        For a DistributedDataParallel model the 'module.' prefix is stripped
        from the keys so the checkpoint loads into a plain model.
        NOTE(review): the DDP branch saves unconditionally while the plain
        branch saves only when fine_tuning is True — confirm this asymmetry
        is intended.
        """
        downstream = {}
        if hasattr(model, "module"):  # DDP wrapper: strip 'module.' from keys
            for key, value in model.module.state_dict().items():
                new_k = key.replace('module.', '') if 'module' in key else key
                downstream[new_k] = value
            torch.save(downstream, self.output_model_path)
        else:
            if fine_tuning:
                torch.save(model.state_dict(), self.output_model_path)

if __name__ == '__main__':
    timer = Timer()
    main = Main()
    parser = argparse.ArgumentParser()

    # ---- GPU / distributed setup: start ----
    parser.add_argument('--device_ids', type=str, help='comma-separated GPU ids, e.g. "0,1"')
    # BUGFIX: --is_dev is read below (args.is_dev) but was never declared,
    # which raised AttributeError on rank 0; store_true yields a real bool,
    # so the flag defaults to False when absent (backward compatible).
    parser.add_argument('--is_dev', action='store_true',
                        help='run dev-set evaluation every epoch (rank 0 only)')
    args = parser.parse_args()
    # LOCAL_RANK is injected by torchrun: one process per GPU, 0-based.
    local_rank = int(os.environ["LOCAL_RANK"])
    device_ids = list(map(int, args.device_ids.split(',')))
    device_idx = torch.device('cuda:{}'.format(device_ids[local_rank]))
    seed.set_seed(42 + local_rank)  # a distinct seed per rank
    print('当前进程Local_rank：', local_rank)
    # Bind this process to its own GPU; without this every process starts
    # on GPU 0 of CUDA_VISIBLE_DEVICES.
    torch.cuda.set_device(device_idx)
    # Initialise the process group first (init_process_group returns None,
    # so the original 'dist =' binding was dropped).
    distributed.init_process_group(backend='nccl')
    # ---- GPU / distributed setup: end ----

    # ---- data loading: start ----
    tokenizer = AutoTokenizer.from_pretrained(config.get('hugging_model', 'model_name'))
    train_iter, train_size, train_sampler_distribute = main.prepare_dataset(f_path=main.train_dataset_path,
                                                                            tokenizer=tokenizer)
    # Total optimisation steps = ceil(train_size / batch_size) * epochs
    # (ceil-division replaces the original if/else, same result).
    steps_per_epoch = (train_size + main.batch_size - 1) // main.batch_size
    train_step = steps_per_epoch * main.epoch_size

    dev_iter, _, dev_sampler_distribute = main.prepare_dataset(f_path=main.dev_dataset_path,
                                                               tokenizer=tokenizer)
    # ---- data loading: end ----
    print('------Load Data Done.-------')

    # ---- model: start ----
    model = main.load_model(labels_num=len(id2label))
    # torch.nn.SyncBatchNorm.convert_sync_batchnorm not used: no BN layers here.
    model = DistributedDataParallel(model.to(device_idx),
                                    device_ids=[device_ids[local_rank]],
                                    output_device=device_ids[local_rank],
                                    find_unused_parameters=True)
    # ---- model: end ----
    print('---------Initial model Done.---------')

    # ---- optimizer / scheduler: start ----
    optimizer_param = list(model.named_parameters())
    # L2 weight decay is applied to every parameter except those whose name
    # matches an entry in no_decay (biases, LayerNorm, frozen backbone).
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight', 'backbone']
    optimizer_grouped_parameters = [
        {'params': [param for name, param in optimizer_param
                    if not any(nd in name for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [param for name, param in optimizer_param
                    if any(nd in name for nd in no_decay)], 'weight_decay': 0.0}
    ]
    # AdamW = Adam with decoupled weight decay.
    optimizer = AdamW(optimizer_grouped_parameters, lr=main.lr, eps=1e-8, no_deprecation_warning=True)
    # Linear warmup for train_step*warmup_rate steps, then linear decay to 0.
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=train_step * main.warmup_rate,
                                                num_training_steps=train_step)
    # ---- optimizer / scheduler: end ----
    print('---------optimizer load Done.---------')

    print('------Start training.------')
    if local_rank == 0:
        # Recreate the TensorBoard log directory so every run starts clean.
        if os.path.isdir(config.get('result', 'logs_path')):
            shutil.rmtree(config.get('result', 'logs_path'))
            print('删除已经存在的文件夹')
        writer = SummaryWriter(config.get('result', 'logs_path'))
        writer.add_text(tag='Note', text_string='*********')

    best_f1 = 0.0
    best_loss = 100000
    for epoch in range(1, main.epoch_size + 1):
        # set_epoch makes the DistributedSampler reshuffle each epoch.
        train_sampler_distribute.set_epoch(epoch)
        dev_sampler_distribute.set_epoch(epoch)

        train_loss, model, optimizer, scheduler = main.train_precess(epoch, model, optimizer,
                                                                     scheduler, train_iter,
                                                                     device=device_idx)

        if local_rank == 0:
            if args.is_dev:
                p, r, f1 = main.evaluate_precess(epoch, model, dev_iter, device=device_idx)
            writer.add_scalar('view/train_loss', train_loss, epoch)
            # writer.add_scalar('view/val_f1s', f1, epoch)

        # Track the best checkpoint after a 10-epoch burn-in (deep copy so a
        # later optimizer.step cannot mutate the snapshot).
        # NOTE(review): best_model is kept but save_model below persists the
        # FINAL model, not best_model — confirm which one should be saved.
        if train_loss < best_loss and epoch > 10:
            best_loss = train_loss
            best_model = copy.deepcopy(model)

    if local_rank == 0:  # ranks share weights, so one process saves for all
        writer.add_hparams(hparam_dict={'lr': main.lr,
                                        'batch': main.batch_size,
                                        'model_name': main.model_name,
                                        'epoch': main.epoch_size},
                           metric_dict={'best_loss': best_loss})
        writer.close()
        # Remove any stale weight file before saving the new one.
        if os.path.isfile(config.get('result', 'output_model_path')):
            os.remove(config.get('result', 'output_model_path'))
            print('删除已存在的模型')
        main.save_model(model, fine_tuning=main.fine_tuning)
        print('------------Save Model Done.------------')

    timer.finish()