import prettytable as pt
import torch
from utils.timer import Timer
import torch.nn as nn
import transformers
from sklearn.metrics import precision_recall_fscore_support, f1_score
from torch.utils.data import DataLoader
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data.distributed import DistributedSampler
from torch import distributed
import model_utils.build_dataset as BuildDataset
from model_utils import build_dataset
from utils import utils
from utils.set_seed import set_seed
import os
from utils.logger import Logger
from model_utils.build_model import AssembleModel
from utils import get_parser
# Rank of this process on the local node; injected by torchrun / torch.distributed.launch.
local_rank = int(os.environ["LOCAL_RANK"])
# Absolute path of the directory containing this file (project-relative root).
root_path = '/'.join(os.path.abspath(__file__).split('/')[:-1])

class Trainer(object):
    """Train/eval driver for a DDP-wrapped model.

    NOTE(review): reads the module-level globals ``config``, ``updates_total``
    and ``local_rank``; both ``config`` and ``updates_total`` must be defined
    before instantiation (see the ``__main__`` block).
    """

    def __init__(self, model):
        """Build optimizer (per-group LRs / weight decay) and warmup scheduler.

        Args:
            model: a DistributedDataParallel-wrapped model exposing
                ``model.module.bert``.
        """
        self.model = model

        # Separate BERT parameters (fine-tuned at the smaller bert_learning_rate)
        # from the task-specific head parameters.
        bert_params = set(self.model.module.bert.parameters())
        other_params = list(set(self.model.module.parameters()) - bert_params)
        # Standard transformers recipe: no weight decay on bias / LayerNorm weights.
        no_decay = ['bias', 'LayerNorm.weight']
        params = [
            {'params': [p for n, p in model.module.bert.named_parameters() if not any(nd in n for nd in no_decay)],
             'lr': config.bert_learning_rate,
             'weight_decay': config.weight_decay},
            {'params': [p for n, p in model.module.bert.named_parameters() if any(nd in n for nd in no_decay)],
             'lr': config.bert_learning_rate,
             'weight_decay': 0.0},
            {'params': other_params,
             'lr': config.learning_rate,
             'weight_decay': config.weight_decay},
        ]

        self.optimizer = transformers.AdamW(params, lr=config.learning_rate, weight_decay=config.weight_decay, no_deprecation_warning=True)
        # Linear warmup for the first warm_factor fraction of all update steps.
        self.scheduler = transformers.get_linear_schedule_with_warmup(
            self.optimizer,
            num_warmup_steps=config.warm_factor * updates_total,
            num_training_steps=updates_total)

    def train(self, epoch, data_loader, _logger=None):
        """Run one training epoch; rank 0 logs a summary table.

        Args:
            epoch: epoch index (display only).
            data_loader: training DataLoader.
            _logger: optional Logger; the summary table is written to it.
        """
        self.model.train()
        total_loss = 0.0

        for i, data_batch in enumerate(data_loader):
            # BUG FIX: the original called the *global* ``model`` with the
            # placeholder string '输入数据' ("input data"). Use the wrapped
            # model on the actual batch.
            # NOTE(review): assumes the model's forward returns the loss for a
            # raw collated batch — confirm against AssembleModel's signature.
            loss = self.model(data_batch)
            self.optimizer.zero_grad()  # clear stale gradients before backward
            loss.backward()             # compute gradients of network parameters
            self.optimizer.step()
            self.scheduler.step()
            total_loss += loss.item()

        table = None
        if local_rank == 0:
            table = pt.PrettyTable(["Train {}".format(epoch), "Loss", "F1", "Precision", "Recall"])
            # BUG FIX: add_row() with no argument raises TypeError. Report the
            # mean loss; metric columns stay blank (computation not implemented).
            mean_loss = total_loss / max(len(data_loader), 1)
            table.add_row(["Label", "{:.4f}".format(mean_loss), "", "", ""])
        # BUG FIX: ``table`` was unbound on non-zero ranks; guard before logging.
        if _logger and table is not None:
            _logger.writer("{}\n".format(table))
        return

    def eval(self, epoch, data_loader, is_test=False, _logger=None):
        """Run evaluation without gradients; log a (placeholder) metric table.

        Args:
            epoch: epoch index (display only).
            data_loader: evaluation DataLoader.
            is_test: unused flag, kept for interface compatibility.
            _logger: optional Logger for the result table.
        """
        self.model.eval()
        with torch.no_grad():
            for i, data_batch in enumerate(data_loader):
                pass  # TODO: forward pass + metric accumulation not implemented

        table = pt.PrettyTable(["{}".format(epoch), 'F1', "Precision", "Recall"])
        # BUG FIX: "{:3.4f}".format('') raises ValueError (float format spec on
        # a str). Emit numeric placeholders until real metrics are computed.
        table.add_row(["Label"] + ["{:3.4f}".format(x) for x in [0.0, 0.0, 0.0]])

        if _logger:  # write into the log file
            _logger.writer("{}\n".format(table))
        return

    def save(self, path):
        """Persist the (DDP-prefixed) state dict to ``path``."""
        torch.save(self.model.state_dict(), path)

def load(model, path, map_location=None):
    """Load a checkpoint into ``model``, stripping any DDP ``module.`` prefix.

    Args:
        model: target nn.Module (un-wrapped, i.e. not inside DDP).
        path: checkpoint file, e.g. produced by ``Trainer.save`` (whose keys
            carry the DistributedDataParallel ``module.`` prefix).
        map_location: optional device remapping forwarded to ``torch.load``
            (e.g. ``'cpu'`` to load a GPU checkpoint on a CPU-only host).
            Default ``None`` preserves the original behavior.

    Returns:
        The same ``model`` instance with the weights loaded, for chaining.
    """
    state = torch.load(path, map_location=map_location)
    # BUG FIX: str.replace removed *every* 'module.' occurrence, which would
    # corrupt keys of a submodule literally named 'module'. DDP only prefixes
    # once at the front, so strip only a leading prefix.
    stripped = {(k[len('module.'):] if k.startswith('module.') else k): v
                for k, v in state.items()}
    model.load_state_dict(stripped)
    return model


def set_gpu(device_ids, seed=42):
    """Bind this process to its GPU and initialize the NCCL process group.

    Args:
        device_ids: comma-separated GPU id string, e.g. ``"0,1"``.
        seed: base random seed; offset by ``local_rank`` so ranks differ.

    Returns:
        Tuple ``(device_idx, device_ids)``: the ``torch.device`` chosen for
        this rank and the parsed list of GPU ids.
    """
    device_ids = list(map(int, device_ids.split(',')))
    device_idx = torch.device('cuda:{}'.format(device_ids[local_rank]))
    set_seed(seed + local_rank)  # distinct seed per rank
    torch.cuda.set_device(device_idx)
    # FIX: init_process_group returns None; the original bound it to an
    # unused variable ``dist``.
    distributed.init_process_group(backend='nccl')
    print(f'world_size:{distributed.get_world_size()}')
    print(f'get_rank:{distributed.get_rank()}')
    return device_idx, device_ids

def prepare_dataset(config, f_path):
    """Build the dataset and a DistributedSampler-backed DataLoader.

    Args:
        config: parsed CLI namespace (uses batch_size, workers, ...).
        f_path: dataset file path; a path containing 'train' enables
            ``drop_last`` so every rank sees full batches.

    Returns:
        Tuple ``(_iter, dataset_size, sampler, ori_data)``.
    """
    # NOTE(review): ``BuildDataset`` is imported at the top of the file as a
    # *module* (``import model_utils.build_dataset as BuildDataset``), so
    # calling it raises TypeError — confirm the intended factory function
    # (likely something like ``build_dataset.build_dataset(config, f_path)``).
    dataset, ori_data = BuildDataset(config)
    sampler_distribute = DistributedSampler(dataset)
    _iter = DataLoader(dataset=dataset,
                       batch_size=config.batch_size,
                       # BUG FIX: original tested 'train' in the literal
                       # string 'f_path' (always False); test the argument.
                       drop_last='train' in f_path,
                       sampler=sampler_distribute,
                       num_workers=config.workers,
                       collate_fn=build_dataset.collate_fn)

    return _iter, len(dataset), sampler_distribute, ori_data

if __name__ == '__main__':
    
    parser = get_parser.get_basic_parser_for_train()

    # ``config`` is a module-level global read by Trainer.__init__ and
    # prepare_dataset.
    config = parser.parse_args()
    # Logger and timer exist only on rank 0; other ranks must never touch them.
    if local_rank == 0:
        logger = Logger(path=config.log_path)
        logger.writer(str(config)+'\n')
        timer = Timer(config.bert_name)
        
    device_idx, device_ids = set_gpu(device_ids=config.device_ids, seed=config.seed)
    print("Loading Data")
    _loader, _total,sampler_distribute,ori_data=prepare_dataset(config, config.dataset_path) 
    # Total optimizer steps over all epochs; consumed (as a global) by
    # Trainer.__init__ for the LR warmup schedule.
    updates_total = _total // config.batch_size * config.epochs
    if local_rank == 0:
        pass
    if config.dev_path:
        dev_loader, dev_total,dev_sampler_distribute,dev_ori_data=prepare_dataset(config, config.dev_path) 
    print("Building Model")
    model = AssembleModel(config)
    # NOTE: 'pretained_path' (sic) is the flag name defined by the parser —
    # keep the typo for CLI compatibility.
    if config.pretained_path:
        model = load(model, config.pretained_path)

    # Wrap for multi-GPU training; each rank drives exactly one device.
    model = DistributedDataParallel(model.to(device_idx),
                                    device_ids=[device_ids[local_rank]],
                                    output_device=device_ids[local_rank],
                                    find_unused_parameters=True)
    print('---------Initial model Done.---------')
    if local_rank == 0:
        timer.interval()

    trainer = Trainer(model)

    for i in range(config.epochs):
        print("Epoch: {}".format(i))
        # Reshuffle the distributed sampler each epoch (must run on every rank).
        sampler_distribute.set_epoch(i+1)
        if local_rank==0:
            # NOTE(review): Trainer.train returns None, so f1 is always None.
            f1 = trainer.train(i, _loader,_logger=logger)
        else:
            f1 = trainer.train(i, _loader)
        if config.dev_path and local_rank == 0:
            # NOTE(review): set_epoch runs only on rank 0 and only rank 0
            # iterates dev_loader, so eval sees just rank 0's sampler shard —
            # confirm this is intended.
            dev_sampler_distribute.set_epoch(i+1)
            trainer.eval(i, dev_loader,_logger=logger)

    if local_rank == 0:
        trainer.save(config.save_path)
        timer.finish()
    