#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@Description:       
@Date     :2022/10/26 14:52:52
@Author      :Charon
@version      :1.0
'''
import copy
import logging
import warnings
from accelerate import Accelerator, load_checkpoint_and_dispatch, DistributedDataParallelKwargs
accelerator = Accelerator(kwargs_handlers=[DistributedDataParallelKwargs(find_unused_parameters=True)])
# accelerator = Accelerator()
import os
os.environ['TOKENIZERS_PARALLELISM'] = "false"
import sys
from typing import Any, List, Dict, Union
from torch.optim import AdamW
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from tools.set_seed import set_seed
from torch.utils.data import DataLoader
import transformers
import numpy as np
from tqdm import tqdm
import pandas as pd
import torch
import time
import wandb
import pkuseg
import jieba
from torch.nn.utils import clip_grad_norm_
from model_utils import (dataset_custom,
                   model_custom)
from model_utils.constant import (Entity, 
                            simple_metric_score,
                            metric_score,
                            PUNCTUATIONS,
                            CONFIG_MODEL_TOKENIZER_CLASSES
                            )

from prettytable import PrettyTable
Table_Handle = PrettyTable()

class Main:
    def __init__(self, config, _logger=None) -> None:
        """Build model, tokenizer, dataloaders, optimizer and LR scheduler.

        @param config: experiment configuration namespace (model_name, paths,
            learning rates, batch_size, epochs, inference flag, ...).
        @param _logger: optional file-like logger; when given, the config is
            dumped to it.
        """
        self.config = config
        self._logger = _logger
        if _logger:
            _logger.write(self.config)
            _logger.write('\n')
        # FIX: the original line was a bare `self.id2label =` (SyntaxError).
        # No id->label mapping is visible in this file, so default to None.
        # TODO(review): restore the real mapping if one is built elsewhere.
        self.id2label = None
        self.model = model_custom.AssembleModel(config=config)
        self.best_model = None

        if len(self.config.pretrained_checkpoint_path) and accelerator.is_local_main_process:
            """
            Please note, as DDP broadcasts model states from rank 0 process to all other processes in the DDP constructor, 
            you do not need to worry about different DDP processes starting from different initial model parameter values.
            """
            self.load_weight(self.config.pretrained_checkpoint_path)
        assert isinstance(config.model_name, str)
        self.tokenizer = CONFIG_MODEL_TOKENIZER_CLASSES[config.model_name][2].from_pretrained(config.model_name)
        if config.inference:
            # Inference-only runs need no optimizer, scheduler or dataloaders.
            return
        if config.multiply_lr:
            # Linear LR scaling with the number of DDP processes.
            config.lm_learning_rate *= accelerator.num_processes
            config.others_learning_rate *= accelerator.num_processes

        # Parameter names matching these fragments get no weight decay.
        no_decay = ['bias', 'LayerNorm.weight']

        params = [
            # BERT backbone (decayed)
            {'params': [p for n, p in self.model.lm_model.named_parameters() if (not any(nd in n for nd in no_decay)) and 'bert.' in n],
            'lr': config.lm_learning_rate,
            'weight_decay': config.weight_decay},
            # bias / LayerNorm parameters (no weight decay)
            {'params': [p for n, p in self.model.lm_model.named_parameters() if any(nd in n for nd in no_decay)],
            'lr': config.lm_learning_rate,
            'weight_decay': 0.0},
            # non-backbone parts, e.g. the MLM head, at the "others" LR
            {'params': [p for n, p in self.model.lm_model.named_parameters() if (not any(nd in n for nd in no_decay)) and not 'bert.' in n],
            'lr': config.others_learning_rate,
            'weight_decay': config.weight_decay},
        ]

        self.training_dataloader, _total = self.prepare_dataset(f_path=config.train_path, batch=config.batch_size)
        self.validation_dataloader, _ = self.prepare_dataset(f_path=config.dev_path, batch=config.batch_size)
        self.test_dataloader, _ = self.prepare_dataset(f_path=config.test_path, batch=1, predict=True)
        '''
        Any instruction using your training dataloader length (for instance if you want to log the number of total training steps) 
        should go after the call to prepare().
        '''
        # Total optimizer updates over all epochs (based on the raw dataset
        # size, computed before accelerator.prepare shards the dataloader).
        updates_total = _total // (config.batch_size) * config.epochs
        self.optimizer = AdamW(params, weight_decay=config.weight_decay, lr=config.lm_learning_rate)

        '''
        @Description: 
         # Warmup steps and total steps are based on batches, not epochs
        num_warmup_steps = config.num_batches * config.warmup_ratio 
        scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps, config.num_batches)
        https://github.com/rrmenon10/ADAPET/blob/e82a40ea90ae50243de9689246d5b3ebd4a9d90d/src/train.py#L85
        @Date     :2022/11/12 21:02:17
        @Author      :Charon
        @version      :1.0
        '''
        self.scheduler = transformers.get_scheduler(
                                        config.scheduler_name,
                                        self.optimizer,
                                        num_warmup_steps=config.warm_factor * updates_total,
                                        num_training_steps=updates_total)

        # Wrap training objects for DDP; eval/test loaders are only sharded.
        self.training_dataloader, self.model, self.optimizer, self.scheduler = accelerator.prepare(
                                                                                        self.training_dataloader,
                                                                                        self.model,
                                                                                        self.optimizer,
                                                                                        self.scheduler,)

        self.validation_dataloader = accelerator.prepare_data_loader(self.validation_dataloader)
        self.test_dataloader = accelerator.prepare_data_loader(self.test_dataloader)

        accelerator.print(f'training_dataloader的数量 = {len(self.training_dataloader)}  ', f'validation_dataloader的数量 = {len(self.validation_dataloader)}')
        
        
    def __call__(self, *args: Any, **kwds: Any) -> Any:
        """Full training loop: for each epoch — train, evaluate on dev,
        predict on test, all-reduce the per-process metrics, log to wandb,
        and checkpoint whenever the averaged test F1 improves.
        """
        best_f1_score = -1
        step_result = []
        for epoch in range(1, self.config.epochs+1):
            accelerator.print('✨' * 10, f'Epoch = {epoch}', '✨' * 10)

            train_loss, train_result = self.train(epoch)
            dev_loss, dev_result = self.distributed_eval(self.validation_dataloader)
            dev_precision_score = dev_result['precision']
            dev_recall_score = dev_result['recall']
            dev_f1_score = dev_result['f1_score']
            # Original note (translated): empirically reduction='sum' and
            # reduction='mean' produced identical results here; reason unclear.
            dev_reduce_result = accelerator.reduce(torch.asarray([dev_precision_score, dev_recall_score, dev_f1_score]).to(accelerator.device),
                    reduction='sum')

            test_loss, test_result = self.distributed_predict(epoch, self.test_dataloader, record=True)
            test_precision_score = test_result['precision']
            test_recall_score = test_result['recall']
            test_f1_score = test_result['f1_score']
            test_reduce_result = accelerator.reduce(torch.asarray([test_precision_score, test_recall_score, test_f1_score]).to(accelerator.device),
                    reduction='sum')
            accelerator.wait_for_everyone()

            # Record / log on the main process only.
            if accelerator.is_local_main_process:
                # Summed metrics divided by process count = cross-process mean.
                all_dev_precision_score = dev_reduce_result[0].item()/accelerator.num_processes
                all_dev_recall_score = dev_reduce_result[1].item()/accelerator.num_processes
                all_dev_f1_score = dev_reduce_result[2].item()/accelerator.num_processes

                all_test_precision_score = test_reduce_result[0].item()/accelerator.num_processes
                all_test_recall_score = test_reduce_result[1].item()/accelerator.num_processes
                all_test_f1_score = test_reduce_result[2].item()/accelerator.num_processes

                train_precision_score = train_result['precision']
                train_recall_score = train_result['recall']
                train_f1_score = train_result['f1_score']

                step_result.append({
                        'TEST': {
                        'precision_score': all_test_precision_score,
                        'recall_score': all_test_recall_score,
                        'f1_score': all_test_f1_score,
                        },

                        'DEV': {
                        'precision_score': all_dev_precision_score,
                        'recall_score': all_dev_recall_score,
                        'f1_score': all_dev_f1_score,
                        },

                        'TRAIN': {
                        'train_loss': train_loss,
                        'precision_score': train_precision_score,
                        'recall_score': train_recall_score,
                        'f1_score': train_f1_score,
                        },
                    })
                wandb.log(step_result[-1],
                    step=epoch
                    )

                if all_test_f1_score > best_f1_score:
                    # NOTE(review): 'large' models keep no in-memory copy —
                    # presumably to save RAM; confirm against project intent.
                    if 'large' not in self.config.model_name:
                        self.best_model = copy.deepcopy(self.model)
                    if len(self.config.checkpoint_save_path):
                        self.save_weight(all_test_f1_score)
                    if 'large' in self.config.model_name and self.config.checkpoint_save_path is None:
                        print('checkpint no save!!!')
                    print('save best model')

                best_f1_score = max(best_f1_score, all_test_f1_score)

                Table_Handle.clear()
                Table_Handle.field_names = ['Names','Values']
                Table_Handle.add_rows([
                    ['main_train_precision_score', train_precision_score],
                    ['main_train_recall_score', train_recall_score],
                    ['main_train_f1_score', train_f1_score],
                    ['train_loss', train_loss],
                    ['dev_precision_score', all_dev_precision_score],
                    ['dev_recall_score', all_dev_recall_score],
                    ['dev_test_f1_score', all_dev_f1_score],
                    ['test_precision_score', all_test_precision_score],
                    ['test_recall_score', all_test_recall_score],
                    ['test_f1_score', all_test_f1_score],
                ])

                print(Table_Handle)
                print(f'best_f1_score : {best_f1_score}')
                
    def prepare_dataset(self, f_path, batch=16, shuffle=False, predict=False):
        """Build the dataset and DataLoader for *f_path*.

        @param f_path: path of the data file to load.
        @param batch: batch size for the DataLoader.
        @param shuffle: whether the DataLoader shuffles samples.
        @param predict: prediction mode — no custom collate function is used.
        @Returns: (DataLoader, number of samples in the dataset).
        """
        datasets = dataset_custom.BuildDataset(config=self.config, 
                                                f_path=f_path, 
                                                _tokenizer=self.tokenizer, 
                                                predict=predict,
                                                pet_patterns_idx=self.config.pet_patterns_idx)
        _iter = DataLoader(dataset=datasets,
                        batch_size=batch,
                        # FIX: the original tested 'train' in 'f_path' — a
                        # string literal, which is always False. Test the
                        # actual path so the training loader drops the last
                        # incomplete batch as intended.
                        drop_last='train' in f_path,
                        num_workers=self.config.workers,
                        shuffle=shuffle, 
                        collate_fn=None if predict else dataset_custom.PadBatch)

        accelerator.print(f'{f_path} datasets的数量 = {len(datasets)}  ', f' DataLoader的数量 = {len(_iter)}')
        return _iter, len(datasets)

    def train(self, epoch):
        """Train for one epoch over ``self.training_dataloader``.

        @param epoch: current epoch index (kept for interface compatibility;
            not used inside the loop).
        @Returns: (mean loss over batches, metric dict from
            simple_metric_score).
        """
        self.model.train()
        total_loss = 0.0
        step = 0
        total_gold_labels = []
        total_pred_labels = []
        for batch in self.training_dataloader:
            step += 1
            self.optimizer.zero_grad()
            loss, lbl_logits = self.model(**batch)
            total_loss += loss.item()

            # FIX: the original never ran a backward pass, so the gradient
            # clipping and optimizer step below operated on empty gradients.
            # accelerator.backward() also handles DDP gradient syncing.
            accelerator.backward(loss)

            # To inspect the learning-rate schedule:
            # accelerator.print(self.optimizer.state_dict()['param_groups'][0]['lr'])
            clip_grad_norm_(self.model.parameters(), self.config.max_grad_norm)
            self.optimizer.step()
            self.scheduler.step()

            # FIX: collect gold/pred labels (mirrors distributed_eval) so the
            # returned metrics are meaningful; the original left these lists
            # empty and then crashed on undefined total_pet_* accumulators.
            total_gold_labels.extend(batch['label_class_id'].view(-1).to('cpu').detach().numpy())
            total_pred_labels.extend(torch.argmax(lbl_logits, dim=-1).view(-1).to('cpu').detach().numpy())

        result = simple_metric_score(y_true = total_gold_labels, y_pred = total_pred_labels)
        # Average the loss over batches, consistent with distributed_eval.
        return total_loss / max(step, 1), result

    def distributed_eval(self, dataloader):
        """Evaluate the model on *dataloader*.

        @param dataloader: (possibly sharded) evaluation DataLoader whose
            batches carry a 'label_class_id' tensor.
        @Returns: (mean loss over batches, metric dict from
            simple_metric_score).
        """
        num_batches = 0
        loss_sum = 0.0
        gold_all = []
        pred_all = []
        with torch.no_grad():
            self.model.eval()
            for batch in dataloader:
                num_batches += 1
                loss, lbl_logits = self.model(dev=True, **batch)
                loss_sum += loss.item()
                # Flatten gold labels and argmax predictions to CPU numpy.
                gold_all.extend(batch['label_class_id'].view(-1).to('cpu').detach().numpy())
                pred_all.extend(torch.argmax(lbl_logits, dim=-1).view(-1).to('cpu').detach().numpy())

        metrics = simple_metric_score(y_true=gold_all, y_pred=pred_all)
        return loss_sum / num_batches, metrics

    def distributed_predict(self, epoch, dataloader, record=False):
        """Run prediction over *dataloader* and score the collected entities.

        @param epoch: epoch index (unused in the visible body).
        @param dataloader: prediction DataLoader.
        @param record: flag accepted for interface compatibility (unused here).
        @Returns: (None, metric dict from metric_score).
        """
        pred_entities = set()
        gold_entities = set()

        with torch.no_grad():
            self.model.eval()
            progress = tqdm(dataloader,
                            desc='Predicting......',
                            disable=not accelerator.is_local_main_process)
            for step, batch in enumerate(progress):
                # NOTE(review): the per-batch prediction body appears to be
                # stripped/stubbed, so both entity sets stay empty.
                pass

            result = metric_score(true_entities = gold_entities, pred_entities = pred_entities)

            return None, result
    
    def predictor(self, _dir, save_dir=None, already=False):  
        """Predict over the file at *_dir* and return the metric results.

        @param  :
        _dir: path of the file to predict on
        save_dir: path where results would be saved
        already: when True, predict right after training — prefer the
                 in-memory best_model, otherwise reload the checkpoint
                 saved for this run id
        -------
        @Returns  :
        metric dict produced by metric_score
        -------
        """
        preds_set = set()
        trues_set = set()
        test_dataloader, _ = self.prepare_dataset(f_path=_dir, batch=1, predict=True)
        
        if already:
            if self.best_model:
                self.model = self.best_model
                return self.distributed_predict(epoch=-1, dataloader=test_dataloader)[1]
            assert self.config.checkpoint_save_path is not None
            self.load_weight(self.config.checkpoint_save_path[:-3] + '_' +self.config.id + '.pt')
            
            # Multi-process: predict via distributed_predict, then remove the
            # per-run checkpoint. Single-process falls through to the loop
            # below (the `if not already` wrap is skipped in that case).
            if accelerator.num_processes>1:
                result = self.distributed_predict(epoch=-1, dataloader=test_dataloader)[1]
                self.delete_weight(self.config.checkpoint_save_path[:-3] + '_' +self.config.id + '.pt')
                return result

        if not already:
            self.model = accelerator.prepare_model(self.model)

        # NOTE(review): device is computed but unused in the visible body.
        device = next(self.model.parameters()).device
            
        with torch.no_grad():
            self.model.eval()
            for batch in tqdm(test_dataloader, desc='Predicting......',
                              disable=not accelerator.is_local_main_process):
                # NOTE(review): per-batch prediction body appears stubbed out.
                pass
                    
                
                # Inspect data on the main process (stub).
                if accelerator.is_local_main_process and self._logger:
                    pass

        metric_result = metric_score(true_entities = trues_set, pred_entities = preds_set)
        
        # if already:
        #     self.delete_weight(self.config.checkpoint_save_path[:-3] + '_' +self.config.id + '.pt')
        return metric_result

    
    def save_weight(self, score):
        """Persist the unwrapped model weights for the current run id.

        Older checkpoints carrying the same run id are removed first, so at
        most one checkpoint per id survives.

        @param score: metric value that triggered the save (kept for
            interface compatibility; not encoded in the file name).
        """
        _path = self.config.checkpoint_save_path[:-3] + '_' + self.config.id + '.pt'
        # Fall back to '.' when the save path carries no directory component,
        # and create the directory so listdir/save cannot fail on a fresh run.
        _root_path = os.path.dirname(self.config.checkpoint_save_path) or '.'
        os.makedirs(_root_path, exist_ok=True)
        for filename in os.listdir(_root_path):
            if self.config.id in filename:
                self.delete_weight(os.path.join(_root_path, filename))
        # unwrap_model strips the DDP wrapper so keys have no 'module.' prefix.
        accelerator.save(accelerator.unwrap_model(self.model).state_dict(), _path)

    def delete_weight(self, path):
        """Remove the checkpoint file at *path*; warn if it does not exist."""
        if not os.path.isfile(path):
            # Message means "file does not exist".
            print('文件不存在')
            return
        os.remove(path)
    def load_weight(self, path):
        """Load a checkpoint from *path* into ``self.model`` (non-strict).

        When running multi-process without a pretrained checkpoint path, the
        model is presumably already DDP-wrapped, so every key gets a
        'module.' prefix before loading.
        """
        state = torch.load(path)
        needs_ddp_prefix = (accelerator.num_processes > 1
                            and len(self.config.pretrained_checkpoint_path) == 0)
        if needs_ddp_prefix:
            state = {'module.' + key: value for key, value in state.items()}
        missing_keys, unexpected_keys = self.model.load_state_dict(state, strict=False)
        print(f'missing_keys: {missing_keys}', '----', f'unexpected_keys: {unexpected_keys}')

        
    def early_stop(self, step_result: List[float], min_step=3):
        """Decide whether training should stop early.

        @param  :
        step_result: results recorded at each epoch
        min_step: minimum number of past evaluation steps required before
                  early stopping is even considered
        -------
        @Returns  :
        True: stop early
        False: keep training
        -------
        @description  :
        ---------
        The actual metric comparison must be hand-written per project, since
        the stopping criterion differs between projects; only the
        "too few epochs recorded" guard is implemented here.
        """
        # Not enough history yet — never stop early.
        if len(step_result) < min_step:
            return False
        