from __future__ import annotations
from collections import defaultdict
from copy import deepcopy
import datetime
from math import floor
from operator import truediv
import time
from hamcrest import none
import torch
from torch.utils.data import DataLoader
from pycm import *
from torch.utils.tensorboard import SummaryWriter
import os
from typing import TYPE_CHECKING, Dict, List, Tuple
import json

from ner.dataset import Dataset
if TYPE_CHECKING:
    from ner.ner_model import NerModel

class Trainer:

    @staticmethod
    def _current_time_to_int() -> int:
        t = datetime.datetime.now()
        y = t.year;        mo = t.month;        d = t.day
        h = t.hour;        mi = t.minute;        s = t.second
        value = 0
        value += y; value *= 100
        value += mo; value *= 100
        value += d; value *= 100
        value += h; value *= 100
        value += mi; value *= 100
        value += s; value *= 100
        return value

    def __init__(self, known_labels:List[str], model:NerModel, train_batch_size:int, test_batch_size:int, learning_rate:float) -> None:        
        os.environ["TOKENIZERS_PARALLELISM"] = "true"
        #self.model = model
        self.known_labels = known_labels
        self.train_batch_size = train_batch_size
        self.test_batch_size = test_batch_size
        # CPU 和 GPU 的确定
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        print(f'使用{self.device}')
        model = model.to(self.device)
        # 优化器
        self.learning_rate = learning_rate
        self.optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
        # 交叉熵损失
        self.ce_criterion = torch.nn.CrossEntropyLoss()
        self.ce_criterion = self.ce_criterion.to(self.device)
        # 其他
        self.current_epoch = 0
        self.epoch_end_batch_count = 0
        self.prev_performance = 0
        self.train_id = Trainer._current_time_to_int()#abs(hash(datetime.datetime.now()))
        print(f'训练标识 {self.train_id}')
        self.tensorboard_writer = SummaryWriter(f'./runs/{self.train_id}', '{self.train_id}')

    @staticmethod
    def load_from(model_file_name:str, model:NerModel, train_batch_size:int = None, test_batch_size:int = None, learning_rate:float = None):
        """Restore a Trainer from a checkpoint produced by `_save_state`.

        Hyperparameters passed as None are read from the checkpoint; explicit
        arguments override the saved values.
        """
        # map_location lets a checkpoint saved on a GPU machine be loaded on a
        # CPU-only machine; load_state_dict casts optimizer state back to the
        # device of the matching parameters.
        checkpoint = torch.load(model_file_name, map_location='cpu')
        print(f'从 {model_file_name} 载入训练器')
        if train_batch_size is None:
            train_batch_size = checkpoint['train_batch_size']
        if test_batch_size is None:
            test_batch_size = checkpoint['test_batch_size']
        if learning_rate is None:
            learning_rate = checkpoint['learning_rate']
        known_labels = checkpoint['known_labels']
        trainer = Trainer(known_labels, model, train_batch_size, test_batch_size, learning_rate)
        trainer.current_epoch = checkpoint['epoch'] + 1
        trainer.epoch_end_batch_count = checkpoint['batch_count']
        trainer.optimizer.load_state_dict(checkpoint['optimizer_state'])
        # NOTE(review): the SummaryWriter created in __init__ still logs under
        # the freshly generated train_id, not the restored one — confirm this
        # is the intended TensorBoard behavior when resuming.
        trainer.train_id = checkpoint['train_id']
        return trainer

    def train(self, model:NerModel, train_dataset:Dataset, test_dataset:Dataset, train_data_loader:DataLoader, test_data_loader:DataLoader, num_epochs:int, test_every_n_epoch:int, save_every_n_epoch:int, dataset_name:str) -> None:        
        self.write_hot_config()
        self.dataset_name = dataset_name
        train_time_start = time.perf_counter()
        time_spot_start = time.perf_counter()
        for i_epoch in range(self.current_epoch, num_epochs):
            epoch_loss_values = []
            #epoch_out_refined = []
            print(f'本轮将有 {len(train_data_loader)} 批')
            for pg in self.optimizer.param_groups:
                print(f'学习率 {pg["lr"]}')
            for i_batch, (bert_token_ids, bert_token_types, bert_masks, gold_label_ids, input_strings, word_vec_input_tensors) in enumerate(train_data_loader):
                if model.pretrained_model_name is None and model.word_vec_encoder_set.count() == 0:
                    print('预训练模型和词向量都不存在')
                    return
                # 传入 GPU 或 CPU
                # word_vec_embeddings = self.to_device(word_vec_embeddings)
                model = self.to_device(model)
                bert_masks = self.to_device(bert_masks)
                bert_token_ids = self.to_device(bert_token_ids)
                bert_token_types = self.to_device(bert_token_types)
                gold_label_ids = self.to_device(gold_label_ids)
                # 训练该批次
                # 模型设置为训练状态
                model.train()
                # 清除上一步的梯度, 防止积累
                self.optimizer.zero_grad()
                bool_masks = bert_masks == 1
                if model.use_crf:
                    # 分类之前的计算
                    out = model.forward_before_classification(
                                                            word_vec_input_tensors,
                                                            bert_token_ids,
                                                            bert_token_types,
                                                            bert_masks,
                                                            self.device,
                                                            )
                    # CRF 损失，已批次内平均化
                    loss = model.neg_log_likelihood_loss_2(out,
                                                            bool_masks,
                                                            gold_label_ids,
                                                            )
                    # 分类的计算
                    out = model.classify(out, bert_masks)
                    # out = model(input_strings, 
                    #             word_vec_input_tensors,
                    #             bert_token_ids,
                    #             bert_token_types,
                    #             bert_masks,
                    #             gold_label_ids,
                    #             self.device
                    #             )
                    # loss = model.neg_log_likelihood_loss(input_strings,
                    #                                     word_vec_input_tensors,
                    #                                     bert_token_ids,
                    #                                     bert_token_types,
                    #                                     bert_masks,
                    #                                     gold_label_ids,
                    #                                     self.device,
                    #                                     )
                    out_refined, gold_label_ids_refined = self.flatten_and_refine(out, gold_label_ids, bool_masks)
                else:
                    out = model(input_strings, 
                                word_vec_input_tensors,
                                bert_token_ids,
                                bert_token_types,
                                bert_masks,
                                gold_label_ids,
                                self.device
                                )
                    out_refined, gold_label_ids_refined = self.flatten_and_refine(out, gold_label_ids, bool_masks)
                    # 交叉熵损失，已批次内平均化
                    loss = self.ce_criterion(out_refined, gold_label_ids)
                # 后向计算
                loss.backward()
                # 更新模型参数
                self.optimizer.step()
                # 记录损失值
                batch_losss_value = loss.item()
                epoch_loss_values.append(batch_losss_value)
                #epoch_out_refined.append(out_refined)
                # 计时
                time_spot = time.perf_counter()
                # 批次信息
                print(f'训练标识 {self.train_id}, 模型 {model.get_description()}, 第 {i_epoch} 轮, 第 {i_batch} 批, 损失 {round(batch_losss_value, 2)}, 用时 {round(time_spot - time_spot_start, 3)} 秒', )
                time_spot_start = time_spot

            self.epoch_end_batch_count += len(train_data_loader) * self.train_batch_size

            # 训练统计
            epoch_mean_loss_value = sum(epoch_loss_values) / len(epoch_loss_values)
            print(f'训练标识 {self.train_id}, 第 {i_epoch} 轮, 轮平均损失 {round(epoch_mean_loss_value, 2)}', )
            self.statistics(True, model, train_dataset, '训练(该轮最后一批)', self.train_batch_size, i_epoch, out_refined, gold_label_ids_refined,  epoch_mean_loss_value)
            self.statistics(False, model, train_dataset, '训练(该轮最后一批)', self.train_batch_size, i_epoch, out_refined, gold_label_ids_refined, epoch_mean_loss_value)
            
            # 测试
            if (i_epoch + 1) % test_every_n_epoch == 0:
                print('开始测试')
                performance = self.test(model, train_dataset, test_data_loader, self.epoch_end_batch_count, i_epoch)    
                # if self.auto_stop(performance, test_every_n_epoch, stop_criterion, train_time_start):
                #     return

            # 保存模型
            if (i_epoch + 1) % save_every_n_epoch == 0:
                self._save_state(model, train_dataset, test_dataset, i_epoch, self.epoch_end_batch_count)
        
        print(f'训练结束')
        hours, minutes, seconds = self.time_span_since_start(train_time_start)
        print(f'训练总时长 {hours} 小时 {minutes} 分 {seconds} 秒')

    def test(self, model:NerModel, dataset:Dataset, test_data_loader:DataLoader, batch_count:int, i_epoch:int) -> float:
        '''Evaluate the model on test_data_loader and return its macro F1.

        Side effects: writes statistics files, TensorBoard scalars and a
        markdown log of every mislabelled sentence. `dataset` is only used for
        naming the report files; `batch_count` is currently unused.
        '''
        time_spot_start = time.perf_counter()
        # Put the model into evaluation mode (the original comment claimed
        # "training mode", which was wrong — this is model.eval()).
        model.eval()
        with torch.no_grad():
            all_out_label_ids = []
            all_bool_masks = []
            all_gold_label_ids = []
            all_input_strings = []
            test_loss_values = []
            for i_batch, (bert_token_ids, bert_token_types, masks, gold_label_ids, input_strings, word_vec_input_tensors) in enumerate(test_data_loader):
                # Move the batch tensors to the GPU or CPU.
                if bert_token_ids is not None:
                    bert_token_ids = bert_token_ids.to(self.device)
                    bert_token_types = bert_token_types.to(self.device)
                masks = masks.to(self.device)
                bool_masks = masks == 1 # boolean (True/False) form of the mask
                bool_masks = bool_masks.to(self.device)
                gold_label_ids = gold_label_ids.to(self.device)
                if model.crf is not None:
                    out = model.forward_before_classification(
                                                            word_vec_input_tensors,
                                                            bert_token_ids,
                                                            bert_token_types,
                                                            masks,
                                                            self.device,
                                                            )
                    loss = model.neg_log_likelihood_loss_2(out,
                                                            bool_masks,
                                                            gold_label_ids,
                                                            )
                    out = model.classify(out, masks)
                    out_refined, gold_label_ids_refined = self.flatten_and_refine(out, gold_label_ids, bool_masks)
                else:
                    out = model(input_strings, 
                                    word_vec_input_tensors,
                                    bert_token_ids,
                                    bert_token_types,
                                    masks,
                                    gold_label_ids,
                                    self.device,
                                    )
                    out_refined, gold_label_ids_refined = self.flatten_and_refine(out, gold_label_ids, bool_masks)
                    loss = self.ce_criterion(out_refined, gold_label_ids_refined)
                test_loss_values.append(loss.item())
                # Merge this batch's per-sentence results into the run-level accumulators.
                for single_out_label_ids, single_bool_masks, single_gold_label_ids in zip(out, bool_masks, gold_label_ids):
                    all_out_label_ids.append(single_out_label_ids)
                    all_bool_masks.append(single_bool_masks)
                    all_gold_label_ids.append(single_gold_label_ids)
                all_input_strings.extend(input_strings)
                
                time_spot = time.perf_counter()
                print(f'\t第 {i_batch} 批, 用时 {round(time_spot - time_spot_start, 3)} 秒', )
                time_spot_start = time_spot
            
            # Pad all sentences to a common length so they stack into one tensor;
            # padding positions use the control label and a zero mask.
            time_spot_start = time.perf_counter()
            control_label_index = self.known_labels.index('控制标签')
            all_out_label_ids = torch.nn.utils.rnn.pad_sequence(all_out_label_ids, batch_first=True, padding_value = control_label_index)
            all_bool_masks = torch.nn.utils.rnn.pad_sequence(all_bool_masks, batch_first=True, padding_value = 0)
            all_gold_label_ids = torch.nn.utils.rnn.pad_sequence(all_gold_label_ids, batch_first=True, padding_value = control_label_index)
            # Aggregate statistics over the whole test set.
            epoch_loss = sum(test_loss_values) / len(test_loss_values)
            out_refined, gold_label_ids_refined = self.flatten_and_refine(all_out_label_ids, all_gold_label_ids, all_bool_masks)
            out_refined = out_refined.to(self.device)
            # NOTE(review): the first statistics() result is overwritten — only
            # the head/tail-distinguished F1 is returned. Confirm intended.
            performance = self.statistics(True, model, dataset, '测试', self.test_batch_size, i_epoch, out_refined, gold_label_ids_refined, epoch_loss)
            performance = self.statistics(False, model, dataset, '测试', self.test_batch_size, i_epoch, out_refined, gold_label_ids_refined, epoch_loss)
            time_spot = time.perf_counter()
            print(f'\t合并统计，用时 {round(time_spot - time_spot_start, 3)} 秒', )
            # Log every sentence that contains at least one wrong label.
            print(f'\t正在输出所有带有错误标注的句子', )
            out_refined, gold_label_ids_refined = self.refine_only(all_out_label_ids, all_gold_label_ids, all_bool_masks)
            self.log_labeling_errors(model, dataset, i_epoch, all_input_strings, out_refined, gold_label_ids_refined)
        return performance

    def train_batch(self, model:NerModel, bert_token_ids:torch.Tensor, bert_token_types, bert_masks, gold_label_ids, input_strings, word_vec_input_tensors):
        '''Run one optimization step on a single batch.

        Returns (average_loss, out_refined, refined_gold_label_ids), where the
        refined tensors are flattened and [PAD]-filtered (see flatten_and_refine).
        '''
        # Put the model into training mode.
        model.train()
        # Clear the previous step's gradients so they don't accumulate.
        self.optimizer.zero_grad()
        bool_masks = bert_masks == 1
        if model.use_crf:
            # Encoder forward pass (everything before classification).
            out = model.forward_before_classification(
                                                    word_vec_input_tensors,
                                                    bert_token_ids,
                                                    bert_token_types,
                                                    bert_masks,
                                                    self.device,
                                                    )
            # CRF loss.
            loss = model.neg_log_likelihood_loss_2(out,
                                                    bool_masks,
                                                    gold_label_ids,
                                                    )
            out = model.classify(out, bert_masks)
            out_refined, gold_label_ids = self.flatten_and_refine(out, gold_label_ids, bool_masks)
        else:
            out = model(input_strings, 
                        word_vec_input_tensors,
                        bert_token_ids,
                        bert_token_types,
                        bert_masks,
                        gold_label_ids,
                        self.device
                        )
            out_refined, gold_label_ids = self.flatten_and_refine(out, gold_label_ids, bool_masks)
            loss = self.ce_criterion(out_refined, gold_label_ids)
        # Backward pass.
        loss.backward()
        # Update model parameters.
        self.optimizer.step()
        batch_size, _ = bert_masks.size()
        # NOTE(review): train() treats these losses as already batch-averaged
        # (CrossEntropyLoss defaults to mean reduction), so dividing by
        # batch_size again may double-average — confirm the intended scaling.
        average_loss = loss.item() / batch_size
        return average_loss, out_refined, gold_label_ids

    def to_device(self, t:torch.Tensor) -> torch.Tensor:
        if t is not None:
            return t.to(self.device)
        return t

    def auto_stop(self, performance:float, test_every_n_epoch:int, stop_criterion:float, train_time_start:float):
        # 自动中止
        performance_increment = performance - self.prev_performance
        self.prev_performance = performance
        slope = performance_increment / test_every_n_epoch
        if slope > 0 and slope < stop_criterion:
            print(f'经过 {test_every_n_epoch} 轮，性能提升 {performance_increment}，提升速度 {slope} 每轮，低于阈值 {stop_criterion}，训练结束')
            hours, minutes, seconds = self.time_span_since_start(train_time_start)
            print(f'训练总时长 {hours} 小时 {minutes} 分 {seconds} 秒')
            return True
        else:
            print(f'经过 {test_every_n_epoch} 轮，性能提升 {performance_increment}，提升速度 {slope} 每轮')
            return False

    def flatten_and_refine(self, out_label_tensor, gold_label_indices, bool_mask):
        '''将所有序列首尾连接成一维, 同时去除 [PAD] 标签'''
        if out_label_tensor.dtype is torch.float32 or out_label_tensor.dtype is torch.float64:
            out_label_tensor = out_label_tensor.reshape(-1, len(self.known_labels)) # 从(b_size, seq_len, n_lab)变为(b_size * seq_len, n_lab) 
        elif out_label_tensor.dtype is torch.int32 or out_label_tensor.dtype is torch.int64:
            out_label_tensor = out_label_tensor.reshape(-1) # 从(b_size, seq_len)变为(b_size * seq_len) 
        else:
            raise Exception('意外的张量数据类型')
        gold_label_indices = gold_label_indices.reshape(-1) # 从(b_size, seq_len)变为(b_size * seq_len)
        bool_mask = bool_mask.reshape(-1) # 从(b_size, seq_len)变为(b_size * seq_len)
        out_label_tensor = out_label_tensor[bool_mask]
        gold_label_indices = gold_label_indices[bool_mask]
        return out_label_tensor, gold_label_indices

    def refine_only(self, out_label_tensor, gold_label_tensor, mask):
        '''仅去除 [PAD] 标签'''
        out_labels = []
        gold_labels = []
        for i in range(out_label_tensor.size(dim=0)):
            sentence_out_labels = out_label_tensor[i, :]
            sentence_gold_labels = gold_label_tensor[i, :]
            sentence_mask = mask[i, :]
            sentence_selector = sentence_mask == 1
            out_labels.append(sentence_out_labels[sentence_selector].to('cpu'))
            gold_labels.append(sentence_gold_labels[sentence_selector].to('cpu'))
        return out_labels, gold_labels

    def log_labeling_errors(self, model:NerModel, dataset:Dataset, i_epoch:int, in_strings:List[str], out_label_tensor:List[torch.Tensor], gold_label_indices:List[torch.Tensor]):
        '''Append every sentence containing at least one wrong prediction to a markdown log.

        NOTE: mutates out_label_tensor in place when converting float logits to
        integer label ids — the caller's list is modified.
        '''
        # CRF output is already integer label ids, while the Softmax path emits
        # a float vector per token — normalize both to integer label ids here.
        for i in range(len(out_label_tensor)):            
            if out_label_tensor[i].dtype is torch.float32 or out_label_tensor[i].dtype is torch.float64:
                out_label_tensor[i] = out_label_tensor[i].argmax(dim = 1)

        if not os.path.exists(f'./statistics/{self.train_id}'):
            os.makedirs(f'./statistics/{self.train_id}')
        with open(f'./statistics/{self.train_id}/错误标注记录_{model.get_description()}_{dataset.corpus_type}.md', 'a+', encoding='utf8') as f:
            f.write(f'# 错误标注记录第 {i_epoch} 轮 {datetime.datetime.now()}\n\n')
            f.write(f'## 标签\n\n')
            for i, label_text in enumerate(self.known_labels):
                f.write(f'{i}. {label_text}\n\n')
            f.write(f'## 错误标注\n\n')
            error_sentence_count = 0
            for in_string, out, gold in zip(in_strings, out_label_tensor, gold_label_indices):
                # Any nonzero element in the difference marks a mislabelled token.
                difference = out - gold
                if difference.count_nonzero() > 0:
                    f.write(f'|原文|{"|".join([c for c in in_string])}|\n')
                    f.write(f'|----|{"|".join(["----" for c in in_string])}|\n')
                    f.write(f'|标准标签|{"|".join([str(i) for i in gold.tolist()])}|\n')
                    f.write(f'|预测标签|{"|".join([str(i) for i in out.tolist()])}|\n\n')
                    error_sentence_count += 1
            f.write(f'共有 {error_sentence_count} 个句子出现了错误标注\n\n')

    def statistics(self, combine_label_head_and_tail:bool, model:NerModel, dataset:Dataset, statistics_type:str, batch_size:int, i_epoch:int, out_label_tensor, gold_label_indices, epoch_mean_loss_value):
        '''Compute, persist and publish evaluation statistics; return the macro F1.

        When combine_label_head_and_tail is True, B-/I- prefixes are stripped so
        head and tail of the same entity type count as one label. Writes a
        markdown report under ./statistics/<train_id>/ and TensorBoard scalars.
        '''
        # CRF output is integer label ids, Softmax output is a float vector per
        # token — normalize both to integer label ids.
        if out_label_tensor.dtype is torch.float32 or out_label_tensor.dtype is torch.float64:
            out_label_indices = out_label_tensor.argmax(dim = 1)
        elif out_label_tensor.dtype is torch.int32 or out_label_tensor.dtype is torch.int64:
            out_label_indices = out_label_tensor
        else:
            raise Exception('意外的张量数据类型')

        predict_vector = out_label_indices.cpu().numpy()
        actual_vector = gold_label_indices.cpu().numpy()

        if combine_label_head_and_tail:
            # Merge B- and I- variants into a single label per entity type.
            statistics_type += ' 整标签'
            new_labels = set()
            for label in self.known_labels:
                if label.startswith('B-') or label.startswith('I-'):
                    label_stem = label[2:]
                    new_labels.add(label_stem)
                else:
                    new_labels.add(label)
            new_labels = list(new_labels)
            new_labels.sort()
            # Map each old label index to its index in the merged label list.
            label_convert_dict = dict()
            for i, label in enumerate(self.known_labels):
                if label.startswith('B-') or label.startswith('I-'):
                    label_convert_dict[i] = new_labels.index(label[2:])
                else:
                    label_convert_dict[i] = new_labels.index(label)
            # Remap on copies so already-rewritten values are never re-matched.
            copied_predict_vector = predict_vector.copy()
            copied_actural_vector = actual_vector.copy()
            for old, new in label_convert_dict.items():
                copied_predict_vector[predict_vector == old] = new
                copied_actural_vector[actual_vector == old] = new
            predict_vector = copied_predict_vector
            actual_vector = copied_actural_vector
            statistics_label_list = new_labels
        else:
            # Keep B-/I- heads and tails as distinct labels.
            statistics_type +=' 区分标签头尾'
            statistics_label_list = self.known_labels

        # NOTE(review): pycm derives its class list from the values it actually
        # sees; if some label never occurs in either vector, the columns of
        # cm.class_stat may not line up with statistics_label_list — confirm.
        cm = ConfusionMatrix(
            actual_vector = actual_vector,
            predict_vector = predict_vector,           
        )
        # Markdown table of corpus-level metrics.
        overall_stat_str = '|项目|值|\n'
        overall_stat_str += '|----|----|\n'
        for item_key, item_value in cm.overall_stat.items():
            overall_stat_str += f'|{item_key}|{item_value}|\n'
        # Markdown table of per-label metrics.
        class_stat_str = '|项目|' + '|'.join(statistics_label_list) + '|\n'
        class_stat_str += '|----|' + '----|' * len(statistics_label_list) + '\n'
        for item_key, item_value in cm.class_stat.items():
            class_stat_str += f'|{item_key}|'
            for class_v in item_value.values():
                class_stat_str += f'{class_v}|'
            class_stat_str += '\n'
        # Persist the statistics to a markdown report (append mode).
        if not os.path.exists(f'./statistics/{self.train_id}'):
            os.makedirs(f'./statistics/{self.train_id}')
        md_file_name = f'./statistics/{self.train_id}/{statistics_type}_统计数据_{model.get_description()}_{dataset.corpus_type}.md'
        with open(md_file_name, 'a+', encoding='utf8') as f:
            f.write(f'# {statistics_type}统计数据 {i_epoch} 轮 {datetime.datetime.now()}\n\n')
            f.write(f'平均损失 {epoch_mean_loss_value}\n\n')
            f.write(f'批次大小 {batch_size}\n\n')
            f.write(f'epoch {i_epoch}\n\n')
            f.write(f'## 预训练模型\n\n')
            f.write(f'{model.pretrained_model_name}\n\n')
            f.write(f'## 总体指标\n\n')
            f.write(overall_stat_str + '\n')
            f.write(f'## 各标签指标\n\n')
            f.write(class_stat_str + '\n')        
        # Publish the same numbers to TensorBoard.
        self.tensorboard_writer.add_scalar(f'{statistics_type} 损失', epoch_mean_loss_value, i_epoch)
        self.tensorboard_writer.add_scalar(f'{statistics_type} 总体 F1', cm.overall_stat['F1 Macro'], i_epoch)
        f1_dict = {statistics_label_list[label_index]: value for label_index, value in cm.class_stat['F1'].items()}
        self.tensorboard_writer.add_scalars(f'{statistics_type} 各标签 F1', f1_dict, i_epoch)
        return cm.overall_stat['F1 Macro']

    def time_span_since_start(self, start_time:float) -> None:
        time_span = time.perf_counter() - start_time
        seconds = floor(time_span % 60)
        time_span /= 60
        minutes = floor(time_span % 60)
        time_span /= 60
        hours = floor(time_span)
        return hours, minutes, seconds

    def write_hot_config(self) -> None:
        config = {
            "learning_rate" : self.learning_rate,
            "batch_size" : self.train_batch_size,
        }
        with open('hot_config.json', mode='w') as f:
            json.dump(config, f)

    def _save_state(self, model:NerModel, train_dataset:Dataset, test_dataset:Dataset, i_epoch:int, batch_count:int) -> None:
        y = datetime.datetime.now().year
        m = datetime.datetime.now().month
        d = datetime.datetime.now().day
        h = datetime.datetime.now().hour
        mi = datetime.datetime.now().minute
        s = datetime.datetime.now().second
        ms = datetime.datetime.now().microsecond
        model_name = model.get_description()
        dataset_name = self.dataset_name
        if not os.path.exists(f'./saves/{self.train_id}'):
            os.makedirs(f'./saves/{self.train_id}')
        torch.save(
            {
                'model' : model,
                'pretrained_model_name' : model.pretrained_model_name,
                'optimizer_state' : self.optimizer.state_dict(),
                'train_corpus_path' : train_dataset.corpus_path,
                'test_corpus_path' : test_dataset.corpus_path,
                'corpus_type' : train_dataset.corpus_type,
                'known_labels' : model.known_labels,
                'sentence_max_len' : train_dataset.sentence_max_len,
                'epoch' : i_epoch,
                'batch_count' : batch_count,                
                'train_id' : self.train_id,
                'train_batch_size' : self.train_batch_size,
                'test_batch_size' : self.test_batch_size,
                'learning_rate' : self.learning_rate,
            },
            f'./saves/{self.train_id}/{model_name}_{dataset_name}_epoch{i_epoch}_{y:02d}-{m:02d}-{d:02d}_{h:02d}-{mi:02d}-{s:02d}_{ms:03d}.pt'
        )