import pickle
import random
import math
import json
import os
import time
import torch.optim
import torch.nn.functional
from tqdm import tqdm
from tool import torch_tool, scorers, log_tool
import factory


class Learner:
    """Training driver: standard training plus "sweet-spot" incremental learning.

    The incremental scheme (``train``) first trains until most samples are
    learned, then repeatedly freezes the existing parameters, grows the model
    (``model.sweet_add``) and trains the new capacity on a mix of revision
    data and the remaining error cases.  ``raw_train`` is the conventional
    loop driven by validation score.

    NOTE(review): several paths assume batch_size == 1 (accuracy counting,
    ``show_detail``, ``get_right_data``) — see the inline fixme markers.
    """

    def __init__(self, config):
        # Model constructor and dataset loader are resolved by name.
        create_model = factory.get(config.model_name)
        self.get_data = factory.get(config.data_name)

        self.c = config
        self.folder = config.folder
        os.makedirs(self.folder, exist_ok=True)
        self.logs = log_tool.Logs(self.folder)
        self.sweet_add_layers = config.sweet_add_layers
        self.right_sampling_rate = config.right_sampling_rate
        self.sweet_threshold = config.sweet_threshold
        self.revise_threshold = config.revise_threshold
        self.sweet_add_init_layer = config.sweet_add_init_layer
        self.sweet_add_max_layer = config.sweet_add_max_layer
        self.label_tuples = config.label_tuples
        self.batch_size = config.batch_size  # fixme
        self.logs.brief.info(f'self.batch_size={self.batch_size}')
        self.group_task = config.group_task
        self.etypes = config.etypes
        self.loss_mean = torch_tool.MovingAverage(beta=0.99)
        self.model_path = os.path.join(self.folder, 'model.th')
        self.visual_score_path = os.path.join(self.folder, 'visual_score.html')
        self.score_path = os.path.join(self.folder, 'score.json')
        self.model = create_model(config, self.model_path)
        self.optim = torch_tool.WarmupOptim(self.model.parameters(), lr=self.c.lr, optim=torch.optim.Adam)

        # Resolve the SentencePiece model: prefer the pretrained-model dir,
        # fall back to the run folder.  (A redundant earlier vocab load from
        # config.sp_path was removed — this assignment always wins.)
        sp_path = os.path.join(config.pretrain_model_dir, 'spiece.model')
        if not os.path.isfile(sp_path):
            sp_path = os.path.join(self.folder, 'spiece.model')
            if not os.path.isfile(sp_path):
                self.logs.brief.error(f'{sp_path} 文件不存在')
                raise ValueError('文件不存在')
        self.vocab = torch_tool.SentencePieceVocab(sp_path)
        config.save(os.path.join(self.folder, 'config.yaml'))

    def show_detail(self, dtype, batch, y):
        # Log one example's input text and prediction vs. gold (debug aid).
        # Only supports batch_size == 1 for now.
        bindex = 0  # batch index
        input_ids = batch['input_ids'].tolist()[bindex]
        input_txt = ' '.join([self.vocab.char(t) for t in input_ids])

        pred_type = y['pred_types'][bindex]
        gold_type = batch['gold_types'][bindex]
        if pred_type == gold_type:
            result = f'gold={gold_type} 推测正确'
        else:
            result = f'gold={gold_type} pred={pred_type}'
        txt = f'-------------------------- {dtype} -------------------------------'
        txt += f'\n{input_txt}\n{result}'
        self.logs.all.info(txt)

    def _dump_debug_batch(self, batch):
        """Pickle *batch* to debug_batch.pkl with all tensors moved to CPU."""
        batch_path = os.path.join(self.folder, 'debug_batch.pkl')
        cpu_batch = {k: (v.cpu() if isinstance(v, torch.Tensor) else v)
                     for k, v in batch.items()}
        with open(batch_path, 'wb') as f:
            pickle.dump(cpu_batch, f)

    def train_epoch(self, data, epoch, loss_fn):
        """Run one optimization pass over *data*.

        ``epoch`` may be an int or a descriptive string (see sweet_add_train).
        On a NaN loss the model and offending batch are dumped for debugging
        and the update is skipped.
        """
        self.model.train()
        last_refresh = 0  # epoch-zero timestamp forces an immediate first refresh
        tqdm_data = tqdm(data)
        total = len(data)
        right_num = 0
        for i, batch in enumerate(tqdm_data):
            self.optim.zero_grad()
            y = self.model(batch['input_ids'])
            if y['pred_idx'] == batch['gold_idx'].tolist():
                right_num += 1
            loss = loss_fn(y['pred_logit'], batch['gold_idx'])
            loss_float = loss.item()
            if math.isnan(loss_float):
                # Snapshot state the moment before the NaN so it can be replayed.
                self.save_model(debug=True)
                self._dump_debug_batch(batch)
                self.logs.all.error('loss=nan !!')
                continue
            loss.backward()
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.01, norm_type=2)
            self.optim.step()
            self.loss_mean.update(loss_float)

            now = time.time()
            if now - last_refresh > 2:  # throttle progress-bar text updates
                last_refresh = now
                tqdm_data.set_description(f'training: epoch={epoch}  loss={self.loss_mean.value:0.6f}')
            if i == total - 1:
                accuracy = right_num / total if total > 0 else 0  # fixme: only valid for batch_size == 1
                tqdm_data.set_description(f'training: epoch={epoch}  acc={accuracy:0.5f}  loss={self.loss_mean.value:0.6f}')

    def save_model(self, debug=False):
        """Persist the model; ``debug=True`` writes to a separate debug path."""
        os.makedirs(self.folder, exist_ok=True)
        if debug:
            path = os.path.join(self.folder, 'debug_model.th')
        else:
            path = self.model_path
        torch_tool.save_model(self.model, path)

    def predict(self, txt):
        """Predict the type for a single text string (batch_size == 1)."""
        txt = txt[:self.c.max_len]
        input_ids = [self.vocab.char_id(t) for t in txt]
        input_ids = [input_ids]  # wrap: the model expects a batch dimension
        input_ids = torch_tool.cuda(torch.tensor(input_ids, dtype=torch.long))

        y = self.model(input_ids)
        pred_type = y['pred_types'][0]
        return pred_type

    def save_score(self, score):
        """Write the score dict as pretty-printed JSON to score.json."""
        os.makedirs(self.folder, exist_ok=True)
        with open(self.score_path, 'w') as f:
            json.dump(score, f, ensure_ascii=False, indent=2)
        # save_score_html(self.visual_score_path, score)

    def get_score(self, data, dtype=''):
        """Evaluate the model on *data* and return the F1 score."""
        self.model.eval()
        scorer = scorers.ClassifyF1Scorer()
        with torch.no_grad():
            for i, batch in enumerate(tqdm(data, desc='get_score')):
                y = self.model(batch['input_ids'])
                if i < 3:  # log a few examples for eyeballing
                    dtype = dtype or data.dtype
                    self.show_detail(dtype, batch, y)
                scorer.update(y['pred_idx'], batch['gold_idx'])
        f1 = scorer.f1()
        return f1

    def is_best(self, now_score, all_scores):
        """True when *now_score* beats the best validation score so far."""
        best = all_scores['best']
        return best is None or best['valid'] < now_score['valid']

    def get_right_data(self, data):
        """Split *data* into correctly and incorrectly predicted batches."""
        # fixme: currently only supports batch_size == 1
        self.model.eval()
        right_data, error_data = [], []
        with torch.no_grad():
            for i, batch in enumerate(tqdm(data, desc='get_right_data', leave=False)):
                y = self.model(batch['input_ids'])
                if y['pred_idx'] == batch['gold_idx'].tolist():
                    right_data.append(batch)
                else:
                    error_data.append(batch)
        return right_data, error_data

    def sweet_add_train(self, data, threshold, loss_fn):
        """Inner loop of sweet-spot incremental learning: train until the
        F1 on *data* exceeds *threshold* (at most 100 epochs)."""
        for epoch in range(1, 101):
            t = f'sweet-epoch~{epoch}'
            # Score before training: a previously saved model may already pass.
            score = self.get_score(data, dtype=t)
            self.logs.brief.info(f'{t}  开始前 f1={score}')
            if score > threshold:
                return
            self.train_epoch(data, epoch=t, loss_fn=loss_fn)
            self.save_model()

    def get_sweet_learn_data(self, right_data, error_data):
        """Learning set = sampled fraction of the correct set + all errors."""
        out = random.sample(right_data, math.ceil(self.right_sampling_rate * len(right_data)))
        out += error_data
        random.shuffle(out)
        return out

    def train(self, start=None, end=None, repeat=None):
        """Sweet-spot incremental training: learn the bulk of the data, then
        repeatedly freeze old parameters and grow new ones for error cases."""
        train_data = self.get_data(self.c, 'train', start=start, end=end, repeat=repeat, batch_size=self.batch_size)
        loss_fn = torch.nn.CrossEntropyLoss()

        # First incremental round.
        self.logs.brief.info(f'增量训练...')
        self.logs.brief.info(f'######## 增量训练 sweet_i=0  ########')
        self.model.sweet_add(0)
        self.sweet_add_train(train_data, threshold=self.sweet_threshold, loss_fn=loss_fn)

        # N further incremental rounds.
        for sweet_i in range(1, len(self.sweet_add_layers)):
            right_data, error_data = self.get_right_data(train_data)
            self.logs.brief.info(f'######## 增量训练 sweet_i={sweet_i}  当前的训练集 right~{len(right_data)}  error~{len(error_data)} ########')

            # Grow the model (earlier parameters are frozen).
            self.model.sweet_add(sweet_i)
            if right_data:
                # Revision pass on the correct set (high threshold).
                self.logs.brief.info(f'复习 正确集数据')
                self.sweet_add_train(right_data, threshold=self.revise_threshold, loss_fn=loss_fn)
            # Train the grown model on the learning set (lower threshold).
            learn_data = self.get_sweet_learn_data(right_data, error_data)
            self.logs.brief.info(f'增量学习 sweet_i={sweet_i}  learn_data~{len(learn_data)}')
            self.sweet_add_train(learn_data, threshold=self.sweet_threshold, loss_fn=loss_fn)

        # Finish with the standard loop (validation score drives stopping).
        self.logs.brief.info(f'标准训练流程...')
        self.raw_train(start=start, end=end, repeat=repeat)

    def raw_train(self, start=None, end=None, repeat=None):
        """Standard training loop: fixed epoch budget, keep the model with
        the best validation score, then predict the test set."""
        train_data = self.get_data(self.c, 'train', start=start, end=end, repeat=repeat, batch_size=self.batch_size)
        loss_fn = torch.nn.CrossEntropyLoss()

        # Small training subset for cheap train-score tracking.
        small_num = min(500, end) if end else 500
        small_train = self.get_data(self.c, 'train', start=start, end=small_num, repeat=None, batch_size=self.batch_size)
        valid_data = self.get_data(self.c, 'valid', start=start, end=end, repeat=None, batch_size=self.batch_size)

        all_scores = {'epoch': 0, 'best': None, 'history': []}
        for now_epoch in range(self.c.max_epoch):
            self.train_epoch(train_data, epoch=now_epoch, loss_fn=loss_fn)
            train_score = self.get_score(small_train)
            valid_score = self.get_score(valid_data)
            all_scores['epoch'] = now_epoch
            now_score = {'epoch': now_epoch, 'train': train_score, 'valid': valid_score, 'loss': self.loss_mean.value}
            self.logs.brief.info(f'score={now_score}')
            all_scores['history'].append(now_score)
            if self.is_best(now_score, all_scores):
                all_scores['best'] = now_score
                self.save_model()
            self.save_score(all_scores)
        # Reload the best checkpoint, then predict the test set.
        self.predict_testset()

    def predict_testset(self):
        """Load the best checkpoint and write test-set predictions as JSONL."""
        self.logs.brief.info(f'载入最佳模型参数')
        torch_tool.load_cpu_model(self.model, self.model_path)
        self.logs.brief.info(f'预测测试集 存储文件')
        test_data = self.get_data(self.c, 'test', batch_size=1)
        resp_path = os.path.join(self.folder, 'iflytek_predict.json')
        # `with` guarantees the file is closed even if prediction raises.
        with open(resp_path, 'w', encoding='utf8') as resp_f:
            with torch.no_grad():
                for batch in tqdm(test_data, desc='predict_testset'):
                    bindex = 0
                    y = self.model(batch['input_ids'])
                    pred_idx = y['pred_idx'][bindex]
                    one = {"id": batch['id'][bindex], "label": str(pred_idx)}
                    line = json.dumps(one) + '\n'
                    resp_f.write(line)

    def overfit(self, start=None, end=None, repeat=None):
        """Sanity check: verify the model can overfit the training data."""
        train_data = self.get_data(self.c, 'train', start=start, end=end, repeat=repeat, batch_size=self.batch_size)
        loss_fn = torch.nn.CrossEntropyLoss()
        small_num = min(500, end) if end else 500
        small_train = self.get_data(self.c, 'train', start=start, end=small_num, batch_size=self.batch_size)
        for now_epoch in range(self.c.max_epoch):
            self.train_epoch(train_data, epoch=now_epoch, loss_fn=loss_fn)
            train_score = self.get_score(small_train)
            self.logs.brief.info(f'epoch={now_epoch}  train_score={train_score}')

    def debug_forward(self):
        """Replay a pickled batch through the model (debug aid)."""
        batch_path = os.path.join(self.folder, 'batch.pkl')
        with open(batch_path, 'rb') as f:  # close the file handle properly
            batch = pickle.load(f)
        loss = self.model(**batch)
        self.logs.brief.info(f'loss={loss}')

    def error_case(self):
        """Print error cases for each dataset split (assumes batch_size == 1)."""
        self.model.eval()
        batch_idx = 0  # only supports bsz == 1
        for dtype in ['train', 'valid', 'test']:
            label_scorer = scorers.SetF1Scorer()
            print(f'============================= {dtype} =============================')
            data = self.get_data(self.c, dtype, start=0, end=2, )

            for batch in data:
                with torch.no_grad():
                    y = self.model(**batch)
                print(f'---------------------------------------------------------------')
                print(batch['batch_txt'][0])
                print(batch['rtype__labels'])
                print(y['batch_label_infos'][0])

                for label_infos_pred, label_tuples_gold in zip(y['batch_label_infos'], batch['batch_label_tuples']):
                    label_tuples_pred = set()
                    for info in label_infos_pred:
                        if info['rtype'] in batch['rtypes']:
                            label_tuples_pred.add((info['rtype'], info['label']))
                    label_scorer.update(label_tuples_pred, label_tuples_gold)
            f1 = label_scorer.f1()
            print(f'f1={f1}')

if __name__ == '__main__':
    # Entry point: log GPU availability, build the configured learner,
    # and run the full sweet-spot training pipeline.
    log_tool.Logs.tmp.info(f'GPU = {torch.cuda.is_available()}')
    config = factory.get('c5A')()
    learner = Learner(config)
    # learner.model.sweet_add(3)
    # learner.predict_testset()
    # learner.train(start=0, end=5, repeat=1)  # quick smoke-test run
    learner.train()  # train on the full dataset
