import os.path

from . import get_data
import torch
import numpy as np
import datetime
import random
from d2l import torch as d2l
from bdtime import tt
from .time_tools import with_timer
import json


# Dataset / loader hyper-parameters used by the module-level Tokenizer below.
data_size = 1000
batch_size = 32


# Pick GPU when available; all models/tensors in this module move to `device`.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print('--- device:', device)
# Model checkpoints are written under tempdir/models (created if missing).
output_dir = os.path.join('tempdir', 'models')
os.makedirs(output_dir, exist_ok=True)
print('--- output_dir:', output_dir)


def auto_pad_ls(input_ids, pad_token_id, num_steps=None):
    if not num_steps:
        num_steps = max([len(i) for i in input_ids])
    qa = input_ids[0]
    if isinstance(qa, torch.Tensor):
        input_ids = [torch.cat((qa, torch.Tensor([pad_token_id] * (num_steps - len(qa)))), dim=-1) for qa in input_ids]
    else:
        input_ids = [qa + [pad_token_id] * (num_steps - len(qa)) for qa in input_ids]
    return input_ids


class Tokenizer:
    """Vocabulary / encoding helper for the synthetic date-conversion task.

    Generates "<prefix>: <Chinese date>=<formatted date>" samples, builds (or
    loads from disk) a ``d2l.Vocab`` over them, and exposes encode / decode /
    padding utilities plus a ready-made training ``data_iter``.
    """

    def __init__(self, data_size=1000, batch_size=64):
        # Vocab persistence path (JSON dump of the d2l.Vocab attributes).
        self.save_path = 'tempdir/tokenizer.json'

        self.original_dt_type = "%Y年%m月%d日"  # source (Chinese) date format
        self.default_dt_type = "%d/%b/%Y"      # default target date format
        self.default_prefix = "请问"            # neutral question prefix

        # Candidate target formats; each key doubles as a class-label name.
        self.dt_type_dc = {
            'none': "",
            'abbr': "%d/%b/%Y",
            'bar': "%Y-%m-%d",
            'slash': "%Y/%m/%d",
            'dot': '%Y.%m.%d',
            # 'zh': "%Y年%m月%d日",
        }
        self.dt_type_ls = list(self.dt_type_dc.keys())
        self.label_dc = dict(zip(self.dt_type_ls, range(len(self.dt_type_ls))))
        self.reversed_label_dc = {j: i for i, j in self.label_dc.items()}

        self.total_types = len(self.dt_type_ls)

        self.batch_size = batch_size
        self.data_size = data_size
        self._texts_vocab = None
        # Builds self.data_iter / self._texts_vocab and persists the vocab.
        self.get_data_iter()

        self.encoder = self.texts_vocab.token_to_idx
        self.decoder = {j: i for i, j in self.encoder.items()}

        self.unk_token_id = self.texts_vocab.unk
        self.pad_token_id = self.encoder['<pad>']
        self.bos_token_id = self.encoder['<bos>']
        self.eos_token_id = self.encoder['<eos>']

        # NOTE: "spacial" (sic) kept for backward compatibility with callers.
        self.spacial_tokens = [self.unk_token_id, self.pad_token_id, self.bos_token_id, self.eos_token_id]
        self.spacial_symbols = self.decode(self.spacial_tokens)[0]

    def decode(self, idx, concat_symbol=None, skip_spacial_symbols=False):
        """Convert token ids back to tokens / sentences.

        # examples:
            tokenizer.decode(_answer)  # tokens
            tokenizer.decode(_answer, concat_symbol='')  # sentences
            tokenizer.decode(_answer, concat_symbol='', skip_spacial_symbols=True)  # sentences without special symbols
        """
        if skip_spacial_symbols:
            # Replace the boolean flag with the concrete symbols to skip.
            skip_spacial_symbols = self.spacial_symbols
        res = get_data.conv_idx_to_tokens(
            idx, self.texts_vocab, concat_symbol=concat_symbol, skip_spacial_symbols=skip_spacial_symbols
        )
        return res

    def encode(self, ss):
        """Map tokens (one sequence, or a list of sequences) to id lists; unknown tokens -> unk."""
        if ss and isinstance(ss[0], list):
            res = [[self.encoder[i] if i in self.encoder else self.texts_vocab.unk for i in ls] for ls in ss]
        else:
            res = [[self.encoder[i] if i in self.encoder else self.texts_vocab.unk for i in ss]]
        return res

    def get_date_time_data(self, n=1000, seed=None, prefix=True):
        """Generate *n* random date samples.

        Returns (date_cn, date_en, texts, labels); each text reads
        "<prefix>: <cn date>=<formatted date>".  `prefix` semantics:
        True -> prefix is the format key; False -> default_prefix and the
        label is forced to 0; None -> random key or the default prefix.
        """
        if seed is not None:
            np.random.seed(seed)
            # BUGFIX: also seed `random` — the dt_type choices below use the
            # `random` module, so a fixed seed was not actually reproducible.
            random.seed(seed)
        texts = []
        date_cn = []
        date_en = []
        labels = []
        for timestamp in np.random.randint(143835585, 2043835585, n):
            date = datetime.datetime.fromtimestamp(timestamp)
            x_i = date.strftime(self.original_dt_type)

            # Skip index 0 ('none') so every sample has a concrete target format.
            dt_type_key = random.choice(self.dt_type_ls[1:])
            # The formatted target date. (The old second strftime pass over the
            # already-formatted string was a no-op: it contains no '%' directives.)
            y_i = date.strftime(self.dt_type_dc[dt_type_key])

            _dt_type_key = dt_type_key if prefix else self.default_prefix
            if prefix is None:
                _dt_type_key = random.choice(self.dt_type_ls + [self.default_prefix])

            text_i = f"{_dt_type_key}: {x_i}={y_i}"
            label_i = self.label_dc[dt_type_key] if prefix in [True, None] else 0

            date_cn.append(x_i)
            date_en.append(y_i)
            labels.append(label_i)
            texts.append(text_i)

        return date_cn, date_en, texts, labels

    def get_data_iter(self, batch_size=0, data_size=0, is_train=True, texts_vocab=None, prefix=None, seed=1):
        """Build a DataLoader over (input_ids, attention_mask, labels).

        batch_size / data_size of 0 fall back to the instance defaults.  When
        `is_train`, caches the iterator and vocab on self and saves the vocab.
        """
        if not batch_size:
            batch_size = self.batch_size
        if not data_size:
            data_size = self.data_size
        date_cn, date_en, texts, labels = self.get_date_time_data(data_size, seed=seed, prefix=prefix)

        max_seq_length = max([len(ss) for ss in texts])
        num_steps = max_seq_length + 2  # room for <bos>/<eos>

        texts_tokens = get_data.tokenize(texts)
        if texts_vocab is None:
            if os.path.exists(self.save_path):
                if self._texts_vocab is None:
                    print(f'~~~~~~~~~~~~~ load_vocab_from_disk from [{self.save_path}]')
                    self._texts_vocab = self.load_vocab_from_disk()
                texts_vocab = self._texts_vocab
            else:
                print(f'~~~~~~~~~~~~~ save_vocab to [{self.save_path}]')
                texts_vocab = d2l.Vocab(texts_tokens, min_freq=2, reserved_tokens=get_data.SpecialSymbol.all())

        texts_array, texts_valid_len = get_data.build_array_nmt(texts_tokens, texts_vocab, num_steps)
        labels = torch.LongTensor(labels).to(device)
        texts_array = texts_array.to(device)
        texts_valid_len = texts_valid_len.to(device)

        attention_mask = [[1] * _valid_len + [0] * (num_steps - _valid_len) for _valid_len in texts_valid_len]
        attention_mask = torch.LongTensor(attention_mask)

        bos_token_id = texts_vocab['<bos>']
        bos = torch.tensor([bos_token_id] * texts_array.shape[0], device=device).reshape(-1, 1)
        texts_array = torch.cat([bos, texts_array[:, :-1]], 1)  # teacher forcing: prepend <bos>, drop last column

        data_arrays = (texts_array, attention_mask, labels)
        data_iter = d2l.load_array(data_arrays, batch_size)

        if is_train:
            self.texts, self.labels = texts, labels
            self.max_seq_length, self.num_steps = max_seq_length, num_steps
            self.data_iter, self._texts_vocab = data_iter, texts_vocab
            self.per_iter_times = len(data_iter)
            self.save_vocab()

        return data_iter, texts_vocab

    def add_bos(self, input_ids):
        """Prepend <bos> to every row, dropping the last column (teacher forcing)."""
        input_ids = conv_to_tensor(input_ids, torch.LongTensor)
        bos = torch.tensor([self.bos_token_id] * input_ids.shape[0], device=device).reshape(-1, 1)
        res = torch.cat([bos, input_ids[:, :-1]], 1)
        return res

    @property
    def texts_vocab(self):
        """Lazily load the vocab from disk if it is not in memory yet."""
        if self._texts_vocab is None:
            assert os.path.exists(self.save_path), f'旧的`texts_vocab`路径[{self.save_path}]不存在?'
            print(f'~~~~~~~~~~~~~ load_vocab_from_disk from [{self.save_path}]')
            self._texts_vocab = self.load_vocab_from_disk()
        return self._texts_vocab

    def get_batch_data(self, prefix=False, batch_size=None):
        """Return one fresh evaluation batch: (label, input_ids, attention_mask)."""
        if batch_size is None:
            batch_size = self.batch_size
        # BUGFIX: was `tokenizer.get_data_iter(...)` (the module-level global);
        # use self so a second Tokenizer instance works correctly.
        data_iter, _ = self.get_data_iter(
            data_size=batch_size,
            batch_size=batch_size,
            is_train=False,
            texts_vocab=self.texts_vocab,
            prefix=prefix
        )
        for batch in data_iter:
            input_ids, attention_mask, label = [i.to(device) for i in batch]
            return label, input_ids, attention_mask

    def auto_pad_ls(self, input_ids, pad_token_id=None, num_steps=None):
        """Pad with this tokenizer's pad token (delegates to module-level auto_pad_ls)."""
        if pad_token_id is None:
            pad_token_id = self.pad_token_id
        input_ids = auto_pad_ls(input_ids, pad_token_id=pad_token_id, num_steps=num_steps)
        return input_ids

    def batch_pad(self, text=None, token=None, add_bos=True, auto_pad=True):
        """Pad a batch given either raw `text` strings or pre-tokenized `token` ids.

        Returns (input_ids, attention_mask).
        """
        if text:
            # Encode raw text through the vocab.
            token = get_data.tokenize(text)
            num_steps = max([len(i) for i in token]) + 1
            input_ids, valid_len = get_data.build_array_nmt(token, vocab=self.texts_vocab, num_steps=num_steps)
        else:
            valid_len = [len(i) for i in token]
            lens = max(valid_len)
            num_steps = lens
            input_ids = []
            for i in token:
                if isinstance(i, torch.Tensor):
                    i = i.cpu().numpy().tolist()
                input_ids.append(i + [self.pad_token_id] * (lens - len(i)))

        if auto_pad:
            input_ids = auto_pad_ls(input_ids, pad_token_id=self.pad_token_id, num_steps=num_steps)

        if add_bos:
            # BUGFIX: was `tokenizer.bos_token_id` (the module-level global); use self.
            if input_ids[0][0] != self.bos_token_id:
                input_ids = conv_to_tensor(input_ids, torch.LongTensor)
                input_ids = self.add_bos(input_ids).cpu().numpy().tolist()
                # NOTE(review): add_bos keeps the row width unchanged, so after
                # this increment the mask is one column wider than input_ids —
                # confirm whether that is intended before changing it.
                num_steps += 1
        attention_mask = get_data.get_attention_mask_by_valid_len(valid_len, num_steps)
        return input_ids, attention_mask

    def save_vocab(self):
        """Persist the vocab's attribute dict as JSON at self.save_path."""
        with open(self.save_path, 'w') as f:
            json.dump(self.texts_vocab.__dict__, f, indent=4)

    def load_vocab_from_disk(self):
        """Rebuild an empty d2l.Vocab and overwrite its attributes from the JSON dump."""
        texts_vocab = d2l.Vocab([], min_freq=2, reserved_tokens=get_data.SpecialSymbol.all())
        with open(self.save_path, 'r') as f:
            params = json.load(f)
            texts_vocab.__dict__.update(params)
        self._texts_vocab = texts_vocab
        return texts_vocab


# Build the module-level tokenizer once at import time (timed via bdtime).
with with_timer(f"生成tokenizer和data_iter, data_size: {data_size}", tt) as wt:
    tokenizer = Tokenizer(data_size=data_size, batch_size=batch_size)


class ModelCLS(torch.nn.Module):
    """Small BERT encoder plus a linear head for date-format classification."""

    def __init__(self, labels, vocab_size):
        super().__init__()
        from transformers import BertConfig, BertModel

        # Deliberately tiny BERT: 64-d hidden, 4 heads, 4 layers.
        cfg_kwargs = dict(
            hidden_size=64,
            intermediate_size=64,
            max_position_embeddings=128,
            num_attention_heads=4,
            num_hidden_layers=4,
            vocab_size=vocab_size,
        )
        self.config = BertConfig(**cfg_kwargs)
        self.feature = BertModel(self.config)

        # Dropout + projection from the pooled features to class logits.
        self.fc_out = torch.nn.Sequential(
            torch.nn.Dropout(p=0.1),
            torch.nn.Linear(64, labels),
        )

        self.to(device)
        self.train()

    def forward(self, input_ids, attention_mask):
        pooled = self.feature(input_ids=input_ids, attention_mask=attention_mask).pooler_output
        return self.fc_out(pooled)


def test_predict_cls(model_cls, input_ids, attention_mask, label, show_times=5):
    """Evaluate the classifier on one prepared batch.

    Prints up to `show_times` example predictions and returns the accuracy
    rounded to 3 decimals.
    """
    with torch.no_grad():
        preds = model_cls(input_ids=input_ids, attention_mask=attention_mask).argmax(1)
        acc = (preds == label).sum().item() / len(label)
        accuracies = [acc]

        n_show = min(show_times, len(input_ids))
        for i in range(n_show):
            pred_type = tokenizer.reversed_label_dc[preds[i].item()]
            label_type = tokenizer.reversed_label_dc[label[i].item()]
            test_i = [''.join(ls) for ls in tokenizer.decode(input_ids[i].tolist())][0]
            info = f'{"True ---" if pred_type == label_type else "False ***"} label: pred_type -> [{label_type}]: [{pred_type}], test_i: [{test_i}]'
            print(info)

        assert len(accuracies), 'accuracies不能为空!'
        accuracy = round(sum(accuracies) / len(accuracies), 3)
        print(f"------ model_cls accuracy: {accuracy}")
    return accuracy


def test_model_cls(model_cls, test_data_iter, show_times=5):
    """Evaluate the classifier over `test_data_iter` and return mean accuracy.

    Prints up to `show_times` example predictions from the first batch.
    """
    with torch.no_grad():
        accuracies = []
        for _epoch, batch in enumerate(test_data_iter):
            input_ids, attention_mask, label = [i.to(device) for i in batch]
            logits = model_cls(input_ids=input_ids, attention_mask=attention_mask)
            logits = logits.argmax(1)
            acc = (logits == label).sum().item() / len(label)
            accuracies.append(acc)

            if _epoch == 0:
                # BUGFIX: clamp to the batch size — the original `range(show_times)`
                # indexed past the end when the first batch held fewer samples
                # (sibling test_predict_cls already clamps the same way).
                for i in range(min(show_times, len(input_ids))):
                    pred_type = tokenizer.reversed_label_dc[logits[i].item()]
                    label_type = tokenizer.reversed_label_dc[label[i].item()]
                    test_i = [''.join(ls) for ls in tokenizer.decode(input_ids[i].tolist())][0]
                    info = f'--- pred_type: label -> [{pred_type}]: [{label_type}], test_i: [{test_i}]'
                    print(info)
        assert len(accuracies), 'accuracies不能为空!'
        res = round(sum(accuracies) / len(accuracies), 3)
    return res


class ModelGEN(torch.nn.Module):
    """Tiny GPT-2 language model sized for the date-conversion vocabulary."""

    def __init__(self):
        super().__init__()
        from transformers import GPT2Config, GPT2Model

        # Special-token ids come from the module-level tokenizer.
        cfg_kwargs = dict(
            bos_token_id=tokenizer.bos_token_id,
            eos_token_id=tokenizer.eos_token_id,
            pad_token_id=tokenizer.pad_token_id,
            n_embd=64,
            n_head=4,
            n_layer=4,
            n_positions=128,
            vocab_size=len(tokenizer.decoder),
        )
        self.config = GPT2Config(**cfg_kwargs)
        self.feature = GPT2Model(self.config)

        # Untied LM head: hidden states -> vocabulary logits.
        self.fc_out = torch.nn.Linear(64, self.config.vocab_size, bias=False)

        self.to(device)
        self.train()

    def forward(self, input_ids, attention_mask):
        hidden = self.feature(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        return self.fc_out(hidden)


def test_model_gen(model_gen, test_data_iter, show_times=5):
    """Rough evaluation loop for the generator over `test_data_iter`.

    NOTE(review): this looks copy-pasted from test_model_cls and appears
    broken for the generator case — see inline notes; verify before relying
    on the reported accuracy.
    """
    from d2l import torch as d2l

    # d2l.bleu()
    with torch.no_grad():
        accuracies = []
        for batch, _epoch in zip(test_data_iter, range(len(test_data_iter))):
            input_ids, attention_mask, label = [i.to(device) for i in batch]
            _logits = model_gen(input_ids=input_ids, attention_mask=attention_mask)
            # NOTE(review): argmax(2) yields per-token predictions of shape
            # (batch, seq); comparing them against `label` (per-sample class
            # ids) broadcasts and is unlikely to be the intended accuracy.
            logits = _logits.argmax(2)
            acc = (logits == label).sum().item() / len(label)
            accuracies.append(acc)

            if _epoch == 0:
                for i in range(show_times):
                    # NOTE(review): logits[i] is a vector here, so .item()
                    # raises for seq_len > 1 — confirm this branch ever runs.
                    pred_type = tokenizer.reversed_label_dc[logits[i].item()]
                    label_type = tokenizer.reversed_label_dc[label[i].item()]
                    test_i = [''.join(ls) for ls in tokenizer.decode(input_ids[i].tolist())][0]
                    info = f'--- pred_type: label -> [{pred_type}]: [{label_type}], test_i: [{test_i}]'
                    print(info)
        assert len(accuracies), 'accuracies不能为空!'
        res = round(sum(accuracies) / len(accuracies), 3)
    return res


# Lazily-built GPT2LMHeadModel wrapper; created on first call to generate().
generater = None


def generate(model_gen, input_ids):
    """Sample continuations for `input_ids` from `model_gen`.

    Lazily builds (and caches in the module-level `generater`) a
    GPT2LMHeadModel wrapper that reuses the trained transformer and head.
    """
    global generater
    if not generater:
        # Wrapper class used only for generation; shares model_gen's weights.
        from transformers import GPT2LMHeadModel
        generater = GPT2LMHeadModel(model_gen.config)
        generater.transformer = model_gen.feature
        generater.lm_head = model_gen.fc_out
        generater.to(device)

    sample_kwargs = dict(
        min_length=-1,
        top_k=0.0,
        top_p=1.0,
        do_sample=True,
        pad_token_id=tokenizer.pad_token_id,
        max_new_tokens=25,
        eos_token_id=tokenizer.eos_token_id,
    )
    return generater.generate(input_ids=input_ids, **sample_kwargs)


# Checkpoint paths for the classifier, generator and PPO models.
output_path__cls_model = os.path.join(output_dir, 'cls.model')
output_path__gen_model = os.path.join(output_dir, 'gen.model')
output_path__ppo_model = os.path.join(output_dir, 'ppo.model')


def conv_to_tensor(input_ids, tensor_type=torch.LongTensor, to_device=True):
    """Convert ids to `tensor_type`, optionally moving them to `device`.

    Accepts a tensor, a numpy array, a (possibly empty) list of lists, or a
    list of 1-D tensors.

    Raises:
        TypeError: for unsupported element or container types (the original
            silently fell through and later crashed on `.to(device)`).
    """
    if isinstance(input_ids, torch.Tensor):
        input_ids = tensor_type(input_ids.cpu().numpy())
    elif isinstance(input_ids, np.ndarray):
        # Generalization: accept numpy arrays directly.
        input_ids = tensor_type(input_ids)
    elif isinstance(input_ids, list):
        if not input_ids:
            # BUGFIX: an empty list used to raise IndexError on input_ids[0].
            input_ids = tensor_type([])
        elif isinstance(input_ids[0], list):
            input_ids = tensor_type(input_ids)
        elif isinstance(input_ids[0], torch.Tensor):
            stacked = torch.stack(input_ids)
            input_ids = tensor_type(stacked.cpu().numpy())
        else:
            raise TypeError(f"input_ids[0]类型错误? type: [{type(input_ids[0])}]")
    else:
        raise TypeError(f"input_ids类型错误? type: [{type(input_ids)}]")
    if to_device:
        input_ids = input_ids.to(device)
    return input_ids


from bdtime import show_ls


class TestModel:
    """Static helpers: draw QA batches from the tokenizer and query the generator."""

    @staticmethod
    def show_decode_input_ids(input_ids, n=5):
        """Print the first `n` decoded sequences."""
        show_ls([''.join(ls) for ls in tokenizer.decode(input_ids)[:n]])

    @staticmethod
    def get_question(prefix=True, batch_size=32, ret_real_answer=False):
        """Draw a batch and split each sample at '=' into question / answer.

        Returns (label, question, attention_mask[, real_answer]); `question`
        and `real_answer` are lists of 1-D LongTensors on `device`.
        """
        label, input_ids, attention_mask = tokenizer.get_batch_data(prefix=prefix, batch_size=batch_size)
        label = conv_to_tensor(label, torch.LongTensor)

        eq_id = tokenizer.encoder['=']
        # Keep only the question part: everything up to and including '='.
        # NOTE: assumes exactly one '=' per sample (true for the generated texts).
        if isinstance(input_ids[0], list):
            # BUGFIX: entries in this branch were plain lists, but the original
            # then called .cpu() on them and crashed — convert to tensors here.
            question = [torch.LongTensor(i[:i.index(eq_id) + 1]).to(device) for i in input_ids]
        else:
            question = [q[:torch.where(q == eq_id)[0].item() + 1].long().to(device) for q in input_ids]

        if ret_real_answer:
            # The reference answer: everything after '='.
            if isinstance(input_ids[0], list):
                real_answer = [torch.LongTensor(i[i.index(eq_id) + 1:]).to(device) for i in input_ids]
            else:
                real_answer = [i[torch.where(i == eq_id)[0].item() + 1:].long().to(device) for i in input_ids]
            # tokenizer.decode(real_answer, '')
            return label, question, attention_mask, real_answer

        return label, question, attention_mask

    @staticmethod
    def test_gen__get_answer(question, model_gen):
        """Generate one answer per question.

        Could be turned into a single batched call if the question lengths
        were all equal.
        """
        _answer = [generate(model_gen, q.unsqueeze(0)) for q in question]
        # Trim: keep only the newly generated tokens after the prompt.
        answer = [a[0, len(q):] for q, a in zip(question, _answer)]
        return answer


def test_gen(model_gen=None, test_times=3, my_questions=None, prefix=False, batch_size=1, skip_spacial_symbols=False):
    """Sample from the generator and print decoded question/answer pairs.

    Args:
        model_gen: generator model; loaded from output_path__gen_model when None.
        test_times: number of sampling rounds per question.
        my_questions: optional raw question strings; otherwise a random batch
            is drawn via TestModel.get_question.
        prefix: forwarded to TestModel.get_question.
        batch_size: size of the drawn batch when my_questions is None.
        skip_spacial_symbols: drop special symbols when decoding.
    """
    from utils.my_tools import TestModel

    if model_gen is None:
        assert os.path.exists(output_path__gen_model), f"model_gen不存在? output_path__gen_model: [{output_path__gen_model}]"
        # NOTE(review): torch.load of a whole model object — the ModelGEN class
        # must be importable at load time for unpickling to succeed.
        model_gen: ModelGEN = torch.load(output_path__gen_model)

    # _, input_ids = TestModel.get_question(prefix=True)
    if my_questions:
        # tokenizer.default_prefix
        # input_ids, attention_mask = tokenizer.batch_pad(text=my_questions)
        # from utils.my_tools import conv_to_tensor
        # input_ids = conv_to_tensor(input_ids, torch.LongTensor)
        # attention_mask = conv_to_tensor(attention_mask, torch.LongTensor)

        # Pad each user-supplied question independently.
        input_ids_ls = []
        attention_mask_ls = []
        for my_q in my_questions:
            _input_ids, _attention_mask = tokenizer.batch_pad(text=[my_q])
            input_ids_ls.append(_input_ids[0])
            attention_mask_ls.append(_attention_mask[0])
    else:
        _, input_ids_ls, attention_mask, targets = TestModel.get_question(prefix=prefix, batch_size=batch_size, ret_real_answer=True)
        # input_ids
        input_ids_ls, attention_mask_ls = tokenizer.batch_pad(token=input_ids_ls)
        # [len(i) for i in input_ids]
        assert len(input_ids_ls[0]) == len(attention_mask_ls[0]), f'长度不等? {len(input_ids_ls[0])} != {len(attention_mask_ls[0])}'
        # [len(i) for i in targets]

    from utils.my_tools import conv_to_tensor

    # Decode each question once, then sample `test_times` answers for it.
    for input_ids in input_ids_ls:
        input_ids = conv_to_tensor([input_ids], torch.LongTensor)
        question_str_ls = tokenizer.decode(input_ids, '', skip_spacial_symbols=skip_spacial_symbols)

        for i in range(test_times):
            # answer_ls = generate(model_gen, input_ids)
            answer_ls = TestModel.test_gen__get_answer(input_ids, model_gen)
            answer_str_ls = tokenizer.decode(answer_ls, concat_symbol="", skip_spacial_symbols=skip_spacial_symbols)

            show_ls([f'--- test_gen [{i + 1}] / {test_times} --- [{q}{a}]' for q, a in list(zip(question_str_ls, answer_str_ls))[:5]])


def show_qap(question, answer, predict, end=5, skip_spacial_symbols=True):
    """Print question+answer+prediction triples (first `end` of them) as text."""
    def _dec(ids):
        return tokenizer.decode(ids, '', skip_spacial_symbols)

    triples = list(zip(question, answer, predict))[:end]
    show_ls([_dec(q) + _dec(a) + _dec(p) for q, a, p in triples])


class ModelPPO(torch.nn.Module):
    """Actor-critic wrapper: reuses model_gen's transformer, adds a value head."""

    def __init__(self, model_gen):
        super().__init__()
        self.model_gen = model_gen
        # Value head producing one scalar per token position.
        self.v_head = torch.nn.Sequential(
            torch.nn.Dropout(0.1),
            torch.nn.Linear(64, 1),
        )
        self.to(device)
        self.train()

    def forward(self, input_ids, attention_mask):
        hidden = self.model_gen.feature(
            input_ids=input_ids,
            attention_mask=attention_mask,
            output_hidden_states=True).last_hidden_state

        policy_logits = self.model_gen.fc_out(hidden)
        values = self.v_head(hidden).squeeze(-1)
        return policy_logits, values


# remake = False  # rebuild from scratch (delete cached tokenizer / model files)
# if remake:
#     for f_path in [tokenizer.save_path, output_path__gen_model, output_path__ppo_model, output_path__cls_model]:
#         if os.path.exists(f_path):
#             os.remove(f_path)






