import os
import sentencepiece
import json
import torch.nn
import logging
from torch.nn import functional as F
from transformers import XLNetModel, PretrainedConfig
from tool import torch_tool, attentions
from loaders.data_iflytek import get_data, read_json_list

# Configure root logging once at import time so every module shares one format.
logging.basicConfig(level=logging.INFO, format='[%(levelname)s %(filename)s %(funcName)s:%(lineno)d] %(message)s')
# Name the logger after the module (not the file path): standard logging convention,
# so the logger hierarchy follows the package structure.
log = logging.getLogger(__name__)


def xlnet_forward(self, input_ids=None):
    """Simplified XLNet forward pass (no mems, segments, or attention masks).

    Monkey-patched onto ``XLNetModel``. Runs layers ``0..self.out_layer`` and
    returns the hidden states of the layers listed in ``self.sweet_add_layers``,
    each converted back to batch-first ``(bsz, seq, dim)``.
    """
    # XLNet internals are time-major: (seq_len, bsz, ...)
    input_ids = input_ids.transpose(0, 1).contiguous()
    qlen, bsz = input_ids.shape[0], input_ids.shape[1]

    hidden = self.dropout(self.word_embedding(input_ids))  # (seq, bsz, dim)
    hidden_g = None  # no query stream (no target_mapping supplied)

    # Without mems the key length equals the query length.
    pos_emb = self.dropout(self.relative_positional_encoding(qlen, qlen, bsz=bsz))

    collected = []
    for idx in range(self.out_layer + 1):
        layer_out = self.layer[idx](hidden, hidden_g, attn_mask_h=None, attn_mask_g=None,
                                    r=pos_emb, seg_mat=None, mems=None, target_mapping=None, head_mask=None)
        hidden, hidden_g = layer_out[:2]
        if idx in self.sweet_add_layers:
            collected.append(hidden)

    # Back to batch-first for the downstream classifier heads.
    return [h.permute(1, 0, 2).contiguous() for h in collected]


class XlnetEncoder(torch.nn.Module):
    """Wraps an ``XLNetModel`` whose ``forward`` is monkey-patched to return the
    hidden states of selected layers.

    Loads pretrained weights when ``pytorch_model.bin`` exists; otherwise builds
    a randomly initialized model from ``config.json`` with the sentencepiece
    vocabulary size.
    """

    def __init__(self, pretrain_model_dir, sweet_add_layers, dropout=0.1):
        super().__init__()
        # Replace the library forward with the simplified multi-layer-output version.
        XLNetModel.forward = xlnet_forward
        xlnet_model_path = os.path.join(pretrain_model_dir, 'pytorch_model.bin')
        xlnet_config_path = os.path.join(pretrain_model_dir, 'config.json')
        xlnet_sp_path = os.path.join(pretrain_model_dir, 'spiece.model')
        # Config and sentencepiece files are mandatory in both load paths.
        if not os.path.isfile(xlnet_sp_path):
            log.error(f'缺少文件 {xlnet_sp_path}')
            raise ValueError('缺少文件')
        if not os.path.isfile(xlnet_config_path):
            log.error(f'缺少文件 {xlnet_config_path}')
            raise ValueError('缺少文件')

        if os.path.isfile(xlnet_model_path):
            self.pretrain_model = XLNetModel.from_pretrained(pretrain_model_dir)
        else:
            log.warning(f'{xlnet_model_path} 文件不存在 以配置文件初始化xlnet模型')
            sp = sentencepiece.SentencePieceProcessor()
            sp.load(xlnet_sp_path)
            vocab_size = sp.get_piece_size()

            # BUGFIX: close the config file deterministically instead of leaking
            # the handle from json.load(open(...)).
            with open(xlnet_config_path, encoding='utf-8') as f:
                config_json = json.load(f)
            config_obj = PretrainedConfig(**config_json)
            config_obj.vocab_size = vocab_size  # align with the sentencepiece vocab
            self.pretrain_model = XLNetModel(config_obj)

        # Layers whose outputs the patched forward should collect.
        self.pretrain_model.sweet_add_layers = sweet_add_layers
        self.dropout = torch.nn.Dropout(dropout)

    def set_layer(self, out_layer, frozen_layer):
        """Run encoder layers up to ``out_layer``; when ``frozen_layer > 0``,
        freeze the word embedding and layers ``0..frozen_layer`` inclusive."""
        self.pretrain_model.out_layer = out_layer
        if frozen_layer > 0:
            torch_tool.frozen(self.pretrain_model.word_embedding)
            for i in range(frozen_layer + 1):
                torch_tool.frozen(self.pretrain_model.layer[i])

    def forward(self, input_ids):
        # Delegates to the patched XLNet forward; returns a list of (bsz, seq, dim).
        return self.pretrain_model(input_ids)


class SequenceClassifierModule(torch.nn.Module):
    """Pools a sequence via attention over a learned query, then classifies.

    Input ``x ~ (bsz, seq_len, dim)`` -> logits ``(bsz, type_num)``.
    """

    def __init__(self, type_num, dim=100, head_num=2):
        super().__init__()
        self.dim = dim
        rtype_num = 1  # number of pooled "rule" slots (single classification head)
        # Learned attention query; torch.nn.Parameter already sets requires_grad=True,
        # so the duplicated requires_grad flags were redundant.
        self.query = torch.nn.Parameter(torch.rand(1, rtype_num, dim))
        self.attn = attentions.MultiHeadedAttention(h=head_num, d_model=dim)
        self.line = torch.nn.Linear(dim, type_num)
        torch_tool.init_linear(self.line)

    def forward(self, x):
        # x ~ (bsz, seq_len, dim)
        bsz = x.size(0)
        query = self.query.expand(bsz, -1, -1)  # (bsz, rtype_num=1, dim)
        pool_x = self.attn(query, x, x)         # (bsz, rtype_num=1, dim)
        pool_x = pool_x.squeeze(1)              # (bsz, dim)
        logit = self.line(pool_x)               # (bsz, type_num)
        return logit


class Classifier(torch.nn.Module):
    """Progressively trained classifier: one ``SequenceClassifierModule`` head
    per XLNet layer listed in ``sweet_add_layers``.

    ``sweet_add(i)`` activates training stage ``i``; at eval time the softmax
    probabilities of all heads up to the current stage are ensembled.
    """

    def __init__(self, type_list, pretrain_model_dir, sweet_add_layers, d_model=768):
        super().__init__()
        self.sweet_i = 0
        self.sweet_add_layers = sweet_add_layers
        self.types = type_list
        self.input_encoder = XlnetEncoder(pretrain_model_dir, sweet_add_layers=sweet_add_layers, dropout=0.1)
        self.classifier_list = torch.nn.ModuleList(
            SequenceClassifierModule(len(self.types), d_model, head_num=12)
            for _ in range(len(sweet_add_layers))
        )
        # NOTE(review): this runs after the encoder may have loaded pretrained
        # weights — confirm torch_tool.init_model_weights does not re-init them.
        torch_tool.init_model_weights(self)

    def sweet_add(self, i):
        """Switch to stage ``i``: deepen the encoder and freeze earlier heads."""
        self.sweet_i = i
        frozen = 0 if i == 0 else self.sweet_add_layers[i - 1]
        self.input_encoder.set_layer(self.sweet_add_layers[i], frozen_layer=frozen)
        for prev in range(i):
            torch_tool.frozen(self.classifier_list[prev])

    def forward(self, input_ids):
        x_list = self.input_encoder(input_ids)
        if self.training:
            # Training: only the current stage's head is scored.
            pred_logit = self.classifier_list[self.sweet_i](x_list[self.sweet_i])  # (bsz, type_num)
            score = pred_logit
        else:
            # Eval: ensemble all stages so far by summing softmax probabilities.
            score = 0
            for stage in range(self.sweet_i + 1):
                pred_logit = self.classifier_list[stage](x_list[stage])
                score = score + pred_logit.softmax(dim=1)
        pred_idx = score.argmax(dim=1).tolist()
        pred_types = [self.types[i] for i in pred_idx]
        return {'pred_logit': pred_logit, 'pred_types': pred_types, 'pred_idx': pred_idx}


def create_model(config, model_path=None):
    """Build the Classifier from the dataset's label list, optionally load a
    saved checkpoint, and move the model to the available device (GPU/CPU)."""
    labels = read_json_list(os.path.join(config.data_dir, 'labels.json'))
    type_list = [item['label_des'] for item in labels]
    model = Classifier(type_list, config.pretrain_model_dir, config.sweet_add_layers)
    if model_path:
        if os.path.isfile(model_path):
            torch_tool.log.info(f'载入模型文件 {model_path}')
            model = torch_tool.load_cpu_model(model, model_path)
        else:
            torch_tool.log.warning(f'模型文件不存在 {model_path}')
    return torch_tool.cuda(model)


if __name__ == '__main__':
    # Manual smoke test: requires the local pretrained XLNet files to exist.
    demo_model = Classifier(['1', '2'], 'E:/code/data/pretrain_model_file/xlnet/chinese_small_xlnet', [5, 7, 9, 11])
    demo_ids = torch.arange(10).unsqueeze(0)  # (1, 10) dummy token ids
    demo_model.sweet_add(0)
    result = demo_model(demo_ids)
    print(result)
    demo_model.sweet_add(1)
    demo_model.eval()
    result = demo_model(demo_ids)
