import numpy as np
from torch.nn import Embedding, Module
from transformers.tokenization_bert import BertTokenizer
from cnn_model.resnet import ResNet
import torch
from torch import nn
import os,sys
# Make the script's own directory the CWD so relative paths used below
# (e.g. vocab_file="vocab.txt") resolve next to this file regardless of
# where the interpreter was launched from. NOTE: module-level side effect.
os.chdir(os.path.dirname(__file__))

class FormulaClassification(Module):
    """Formula classifier: embed token-id matrices, then score with a ResNet.

    Wraps an externally constructed ResNet head and an Embedding table.
    When labels are provided, a mean cross-entropy loss is computed over
    the raw scores.
    """

    def __init__(self, res_model, embedding):
        super(FormulaClassification, self).__init__()
        self.res_model = res_model
        self.embedding = embedding
        self.loss_fct = nn.CrossEntropyLoss(weight=None, reduction='mean')

    def get_embs(self, input_ids_):
        """Embed the ids and move the embedding dim into channel position."""
        swapped = input_ids_.transpose(1, 2)
        embedded = self.embedding(swapped)
        # The conv stack expects channels first, so pull the last
        # (embedding) dimension forward.
        return embedded.transpose(1, -1)

    def forward(self, input_ids, labels=None):
        """Return (scores,) — or (scores, loss) when labels are given."""
        scores = self.res_model(self.get_embs(input_ids))
        if labels is None:
            return (scores,)
        loss = self.loss_fct(scores, labels)
        return (scores, loss)  # scores, (loss)

    def get_hidden_states(self, input_ids):
        """Return the ResNet's internal representation for the inputs.

        NOTE(review): relies on the private ``_forward_impl`` of the
        wrapped ResNet — presumably its pre-classifier features; confirm
        against cnn_model.resnet.
        """
        return self.res_model._forward_impl(self.get_embs(input_ids))

def multilines_keywords_list2matrix(multilines_keywords_list,tokenizer,max_symbols,max_lines):
    """
    Turn each example's list of lines into a fixed-size (max_lines, max_symbols) matrix.

    Each line is tokenized/padded to exactly max_symbols ids by the tokenizer;
    the line dimension is then zero-padded up to max_lines — and truncated down
    to max_lines — so every returned matrix has identical shape and the caller
    can np.stack them into a batch.

    :param multilines_keywords_list: list of examples, each a list of line strings
    :param tokenizer: object with a BertTokenizer-compatible ``encode`` method
    :param max_symbols: fixed token count per line
    :param max_lines: fixed line count per example
    :return: list of np.ndarray, each of shape (max_lines, max_symbols)
    """
    matrixs = []
    for multilines_keywords in multilines_keywords_list:
        layers = multilines_keywords
        input_ids = [tokenizer.encode(layer, max_length=max_symbols, truncation=True, padding="max_length",
                                      add_special_tokens=True) for layer in layers]
        input_ids_np = np.stack(input_ids)
        n_lines = len(input_ids_np)
        if n_lines < max_lines:
            # pad missing lines with all-zero rows (id 0 == padding)
            matrix = np.pad(input_ids_np, ((0, max_lines - n_lines), (0, 0)), 'constant',
                            constant_values=(0, 0))
        else:
            # BUGFIX: over-long examples were previously returned untruncated,
            # yielding ragged shapes that break np.stack on the batch.
            matrix = input_ids_np[:max_lines]
        matrixs.append(matrix)
    return matrixs
def instances2tokenizer(instances,tokenizer,max_symbols,max_lines):
    """
    Convert raw instances into id matrices plus parallel label/formula lists.

    :param instances: list of dicts carrying "标签：" (label), "layers"
        (list of line strings) and "公式：" (formula text)
    :param tokenizer: BertTokenizer-compatible object
    :param max_symbols: fixed token count per line
    :param max_lines: fixed line count per example
    :return: (matrixs, formulas, true_labels) as parallel lists
    """
    true_labels = []
    keywords_lists = []
    formulas = []
    for instance in instances:
        true_labels.append(instance["标签："])
        keywords_lists.append(instance["layers"])
        formulas.append(instance["公式："])
    matrixs = multilines_keywords_list2matrix(keywords_lists, tokenizer, max_symbols, max_lines)

    return matrixs,formulas,true_labels


if __name__ == "__main__":
    # vocab and data instances
    from cnn_model.make_vocab import make_vocab
    # data preprocess
    data_vocab_info = make_vocab()
    instances = data_vocab_info["instances"]
    len_vocab = len(data_vocab_info["final_words"])
    labels2id = data_vocab_info["labels2id"]
    max_words = data_vocab_info["max_words"]
    max_symbols = max_words + 2  # presumably +2 for [CLS]/[SEP] — confirm against make_vocab
    max_lines = max_layers = data_vocab_info["max_layers"]
    batch_size = 16
    dim = 64

    tokenizer = BertTokenizer(vocab_file=f"vocab.txt", do_lower_case=False)
    tokenizer.unique_no_split_tokens = list(tokenizer.ids_to_tokens.values())
    # Tokenizer smoke test; [1:-1] strips the [CLS]/[SEP] ids added by encode.
    # NOTE: the second encode overwrites the first (string vs list input form).
    input_ids = tokenizer.encode(text="!#(#&#)#\equiv#!#\otimes#!".replace("#", " "))[1:-1]
    input_ids = tokenizer.encode(text=['!', '(', '&', ')', '\equiv', '!', '\otimes', '!', '\\neg'])[1:-1]
    for input_id in input_ids:
        print(tokenizer.ids_to_tokens[input_id], end=' ')

    # model and embedding
    res_model = ResNet(num_classes=len(labels2id))
    embedding = Embedding(len_vocab, dim)
    formula_model = FormulaClassification(res_model=res_model, embedding=embedding)

    # instances to batch ids
    # BUGFIX: the call was missing its first argument `instances`
    # (originally instances2tokenizer(tokenizer, max_symbols, max_lines)),
    # which raised TypeError before any batch was built.
    matrixs, formulas, true_labels = instances2tokenizer(instances, tokenizer, max_symbols, max_lines)

    batch_data = np.stack(matrixs[:batch_size])
    batch_data = torch.from_numpy(batch_data)

    print(formula_model(batch_data)[0].shape)
    print(formula_model.get_hidden_states(batch_data).shape)
