import os
import torch
import argparse
import onnxruntime as ort
print("onnxruntime device=", ort.get_device())
from models.transformers import BertConfig
from models.bert_for_ner import BertCrfForNer, BertCrfForNerBertPart
from models.layers.crf import CRF
from transformers import AutoModel, AutoTokenizer
# ---- Command-line arguments and global configuration ----
parser = argparse.ArgumentParser(
    description="Split a BertCrfForNer checkpoint into BERT and CRF parts and verify them.")
# Fix: parse as int (was the string "64", unusable in arithmetic) and actually
# wire it through to max_len below instead of hard-coding 64 a second time.
parser.add_argument("--seq_len", default=64, type=int,
                    help="Tokenized sequence length used for padding/truncation.")
args = parser.parse_args()
print("args=", args)

is_use_gpu = True               # NOTE(review): currently unused in this script — confirm before removing
test_text_batch_size = 1        # number of copies of the sample sentence batched together
is_input_length_padding = True  # pad every input to max_len so tensor shapes are static
max_len = args.seq_len          # was a hard-coded 64; now driven by --seq_len (default unchanged)


def split_model():
    """Split the two-part NER model and persist each part separately.

    Loads the full ``BertCrfForNer`` checkpoint from ``bert/``, saves the
    BERT encoder part via ``save_pretrained`` into ``bert/bert_part``, and
    stores the CRF layer's state dict as a standalone ``.pt`` file.
    """
    model_dir = "bert/"
    cfg = BertConfig.from_pretrained(model_dir, cache_dir=None, )

    full_model = BertCrfForNer.from_pretrained(model_dir, config=cfg, cache_dir=None)
    full_model.eval()

    # Not loaded directly via from_pretrained(model_path) alone — the shared
    # config is passed explicitly so both parts agree.
    bert_part = BertCrfForNerBertPart.from_pretrained(model_dir, config=cfg)
    bert_part.eval()

    # Unwrap a distributed/parallel wrapper if one is present.
    to_save = getattr(bert_part, "module", bert_part)
    bert_out_dir = os.path.join(model_dir, "bert_part")
    print("output_dir=", bert_out_dir)
    to_save.save_pretrained(bert_out_dir)

    # Persist the CRF layer on its own as a plain state dict.
    crf_out_path = os.path.join(model_dir, "bert_crf_static_len_64_part_crf.pt")
    torch.save(full_model.crf.state_dict(), crf_out_path)
    print("crf_path=", crf_out_path)


def check_split_model():
    """Verify that the split BERT part + standalone CRF reproduce the full model.

    Runs the same sample sentence through the original ``BertCrfForNer`` model
    and through the separately saved BERT part followed by a standalone ``CRF``
    layer, then asserts that both decode paths produce identical tag sequences.

    Raises:
        AssertionError: if the two decoded tag sequences disagree.
    """
    raw_pytorch_path = "experiments/clue_bert"
    config = BertConfig.from_pretrained(raw_pytorch_path, cache_dir=None, )
    raw_model = BertCrfForNer.from_pretrained(raw_pytorch_path, config=config, cache_dir=None)
    raw_model.eval()

    model_bert_part_file = os.path.join(raw_pytorch_path, "bert")
    model_bert_part_config = BertConfig.from_pretrained(model_bert_part_file, cache_dir=None, )
    model_bert_part = BertCrfForNerBertPart.from_pretrained(model_bert_part_file,
                                                            config=model_bert_part_config,
                                                            cache_dir=None)
    model_bert_part.eval()

    # Alternative loading path: if the BERT part was saved as a raw state dict
    # (matching the CRF save style), it can be loaded like this instead:
    # model_bert_part_file = os.path.join(raw_pytorch_path, "bert_part")
    # model_bert_part = BertCrfForNerBertPart()
    # model_bert_part.load_state_dict(torch.load(model_bert_part_file))
    # model_bert_part.eval()

    # Compare inference results between the two paths.
    tokenizer = AutoTokenizer.from_pretrained(raw_pytorch_path)
    test_text = ["邀请你明晚9点来前海卓越1号T3栋43楼开会"] * test_text_batch_size
    print(len(test_text[0]))
    if is_input_length_padding:
        inputs_tensort = tokenizer(test_text, padding='max_length', truncation=True,
                                   max_length=max_len, return_tensors="pt")
    else:
        inputs_tensort = tokenizer(test_text, max_length=max_len, return_tensors="pt")

    # Fix: inference only — no_grad avoids building an unneeded autograd graph.
    with torch.no_grad():
        raw_model_logits = raw_model(**inputs_tensort)  # returns a tuple; logits at index 0
        bert_part_model_logits = model_bert_part(**inputs_tensort)
    # At this point raw_model_logits[0] and bert_part_model_logits should match.

    # Now apply the CRF layer separately on each logits source.
    crf_model = CRF(num_tags=config.num_labels, batch_first=True)
    crf_model_path = os.path.join(raw_pytorch_path, "bert_crf_static_len_64_part_crf.pt")
    # Fix: map_location so a checkpoint saved on GPU still loads on a CPU-only host.
    crf_model.load_state_dict(torch.load(crf_model_path, map_location="cpu"))

    bert_part_crf_tags = crf_model.decode(bert_part_model_logits, inputs_tensort['attention_mask'])
    bert_part_crf_tags = bert_part_crf_tags.squeeze(0).cpu().numpy().tolist()
    # raw_model_logits[0].shape = torch.Size([1, 64, 3])
    raw_tags = raw_model.crf.decode(raw_model_logits[0], inputs_tensort['attention_mask'])
    raw_tags = raw_tags.squeeze(0).cpu().numpy().tolist()
    print(raw_tags)
    print(len(raw_tags[0]))
    print(bert_part_crf_tags)
    assert raw_tags == bert_part_crf_tags
    print("Check OK")


if __name__ == '__main__':
    # Two-step workflow: run split_model() once to write the split artifacts,
    # then run check_split_model() to confirm the parts reproduce the full model.
    # split_model()
    check_split_model()