import pathlib
from transformers.convert_graph_to_onnx import convert_pytorch

# NOTE(review): hard-coded local path — parameterize before reuse elsewhere.
model_path = "/home/gyf/pkg/bilstm-crf/pytorch_model.bin"

# convert_pytorch(model_path, opset=14, output="bilstm-crf.onnx", use_external_format=False)

import torch
import torchsummary

# WARNING: torch.load() unpickles arbitrary objects and can execute code —
# only ever load checkpoint files from a trusted source.
model = torch.load(model_path)

# print(model)

# torchsummary.summary(model, input_size=(1, 128))



# Set of entity labels; filled as a side effect by load_data() below.
categories = set()

# BERT configuration: paths to the pretrained Chinese BERT checkpoint assets.

config_path = '/root/kg/bert/chinese_L-12_H-768_A-12/bert_config.json'
checkpoint_path = '/root/kg/bert/chinese_L-12_H-768_A-12/bert_model.ckpt'
dict_path = '/root/kg/bert/chinese_L-12_H-768_A-12/vocab.txt'

def load_data(filename):
    """Load character-level NER data from a two-column "char TAG" file.

    The file contains one "char BIO-tag" pair per line, with sentences
    separated by blank lines. Each returned record has the format
    ``[text, [start, end, label], [start, end, label], ...]``, meaning that
    ``text[start:end + 1]`` is an entity of type ``label``.

    Side effect: every label encountered is added to the module-level
    ``categories`` set.

    Raises:
        ValueError: if a non-empty line does not contain exactly one space.
    """
    D = []
    with open(filename, encoding='utf-8') as f:
        raw = f.read()
    # Sentences are separated by blank lines.
    for sentence in raw.split('\n\n'):
        if not sentence:
            continue
        d = ['']
        for i, line in enumerate(sentence.split('\n')):
            char, flag = line.split(' ')
            d[0] += char
            if flag[0] == 'B':
                # Start of a new entity: [start, end, label]; tags look like
                # "B-PER", so flag[2:] strips the "B-" prefix.
                d.append([i, i, flag[2:]])
                categories.add(flag[2:])
            elif flag[0] == 'I' and len(d) > 1:
                # Extend the most recent entity. The len(d) > 1 guard fixes a
                # crash on malformed data: an 'I' tag with no preceding 'B'
                # would otherwise hit d[-1][1] where d[-1] is the text string.
                d[-1][1] = i
        D.append(d)
    return D

# Labeled data: People's Daily Chinese NER corpus train/dev/test splits.
# NOTE(review): hard-coded local paths — adjust per environment.
train_data = load_data('/home/gyf/pkg/bilstm-crf/china-people-daily-ner-corpus/example.train')
valid_data = load_data('/home/gyf/pkg/bilstm-crf/china-people-daily-ner-corpus/example.dev')
test_data = load_data('/home/gyf/pkg/bilstm-crf/china-people-daily-ner-corpus/example.test')

# Freeze the accumulated label set into a deterministic (sorted) list.
categories = list(sorted(categories))
print(test_data)


####################
# ONNX conversion / benchmarking utilities, adapted from:
# https://github.com/taishan1994/pytorch_bert_bilstm_crf_ner/tree/main/convert_onnx
import time
import numpy as np
# NOTE(review): a relative import only works when this file is executed as
# part of a package; run as a standalone script it raises ImportError —
# confirm how this module is invoked.
from . import decodeUtils

# Number of repetitions used when timing each inference backend.
NUM = 10

class ConverttOnnx:
    """Export a PyTorch NER model to ONNX and compare inference latency.

    Adapted from
    https://github.com/taishan1994/pytorch_bert_bilstm_crf_ner/tree/main/convert_onnx

    Workflow: ``inference`` benchmarks the original PyTorch model,
    ``convert`` writes the ONNX file, ``onnx_inference`` benchmarks an
    onnxruntime session on the exported model.
    """

    def __init__(self, args, model, tokenizer, idx2tag):
        """
        Args:
            args: config object; must expose ``max_seq_len`` and ``use_crf``.
            model: PyTorch model called as
                ``model(token_ids, attention_masks, token_type_ids)``.
            tokenizer: a HuggingFace tokenizer (``encode_plus`` is used).
            idx2tag: mapping from label index to BIOES tag string.
        """
        self.args = args
        self.model = model
        self.tokenizer = tokenizer
        self.idx2tag = idx2tag

    def inference(self, texts):
        """Run the PyTorch model NUM times on `texts`, print the mean
        per-call latency and the decoded entities.

        Args:
            texts: a string (or character sequence); tokenized one character
                per token.
        """
        self.model.eval()
        with torch.no_grad():
            tokens = [i for i in texts]
            encode_dict = self.tokenizer.encode_plus(
                text=tokens,
                max_length=self.args.max_seq_len,
                # Replaces the deprecated pad_to_max_length=True (equivalent
                # behavior, consistent with onnx_inference below).
                padding="max_length",
                return_token_type_ids=True,
                return_attention_mask=True,
                return_tensors="pt")
            token_ids = encode_dict['input_ids']
            attention_masks = encode_dict['attention_mask'].bool()
            token_type_ids = encode_dict['token_type_ids']
            s1 = time.time()
            for _ in range(NUM):
                logits = self.model(token_ids, attention_masks, token_type_ids)
            e1 = time.time()
            print('原版耗时：', (e1 - s1) / NUM)
            if self.args.use_crf == 'True':
                # CRF decoding already yields label indices.
                output = logits
            else:
                output = logits.detach().cpu().numpy()
                output = np.argmax(output, axis=2)
            # Skip position 0 ([CLS]) and keep one prediction per input char.
            pred_entities = decodeUtils.bioes_decode(
                output[0][1:1 + len(texts)], texts, self.idx2tag)
            print(pred_entities)

    def convert(self, save_path):
        """Export ``self.model`` to an ONNX file at `save_path`, with dynamic
        batch-size and sequence-length axes on all inputs and the output."""
        self.model.eval()
        # Bug fix: the original referenced the global `args`; use self.args so
        # the method works without relying on an identically-named global.
        inputs = {'token_ids': torch.ones(1, self.args.max_seq_len, dtype=torch.long),
                  'attention_masks': torch.ones(1, self.args.max_seq_len, dtype=torch.uint8),
                  'token_type_ids': torch.ones(1, self.args.max_seq_len, dtype=torch.long)}

        symbolic_names = {0: 'batch_size', 1: 'max_seq_len'}
        with torch.no_grad():
            torch.onnx.export(
                self.model,
                (inputs["token_ids"],
                 inputs["attention_masks"],
                 inputs["token_type_ids"]),
                save_path,
                opset_version=11,
                do_constant_folding=True,
                input_names=["token_ids", "attention_masks", "token_type_ids"],
                output_names=["logits"],
                dynamic_axes={'token_ids': symbolic_names,
                              'attention_masks': symbolic_names,
                              'token_type_ids': symbolic_names,
                              'logits': symbolic_names}
            )

    def onnx_inference(self, ort_session, texts):
        """Run `ort_session` NUM times on `texts`, print the mean per-call
        latency and the decoded entities.

        Args:
            ort_session: an onnxruntime ``InferenceSession`` for the model
                exported by ``convert``.
            texts: a string (or character sequence).
        """
        def to_numpy(tensor):
            return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()

        tokens = [i for i in texts]
        # Bug fix: the original referenced the globals `tokenizer` and `args`;
        # use the instance attributes stored by __init__.
        encode_dict = self.tokenizer.encode_plus(text=tokens,
                                                 max_length=self.args.max_seq_len,
                                                 padding="max_length",
                                                 truncation="longest_first",
                                                 return_token_type_ids=True,
                                                 return_attention_mask=True,
                                                 return_tensors="pt")
        token_ids = encode_dict['input_ids']
        # .to() converts in place of torch.tensor(tensor, ...), which emits a
        # copy-construct warning. uint8 matches the dummy input in convert().
        attention_masks = encode_dict['attention_mask'].to(torch.uint8)
        token_type_ids = encode_dict['token_type_ids']
        token_ids = to_numpy(token_ids)
        attention_masks = to_numpy(attention_masks)
        token_type_ids = to_numpy(token_type_ids)
        s2 = time.time()
        for _ in range(NUM):
            output = ort_session.run(None, {'token_ids': token_ids, "attention_masks": attention_masks,
                                            "token_type_ids": token_type_ids})
        e2 = time.time()
        print('onnx耗时：', (e2 - s2) / NUM)
        output = output[0]
        pred_entities = decodeUtils.bioes_decode(output[0][1:1 + len(texts)], texts, self.idx2tag)
        print(pred_entities)
