import torch
from model import PromptBert
import random
import os
from transformers import BertTokenizer
import torch.nn.functional as F
import torch.nn as nn
import onnxruntime
import numpy as np


class Config:
    """Paths, device, and hyper-parameters shared by the conversion/inference code."""

    # Repository root: two directories above this file.
    root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    # Local cache directory for the pretrained chinese-roberta-wwm-ext weights.
    model_path = '/data0/jianyu10/PTM/huggingface_model_cache/chinese-roberta-wwm-ext'
    # Prefer a visible GPU, otherwise fall back to CPU.
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    # NOTE(review): both dropout_prob and dropout exist — confirm which one the
    # model actually reads.
    dropout_prob = 0.25
    # presumably the [MASK] token id in the BERT vocabulary — TODO confirm
    mask_ids = 103
    dropout = 0.15
    save_path = f'{root_path}/UseModel/'
    onnxpath = f'{save_path}promptnew.onnx'


class Torch2onnx:
    """Export a trained PromptBert checkpoint to ONNX and run similarity inference with it."""

    def __init__(self, config):
        super(Torch2onnx, self).__init__()
        self.config = config
        self.loadonnxflag = False  # becomes True once load_from_onnx() succeeds

    def load_save(self, config):
        """Re-save a multi-GPU (DataParallel) checkpoint as a plain single-model state dict."""
        model = PromptBert(config).to(config.device)
        checkpoint = torch.load(config.save_path + '/spear_best.pth.tar')
        # The checkpoint was written from a DataParallel wrapper ('module.'-prefixed
        # keys), so wrap the model the same way before loading.
        # NOTE(review): device_ids=[2, 3] is hard-coded — confirm those GPUs exist
        # on the target machine.
        model = nn.DataParallel(model, device_ids=[2, 3])
        model.load_state_dict(checkpoint['state_dict'])
        state = {'state_dict': model.module.state_dict()}
        torch.save(state, config.save_path + '/single_spear_best.pth.tar')

    def load_model(self, config):
        """Load the single-model checkpoint and return the model in eval mode on config.device."""
        self.load_save(config)
        model = PromptBert(config).to(config.device)
        checkpoint = torch.load(config.save_path + '/single_spear_best.pth.tar')
        model.load_state_dict(checkpoint['state_dict'])
        model.eval()
        # BUG FIX: the original then called model.cuda() unconditionally, which
        # crashes on CPU-only hosts; the model is already on config.device.
        return model

    def convert(self):
        """Export the torch model to ONNX at config.onnxpath (no-op if the file exists)."""
        if os.path.exists(self.config.onnxpath):
            print('onnx model has existed!')
            return
        torchmodel = self.load_model(self.config)
        input_names = ['input_ids', 'input_tem']
        output_names = ['output']
        # Dummy inputs: [CLS] + 120 random wordpiece ids + [MASK] + [SEP], shape (1, 123).
        input1 = torch.tensor([101] + [random.randint(100, 10000) for _ in range(120)] + [103, 102],
                              dtype=torch.long, device=self.config.device).view(1, 123)
        input2 = torch.tensor([101] + [random.randint(100, 10000) for _ in range(120)] + [103, 102],
                              dtype=torch.long, device=self.config.device).view(1, 123)
        dummy_input = (input1, input2)
        with torch.no_grad():
            # BUG FIX: dynamic_axes entries must be valid axis indices of the
            # rank-2 inputs/outputs; the original used axis 100, which does not
            # exist, so mark batch (0) and sequence (1) as dynamic instead.
            torch.onnx.export(torchmodel, dummy_input, self.config.onnxpath, opset_version=11, verbose=True,
                              input_names=input_names, output_names=output_names,
                              dynamic_axes={'input_ids': [0, 1], 'input_tem': [0, 1], 'output': [0, 1]})

    def __getids(self, title, tokenizer):
        """Turn a title into the two prompt-template token-id sequences the model expects."""
        sentence = f'{title}，它的意思是[MASK]。'
        sen_tmp = f'{title}，这句话的意思是[MASK]。'
        oriids = tokenizer.encode_plus(sentence)
        temids = tokenizer.encode_plus(sen_tmp)
        return oriids['input_ids'], temids['input_ids']

    def load_from_onnx(self):
        """Create the CPU onnxruntime session and tokenizer; sets loadonnxflag on success."""
        if not os.path.exists(self.config.onnxpath):
            print("onnx model doesn't exist!")
            return
        self.__load_tokenizer()
        sess_options = onnxruntime.SessionOptions()
        self.session = onnxruntime.InferenceSession(self.config.onnxpath, sess_options,
                                                    providers=['CPUExecutionProvider'])
        self.loadonnxflag = True

    def __load_tokenizer(self):
        """Fetch the roberta-wwm-ext tokenizer into the configured cache directory."""
        self.tokenizer = BertTokenizer.from_pretrained("hfl/chinese-roberta-wwm-ext", cache_dir=self.config.model_path)

    def forward(self, orititle, simtitle):
        """Embed both titles through the ONNX model and return embeddings + cosine similarity.

        Returns None (with a hint printed) if load_from_onnx() was not called first.
        """
        if not self.loadonnxflag:
            # BUG FIX: corrected the 'frist' typo in this user-facing message.
            print('first of all you need load onnx model. [X.load_from_onnx()] ')
            return
        oriids, temids = self.__getids(orititle, self.tokenizer)
        simids, simtemids = self.__getids(simtitle, self.tokenizer)
        ort_inputs = {
            'input_ids': np.array([oriids]),
            'input_tem': np.array([temids]),
        }
        sim_inputs = {
            'input_ids': np.array([simids]),
            'input_tem': np.array([simtemids]),
        }
        # BUG FIX: the model is exported with output name 'output' (see convert()),
        # but the original requested 'output1', which onnxruntime rejects as an
        # unknown output name.
        ori_outputs = self.session.run(['output'], ort_inputs)
        output1 = torch.tensor(ori_outputs[0])
        sim_outputs = self.session.run(['output'], sim_inputs)
        output2 = torch.tensor(sim_outputs[0])
        sim = F.cosine_similarity(output1, output2, dim=-1)
        return {'orititle': output1, 'simtitle': output2, 'simscore': sim.item()}


if __name__ == '__main__':
    # Script entry point: build the config and export the checkpoint to ONNX.
    converter = Torch2onnx(Config())
    converter.convert()