from functools import partial
import argparse
import numpy as np
import time
import sys
import tritonclient.grpc as grpcclient
import re
from tritonclient.utils import InferenceServerException
from transformers import BertTokenizer

# Local cache directory for the pretrained tokenizer files.
# NOTE(review): 'chinse' looks like a typo for 'chinese' in the path — confirm this
# directory actually exists on disk before relying on the cache.
model_path = 'D:/PTM/chinse-roberta-wwm-ext'
# Tokenizer for hfl/chinese-roberta-wwm-ext; files are downloaded into (or read
# from) model_path via cache_dir on first use.
tokenizer = BertTokenizer.from_pretrained("hfl/chinese-roberta-wwm-ext", cache_dir=model_path)
if __name__ == '__main__':
    # Benchmark client for the 'prompt_bert' model on a Triton Inference Server.
    # For each headline pair, builds two prompt variants, tokenizes them, tiles
    # each across a batch of 20, sends a synchronous gRPC infer request, and
    # prints the round-trip latency (ms) plus the first two output values.
    # Runs forever; stop with Ctrl-C.
    parser = argparse.ArgumentParser()
    parser.add_argument('-v',
                        '--verbose',
                        action="store_true",
                        required=False,
                        default=False,
                        help='Enable verbose output')
    parser.add_argument('-u',
                        '--url',
                        type=str,
                        required=False,
                        default='10.60.57.21:8001',
                        # Fixed: help text previously claimed the default was
                        # localhost:8001, which did not match the actual default.
                        help='Inference server URL. Default is 10.60.57.21:8001.')
    parser.add_argument('-t',
                        '--client-timeout',
                        type=float,
                        required=False,
                        default=None,
                        help='Client timeout in seconds. Default is None.')
    FLAGS = parser.parse_args()

    try:
        triton_client = grpcclient.InferenceServerClient(url=FLAGS.url,
                                                         verbose=FLAGS.verbose)
    except Exception as e:
        print("context creation failed: " + str(e))
        # Fixed: bare sys.exit() exited with status 0, hiding the failure.
        sys.exit(1)

    model_name = 'prompt_bert'
    if not triton_client.is_model_ready(model_name):
        print("FAILED : is_model_ready")
        sys.exit(1)

    # Model expects two INT32 tensors of shape [batch, sequence_length].
    BATCH_SIZE = 20
    SEQ_LEN = 123
    inputs = [
        grpcclient.InferInput('input_ids', [BATCH_SIZE, SEQ_LEN], "INT32"),
        grpcclient.InferInput('input_tem', [BATCH_SIZE, SEQ_LEN], "INT32"),
    ]
    # Hoisted out of the loop: the requested outputs never change.
    outputs = [grpcclient.InferRequestedOutput('output')]

    # Each entry is '<original title>---<similar title>'; only the left side
    # is used to build the prompts below.
    titles = ['英国首相府违规聚会调查报告认为政府“领导不力”---调查报告认为政府“领导不力”',
              '在欢声笑语中展现新时代新征程上精气神---2022年春节联欢晚会——在欢声笑语中展现新时代新征程上精气神',
              '因发生斗殴事件致2名犯人死亡 美国联邦监狱进入封锁状态---美国休斯敦发生枪击事件 致1名警员死亡',
              '英媒：泽连斯基下令三年扩军十万 敦促议员不要散布恐慌---泽连斯基签署法令：未来三年内扩军十万人',
              '冬季风暴渐平息 美部分地区降雪超50厘米---冬季风暴持续影响美国大部分地区',
              '英媒：特朗普大举筹款瞄准2024总统大选---特朗普又夸下海口：若2024再当选总统，将赦免国会大厦骚乱者',
              '多国政要和国际组织官员贺新春 祝新年如虎添翼---视频｜多位国际组织负责人及国家政要送上新春祝福',
              '“美国‘超额死亡’人数已近百万”---华尔街日报：疫情下，美国“超额死亡”人数已近百万',
              '日本外相：驻日美军入境新冠检测所用方法未被日本认可有效---外媒：驻日美军人员被曝离美前未进行新冠检测',
              '除夕，布林肯又发新闻公报拜年：愿虎年给所有人带来机遇、成功和健康---美国务卿布林肯拜年：愿虎年给所有人带来机遇、成功和健康']

    while True:
        for title in titles:
            ori, _sim = title.split('---')  # right-hand "similar" title is unused
            sentence = f'{ori}，它的意思是[MASK]。'
            sen_tmp = f'{ori}，这句话的意思是[MASK]。'
            # truncation=True guards against a prompt tokenizing to more than
            # SEQ_LEN ids, which would break the fixed [BATCH_SIZE, SEQ_LEN]
            # tensor shape declared above.
            oriids = tokenizer.encode_plus(sentence, max_length=SEQ_LEN,
                                           padding='max_length',
                                           truncation=True)['input_ids']
            temids = tokenizer.encode_plus(sen_tmp, max_length=SEQ_LEN,
                                           padding='max_length',
                                           truncation=True)['input_ids']
            # Replicate the single tokenized sequence across the batch dimension.
            dummy_input1 = np.tile(np.asarray(oriids, dtype=np.int32), (BATCH_SIZE, 1))
            dummy_input2 = np.tile(np.asarray(temids, dtype=np.int32), (BATCH_SIZE, 1))
            inputs[0].set_data_from_numpy(dummy_input1)
            inputs[1].set_data_from_numpy(dummy_input2)

            start = time.time()
            # Fixed: FLAGS.client_timeout was parsed but never passed to the
            # request, so the -t flag was silently ignored.
            result = triton_client.infer(
                model_name=model_name,
                inputs=inputs,
                request_id='1',  # arbitrary request id
                outputs=outputs,
                client_timeout=FLAGS.client_timeout)
            end = time.time()

            output_array = result.as_numpy('output')
            print((end - start) * 1000, output_array[0][:2])