# -*- encoding: utf-8 -*-
# author:lmolhw
# datetime:2021-3-23 15:00

"""
文件说明：
        测试ONNXRuntime-GPU
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import os
import sys
import string
from PIL import Image
import argparse
import torch
from torch import nn
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import torch.utils.data
import torchvision.transforms as transforms

from config import ConfigOpt
from model import Model

import math
import cv2
import numpy as np
import time
import copy
# import onnx
import onnxruntime

from collections import OrderedDict
# from onnx import helper
# from onnx import AttributeProto, TensorProto, GraphProto
os.environ["CUDA_VISIBLE_DEVICES"] = "0"


class CTCLabelConverter(object):
    """Map between text labels and integer index sequences for CTC decoding."""

    def __init__(self, character):
        # character (str): alphabet of all recognizable symbols.
        chars = list(character)

        # Index 0 is reserved for the CTC 'blank' token, so real characters
        # are numbered starting at 1.
        self.dict = {ch: pos + 1 for pos, ch in enumerate(chars)}
        self.character = ['[blank]'] + chars  # dummy '[blank]' token for CTCLoss (index 0)

    def encode(self, text):
        """Convert text labels into one concatenated index tensor.

        input:
            text: text labels of each image. [batch_size]

        output:
            text: concatenated text index for CTCLoss.
                    [sum(text_lengths)] = [text_index_0 + text_index_1 + ... + text_index_(n - 1)]
            length: length of each text. [batch_size]
        """
        lengths = [len(label) for label in text]
        indices = [self.dict[ch] for ch in ''.join(text)]
        return (torch.IntTensor(indices), torch.IntTensor(lengths))

    def decode(self, text_index, length):
        """Convert index sequences back into text labels (CTC collapse)."""
        texts = []
        start = 0
        for seq_len in length:
            segment = text_index[start:start + seq_len]

            chars = []
            prev = 0
            for idx in segment:
                # Skip blanks (index 0) and consecutive repeats of the
                # same symbol, per the CTC decoding rule.
                if idx != 0 and idx != prev:
                    chars.append(self.character[idx])
                prev = idx

            texts.append(''.join(chars))
            start += seq_len
        return texts


class InferResizeNormalize(object):
    """Resize a PIL image to a fixed (width, height) and convert to a float tensor."""

    def __init__(self, size, interpolation=Image.BILINEAR):
        # size: (width, height) target passed directly to PIL's resize.
        self.size = size
        self.interpolation = interpolation
        self.toTensor = transforms.ToTensor()

    def __call__(self, img):
        resized = img.resize(self.size, self.interpolation)
        # NOTE: no mean/std normalization here — ToTensor only scales
        # pixel values to [0, 1]; the sub/div step is deliberately disabled.
        return self.toTensor(resized)


class NormalizePAD(object):
    """Convert a PIL image to a tensor normalized to [-1, 1].

    Despite the name, the right-padding to ``max_size`` is currently
    disabled; the tensor keeps the image's own width.
    """

    def __init__(self, max_size, PAD_type='right'):
        self.toTensor = transforms.ToTensor()
        self.max_size = max_size
        # Half of the target width (kept for the disabled padding branch).
        self.max_width_half = math.floor(max_size[2] / 2)
        self.PAD_type = PAD_type

    def __call__(self, img):
        tensor = self.toTensor(img)
        # Shift [0, 1] -> [-1, 1] in place.
        tensor.sub_(0.5).div_(0.5)
        c, h, w = tensor.size()
        # Padding to self.max_size was removed; return the tensor as-is.
        return tensor


def ONNX_test(img_path):
    """Run one image through the exported ONNX recognition model and print the result.

    Loads the image at ``img_path``, resizes it to the configured height while
    keeping the aspect ratio, runs inference with onnxruntime (input placed on
    CUDA device 0 via OrtValue), greedily decodes the output with the
    module-level ``converter``, and prints the decoded text.

    Depends on module-level globals ``opt`` (config) and ``converter``
    (CTCLabelConverter).

    Returns:
        float: wall-clock time in seconds of the ``session.run`` call only
        (session creation and preprocessing are excluded).
    """
    img = Image.open(img_path)
    if opt.rgb:
        c = 3
        img = img.convert('RGB')
    else:
        img = img.convert('L')
        c = 1

    w, h = img.size
    ratio = w / float(h)  # aspect ratio (width / height)
    # if math.ceil(self.opt.imgH * ratio) > self.opt.imgW:  # when imgH * ratio exceeds the preset width, clamp to the preset width
    #     resized_w = self.opt.imgW
    # else:
    resized_w = math.ceil(opt.imgH * ratio)
    # img = img.resize((resized_w, self.opt.imgH), Image.BICUBIC)
    transformer = InferResizeNormalize((resized_w, opt.imgH))
    # Resize End

    img = transformer(img)
    img = img.view(1, *img.size())  # add batch dimension: (1, C, H, W)
    # onnx_inputs = Variable(img)
    # onnx_inputs = copy.deepcopy(img).cpu()
    onnx_inputs = np.array(img)


    # onnx_inputs is numpy array on cpu, create an OrtValue and place it on cuda device id = 0
    print(onnxruntime.get_device())
    ortvalue = onnxruntime.OrtValue.ortvalue_from_numpy(onnx_inputs, 'cuda', 0)
    # NOTE(review): the session is rebuilt for every image, so the reported
    # per-image time excludes (repeated) model-load cost; hoisting the session
    # out of this function would make a multi-image benchmark more realistic.
    session = onnxruntime.InferenceSession("./dynamic_image_rec.onnx")
    input_name = session.get_inputs()[0].name
    print('\t>>input: {}, {}, {}'.format(session.get_inputs()[0].name, session.get_inputs()[0].shape,
                                         session.get_inputs()[0].type))
    # _outputs = session.get_outputs()
    #
    # for kk in range(len(_outputs)):
    #     _out = _outputs[kk]
    #     print('\t>>out-{}: {}, {}, {}'.format(kk, _out.name, _out.shape, _out.type))

    # x = np.array(onnx_inputs).astype(np.float32)
    pre_start_time = time.time()
    p = session.run(None, {input_name: ortvalue})  # only the forward pass is timed
    pre_end_time = time.time() - pre_start_time

    out1 = p[0]

    # Greedy (argmax) decoding over the class axis; the full sequence length
    # p[0].shape[1] is passed so decode() consumes the whole output.
    Onnx_str = converter.decode(np.argmax(p[0][0], axis=1), [p[0].shape[1]])

    print('============================================================================')
    print('>>summary Output:', Onnx_str)
    print('onnx out: {} \n{}'.format(np.shape(out1), out1))
    print('onnx pre time: {}'.format(pre_end_time))
    return pre_end_time


if __name__ == '__main__':
    opt = ConfigOpt()
    batch_size = 1

    # Overrides for this benchmark run.
    opt.saved_model = 'saved_models/1-28_VGG_Croped_160W_Invoice_FT/mtl_best_accuracy.pth'
    opt.imgW = 640
    opt.rgb = None  # falsy -> grayscale input path in ONNX_test
    opt.FeatureExtraction = 'VGG'  # VGG|RCNN|ResNet|MobileNetV3|DenseNet|CNN_Lite3
    # ocr_rec = OcrRec(opt=opt)

    image_path = '/data1/lhw/workspace/OCR/demo_images'

    converter = CTCLabelConverter(opt.character)
    opt.num_class = len(converter.character)

    times = []
    for file in os.listdir(image_path):
        print(file)
        # BUGFIX: the old check `file[-1] == 'g'` matched ANY name ending in
        # 'g' (e.g. '.svg', 'readme_g') and missed uppercase extensions; test
        # for the actual image suffixes instead.
        if file.lower().endswith(('.jpg', '.jpeg', '.png')):
            test_img_path = os.path.join(image_path, file)
            print(test_img_path)
            per_time = ONNX_test(test_img_path)
            times.append(per_time)

    # BUGFIX: np.average([]) emits a RuntimeWarning and prints NaN when the
    # directory has no matching images; report that case explicitly instead.
    if times:
        print('ave_time:{}'.format(np.average(times)))
    else:
        print('no test images found in {}'.format(image_path))

